//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include <cctype>

using namespace llvm;

#define DEBUG_TYPE "systemz-lower"

namespace {
// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In)
      : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value.  Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
  unsigned CCMask;
};
} // end anonymous namespace

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
                                             const SystemZSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize(0));
  // Set up the register classes.
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass);
  } else {
    addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  }
  if (Subtarget.hasVectorEnhancements1())
    addRegisterClass(MVT::f128, &SystemZ::VR128BitRegClass);
  else
    addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v8i16, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4i32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2i64, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4f32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2f64, &SystemZ::VR128BitRegClass);
  }

  // Compute derived properties from the register classes
  computeRegisterProperties(Subtarget.getRegisterInfo());

  // Set up special registers.
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);
  // For performance reasons we prefer 16-byte alignment.
  setPrefFunctionAlignment(4);
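  // (Note: in this version of the API the alignment arguments are log2
  // values, so the "4" above requests the 16-byte alignment mentioned in the
  // comment.)
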
  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);
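      // (The SystemZ divide instructions, e.g. DSGR/DLGR, produce the quotient
      // and the remainder together in a register pair, which is why the
      // combined DIVREM nodes are the ones that map onto real instructions.)
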
      // Support addition/subtraction with overflow.
      setOperationAction(ISD::SADDO, VT, Custom);
      setOperationAction(ISD::SSUBO, VT, Custom);

      // Support addition/subtraction with carry.
      setOperationAction(ISD::UADDO, VT, Custom);
      setOperationAction(ISD::USUBO, VT, Custom);

      // Support carry in as value rather than glue.
      setOperationAction(ISD::ADDCARRY, VT, Custom);
      setOperationAction(ISD::SUBCARRY, VT, Custom);

      // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
      // stores, putting a serialization instruction after the stores.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);

      // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
      // available, or if the operand is constant.
      setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);

      // Use POPCNT on z196 and above.
      if (Subtarget.hasPopulationCount())
        setOperationAction(ISD::CTPOP, VT, Custom);
      else
        setOperationAction(ISD::CTPOP, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // Only z196 and above have native support for conversions to unsigned.
      // On z10, promoting to i64 doesn't generate an inexact condition for
      // values that are outside the i32 range but in the i64 range, so use
      // the default expansion.
      if (!Subtarget.hasFPExtension())
        setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
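  // (For the original 8- and 16-bit operations, the custom lowering ends up
  // emitting a compare-and-swap loop on the containing aligned 32-bit word.)
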
  // Even though i128 is not a legal type, we still need to custom lower
  // the atomic operations in order to exploit SystemZ instructions.
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);

  // We can use the CC result of compare-and-swap to implement
  // the "success" result of ATOMIC_CMP_SWAP_WITH_SUCCESS.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Traps are legal, as we will convert them to "j .+2".
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // z10 has instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  if (!Subtarget.hasFPExtension()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  }

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);
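  // (FLOGR counts leading zeros of a 64-bit value, so the i32 forms are
  // promoted to i64 and the result is then corrected for the extra 32 bits.)
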
  // On arch13 we have native support for a 64-bit CTPOP.
  if (Subtarget.hasMiscellaneousExtensions3()) {
    setOperationAction(ISD::CTPOP, MVT::i32, Promote);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  }

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);
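  // (For example, an OR whose operands occupy disjoint 32-bit halves of the
  // register can be done as an insertion into the low or high subregister.)
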
  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
  }
  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
  for (MVT VT : MVT::vector_valuetypes()) {
    // Assume by default that all vector operations need to be expanded.
    for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
      if (getOperationAction(Opcode, VT) == Legal)
        setOperationAction(Opcode, VT, Expand);

    // Likewise all truncating stores and extending loads.
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
    }

    if (isTypeLegal(VT)) {
      // These operations are legal for anything that can be stored in a
      // vector register, even if there is no native support for the format
      // as such.  In particular, we can do these for v4f32 even though there
      // are no specific instructions for that format.
      setOperationAction(ISD::LOAD, VT, Legal);
      setOperationAction(ISD::STORE, VT, Legal);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::BITCAST, VT, Legal);
      setOperationAction(ISD::UNDEF, VT, Legal);

      // Likewise, except that we need to replace the nodes with something
      // custom.
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    }
  }

  // Handle integer vector types.
  for (MVT VT : MVT::integer_vector_valuetypes()) {
    if (isTypeLegal(VT)) {
      // These operations have direct equivalents.
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);
      if (VT != MVT::v2i64)
        setOperationAction(ISD::MUL, VT, Legal);
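      // (There is no vector multiply with 64-bit elements, so v2i64 MUL is
      // left to be expanded.)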
      setOperationAction(ISD::AND, VT, Legal);
      setOperationAction(ISD::OR, VT, Legal);
      setOperationAction(ISD::XOR, VT, Legal);
      if (Subtarget.hasVectorEnhancements1())
        setOperationAction(ISD::CTPOP, VT, Legal);
      else
        setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTTZ, VT, Legal);
      setOperationAction(ISD::CTLZ, VT, Legal);

      // Convert a GPR scalar to a vector by inserting it into element 0.
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);

      // Use a series of unpacks for extensions.
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);

      // Detect shifts by a scalar amount and convert them into
      // V*_BY_SCALAR.
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
      setOperationAction(ISD::SRL, VT, Custom);

      // At present ROTL isn't matched by DAGCombiner.  ROTR should be
      // converted into ROTL.
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands
      // and inverting the result as necessary.
      setOperationAction(ISD::SETCC, VT, Custom);
    }
  }

  if (Subtarget.hasVector()) {
    // There should be no need to check for float types other than v2f64
    // since <2 x f32> isn't a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2f64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal);
  }

  if (Subtarget.hasVectorEnhancements2()) {
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4f32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4f32, Legal);
  }

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);

      // Handle constrained floating-point operations.
      setOperationAction(ISD::STRICT_FADD, VT, Legal);
      setOperationAction(ISD::STRICT_FSUB, VT, Legal);
      setOperationAction(ISD::STRICT_FMUL, VT, Legal);
      setOperationAction(ISD::STRICT_FDIV, VT, Legal);
      setOperationAction(ISD::STRICT_FMA, VT, Legal);
      setOperationAction(ISD::STRICT_FSQRT, VT, Legal);
      setOperationAction(ISD::STRICT_FRINT, VT, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, VT, Legal);
      setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
        setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
        setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
        setOperationAction(ISD::STRICT_FROUND, VT, Legal);
        setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
      }
    }
  }

  // Handle floating-point vector types.
  if (Subtarget.hasVector()) {
    // Scalar-to-vector conversion is just a subreg.
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);

    // Some insertions and extractions can be done directly but others
    // need to go via integers.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    // These operations have direct equivalents.
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FABS, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

    // Handle constrained floating-point operations.
    setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);
  }

  // The vector enhancements facility 1 has instructions for these.
  if (Subtarget.hasVectorEnhancements1()) {
    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::v2f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f128, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f128, Legal);

    // Handle constrained floating-point operations.
    setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
    for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
                     MVT::v4f32, MVT::v2f64 }) {
      setOperationAction(ISD::STRICT_FMAXNUM, VT, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, VT, Legal);
    }
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FMA, MVT::f128, Legal);
  else
    setOperationAction(ISD::FMA, MVT::f128, Expand);

  // We don't have a copysign instruction on vector registers.
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of a f80 constant (in cases where the constant
  // would fit in an f80).
  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);

  // We don't have extending load instruction on vector registers.
  if (Subtarget.hasVectorEnhancements1()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
  }

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  if (!Subtarget.hasVector()) {
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::f32, Custom);
  }

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Codes for which we want to perform some z-specific combinations.
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::FP_ROUND);
  setTargetDAGCombine(ISD::FP_EXTEND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::SDIV);
  setTargetDAGCombine(ISD::UDIV);
  setTargetDAGCombine(ISD::SREM);
  setTargetDAGCombine(ISD::UREM);

  // Handle intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable.  E.g.  "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC".  Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}

EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL,
                                              LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return Subtarget.hasVectorEnhancements1();
  default:
    break;
  }

  return false;
}

// Return true if the constant can be generated with a vector instruction,
// such as VGM, VGMB or VREPI.
bool SystemZVectorConstantInfo::isVectorConstantLegal(
    const SystemZSubtarget &Subtarget) {
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  if (!Subtarget.hasVector() ||
      (isFP128 && !Subtarget.hasVectorEnhancements1()))
    return false;

  // Try using VECTOR GENERATE BYTE MASK.  This is the architecturally-
  // preferred way of creating all-zero and all-one vectors so give it
  // priority over other methods below.
  unsigned Mask = 0;
  unsigned I = 0;
  for (; I < SystemZ::VectorBytes; ++I) {
    uint64_t Byte = IntBits.lshr(I * 8).trunc(8).getZExtValue();
    if (Byte == 0xff)
      Mask |= 1ULL << I;
    else if (Byte != 0)
      break;
  }
  if (I == SystemZ::VectorBytes) {
    Opcode = SystemZISD::BYTE_MASK;
    OpVals.push_back(Mask);
    VecVT = MVT::getVectorVT(MVT::getIntegerVT(8), 16);
    return true;
  }
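  // (VECTOR GENERATE BYTE MASK takes a 16-bit immediate with one bit per
  // result byte, each selecting 0x00 or 0xff, hence the requirement above
  // that every byte be either all-zeros or all-ones.)
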
  if (SplatBitSize > 64)
    return false;

  auto tryValue = [&](uint64_t Value) -> bool {
    // Try VECTOR REPLICATE IMMEDIATE
    int64_t SignedValue = SignExtend64(Value, SplatBitSize);
    if (isInt<16>(SignedValue)) {
      OpVals.push_back(((unsigned) SignedValue));
      Opcode = SystemZISD::REPLICATE;
      VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
                               SystemZ::VectorBits / SplatBitSize);
      return true;
    }
    // Try VECTOR GENERATE MASK
    unsigned Start, End;
    if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {
      // isRxSBGMask returns the bit numbers for a full 64-bit value, with 0
      // denoting 1 << 63 and 63 denoting 1.  Convert them to bit numbers for
      // an SplatBitSize value, so that 0 denotes 1 << (SplatBitSize-1).
      OpVals.push_back(Start - (64 - SplatBitSize));
      OpVals.push_back(End - (64 - SplatBitSize));
      Opcode = SystemZISD::ROTATE_MASK;
      VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
                               SystemZ::VectorBits / SplatBitSize);
      return true;
    }
    return false;
  };

  // First try assuming that any undefined bits above the highest set bit
  // and below the lowest set bit are 1s.  This increases the likelihood of
  // being able to use a sign-extended element value in VECTOR REPLICATE
  // IMMEDIATE or a wraparound mask in VECTOR GENERATE MASK.
  uint64_t SplatBitsZ = SplatBits.getZExtValue();
  uint64_t SplatUndefZ = SplatUndef.getZExtValue();
  uint64_t Lower =
      (SplatUndefZ & ((uint64_t(1) << findFirstSet(SplatBitsZ)) - 1));
  uint64_t Upper =
      (SplatUndefZ & ~((uint64_t(1) << findLastSet(SplatBitsZ)) - 1));
  if (tryValue(SplatBitsZ | Upper | Lower))
    return true;

  // Now try assuming that any undefined bits between the first and
  // last defined set bits are set.  This increases the chances of
  // using a non-wraparound mask.
  uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
  return tryValue(SplatBitsZ | Middle);
}

SystemZVectorConstantInfo::SystemZVectorConstantInfo(APFloat FPImm) {
  IntBits = FPImm.bitcastToAPInt().zextOrSelf(128);
  isFP128 = (&FPImm.getSemantics() == &APFloat::IEEEquad());

  // Find the smallest splat.
  SplatBits = FPImm.bitcastToAPInt();
  unsigned Width = SplatBits.getBitWidth();
  while (Width > 8) {
    unsigned HalfSize = Width / 2;
    APInt HighValue = SplatBits.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatBits.trunc(HalfSize);

    // If the two halves do not match, stop here.
    if (HighValue != LowValue || 8 > HalfSize)
      break;

    SplatBits = HighValue;
    Width = HalfSize;
  }
  SplatBitSize = Width;
}

SystemZVectorConstantInfo::SystemZVectorConstantInfo(BuildVectorSDNode *BVN) {
  assert(BVN->isConstant() && "Expected a constant BUILD_VECTOR");
  bool HasAnyUndefs;

  // Get IntBits by finding the 128 bit splat.
  BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
                       true);

  // Get SplatBits by finding the 8 bit or greater splat.
  BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
                       true);
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                         bool ForCodeSize) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  if (Imm.isZero() || Imm.isNegZero())
    return true;

  return SystemZVectorConstantInfo(Imm).isVectorConstantLegal(Subtarget);
}

bool SystemZTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // We can use CGFI or CLGFI.
  return isInt<32>(Imm) || isUInt<32>(Imm);
}

bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // We can use ALGFI or SLGFI.
  return isUInt<32>(Imm) || isUInt<32>(-Imm);
}

bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

namespace {
// Information about the addressing mode for a memory access.
struct AddressingMode {
  // True if a long displacement is supported.
  bool LongDisplacement;

  // True if use of index register is supported.
  bool IndexReg;

  AddressingMode(bool LongDispl, bool IdxReg) :
    LongDisplacement(LongDispl), IndexReg(IdxReg) {}
};
} // end anonymous namespace

// Return the desired addressing mode for a Load which has only one use (in
// the same block) which is a Store.
static AddressingMode getLoadStoreAddrMode(bool HasVector,
                                           Type *Ty) {
  // With vector support a Load->Store combination may be combined to either
  // an MVC or vector operations and it seems to work best to allow the
  // vector addressing mode.
  if (HasVector)
    return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);

  // Otherwise only the MVC case is special.
  bool MVC = Ty->isIntegerTy(8);
  return AddressingMode(!MVC/*LongDispl*/, !MVC/*IdxReg*/);
}

// Return the addressing mode which seems most desirable given an LLVM
// Instruction pointer.
static AddressingMode
supportedAddressingMode(Instruction *I, bool HasVector) {
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
    }
  }
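  // (MVC-style storage-to-storage instructions only have a base register and
  // a 12-bit unsigned displacement, with no index register, which is why the
  // memory intrinsics above get the most restrictive addressing mode.)
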
  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = dyn_cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&
              (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
            // Comparison of memory with 16 bit signed / unsigned immediate
            return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
      } else if (isa<StoreInst>(SingleUser))
        // Load->Store
        return getLoadStoreAddrMode(HasVector, I->getType());
    }
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
        // Load->Store
        return getLoadStoreAddrMode(HasVector, LoadI->getType());
  }

  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {

    // * Use LDE instead of LE/LEY for z13 to avoid partial register
    //   dependencies (LDE only supports small offsets).
    // * Utilize the vector registers to hold floating point
    //   values (vector load / store instructions only support small
    //   offsets).

    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                         I->getOperand(0)->getType());
    bool IsFPAccess = MemAccessTy->isFloatingPointTy();
    bool IsVectorAccess = MemAccessTy->isVectorTy();

    // A store of an extracted vector element will be combined into a VSTE type
    // instruction.
    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;
    }

    // A load which gets inserted into a vector element will be combined into a
    // VLE type instruction.
    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;
    }

    if (IsFPAccess || IsVectorAccess)
      return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);
  }

  return AddressingMode(true/*LongDispl*/, true/*IdxReg*/);
}

bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
       const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  AddressingMode SupportedAM(true, true);
  if (I != nullptr)
    SupportedAM = supportedAddressingMode(I, Subtarget.hasVector());

  if (!SupportedAM.LongDisplacement && !isUInt<12>(AM.BaseOffs))
    return false;

  if (!SupportedAM.IndexReg)
    // No indexing allowed.
    return AM.Scale == 0;

  // Indexing is OK but no scale factor can be applied.
  return AM.Scale == 0 || AM.Scale == 1;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}
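// (In both cases truncation simply means using the low bits of the wider
// register, so no instruction is needed and the checks above only require the
// source type to be wider than the destination.)
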
//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
    case 'v': // Vector register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'v': // Vector register
    if ((type->isVectorTy() || type->isFloatingPointTy()) &&
        Subtarget.hasVector())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified.  MC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC,
                    const unsigned *Map, unsigned Size) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    unsigned Index;
    bool Failed =
        Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index);
    if (!Failed && Index < Size && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0U, nullptr);
}

std::pair<unsigned, const TargetRegisterClass *>
SystemZTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);

    case 'v': // Vector register
      if (Subtarget.hasVector()) {
        if (VT == MVT::f32)
          return std::make_pair(0U, &SystemZ::VR32BitRegClass);
        if (VT == MVT::f64)
          return std::make_pair(0U, &SystemZ::VR64BitRegClass);
        return std::make_pair(0U, &SystemZ::VR128BitRegClass);
      }
      break;
    }
  }
  if (Constraint.size() > 0 && Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT.  The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs, 16);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs, 16);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs, 16);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs, 16);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs, 16);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs, 16);
    }
    if (Constraint[1] == 'v') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::VR32BitRegClass,
                                   SystemZMC::VR32Regs, 32);
      if (VT == MVT::f64)
        return parseRegisterNumber(Constraint, &SystemZ::VR64BitRegClass,
                                   SystemZMC::VR64Regs, 32);
      return parseRegisterNumber(Constraint, &SystemZ::VR128BitRegClass,
                                 SystemZMC::VR128Regs, 32);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

const MCPhysReg *SystemZTargetLowering::getScratchRegisters(
  CallingConv::ID) const {
  static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
                                           SystemZ::R4D, SystemZ::R5D, 0 };
  return ScratchRegs;
}

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  return CI->isTailCall();
}

// We do not yet support 128-bit single-element vector types.  If the user
// attempts to use such types as function argument or return type, prefer
// to error out instead of emitting code violating the ABI.
static void VerifyVectorType(MVT VT, EVT ArgVT) {
  if (ArgVT.isVector() && !VT.isVector())
    report_fatal_error("Unsupported vector argument or return type");
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::InputArg> &Ins) {
  for (unsigned i = 0; i < Ins.size(); ++i)
    VerifyVectorType(Ins[i].VT, Ins[i].ArgVT);
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::OutputArg> &Outs) {
  for (unsigned i = 0; i < Outs.size(); ++i)
    VerifyVectorType(Outs[i].VT, Outs[i].ArgVT);
}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()).  Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::BCvt) {
    // If this is a short vector argument loaded from the stack,
    // extend from i64 to full vector size and then bitcast.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)});
    Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value);
  } else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA.  Return a copy of Value converted to
// VA.getValVT().  The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::BCvt:
    // If this is a short vector argument to be stored to the stack,
    // bitcast to v2i64 and then extract first element.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Value);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VA.getLocVT(), Value,
                       DAG.getConstant(0, DL, MVT::i32));
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

SDValue SystemZTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
      MF.getInfo<SystemZMachineFunctionInfo>();
  auto *TFL =
      static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Detect unsupported vector argument types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Ins);

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        RC = &SystemZ::VR128BitRegClass;
        break;
      }
      Register VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.  Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
                          DAG.getIntPtrConstant(4, DL));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(MF, FI));
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    if (VA.getLocInfo() == CCValAssign::Indirect) {
      InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
                                   MachinePointerInfo()));
      // If the original argument was split (e.g. i128), we need
      // to load all parts of it here (using the same address).
      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert (Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        CCValAssign &PartVA = ArgLocs[I + 1];
        unsigned PartOffset = Ins[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                     MachinePointerInfo()));
        ++I;
      }
    } else
      InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be.  The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI.CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots.  (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI.CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(MF, FI));
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          makeArrayRef(&MemOps[NumFixedFPRs],
                                       SystemZ::NumArgFPRs-NumFixedFPRs));
    }
  }

  return Chain;
}

static bool canUseSiblingCall(const CCState &ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs,
                              SmallVectorImpl<ISD::OutputArg> &Outs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the callee-saved argument register R6, or if the call uses
  // the callee-saved register arguments SwiftSelf and SwiftError.
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    Register Reg = VA.getLocReg();
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
      return false;
    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
      return false;
  }
  return true;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Detect unsupported vector argument and return types.
  if (Subtarget.hasVector()) {
    VerifyVectorTypes(Outs);
    VerifyVectorTypes(Ins);
  }

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(Outs[I].ArgVT);
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                       MachinePointerInfo::getFixedStack(MF, FI)));
      // If the original argument was split (e.g. i128), we need
      // to store all parts of it here (and pass just one address).
      unsigned ArgIndex = Outs[I].OrigArgIndex;
      assert (Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI)));
        ++I;
      }
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

1515 // Queue up the argument copies and emit them at the end.
1516 RegsToPass
.push_back(std::make_pair(VA
.getLocReg(), ArgValue
));
1518 assert(VA
.isMemLoc() && "Argument not register or memory");
1520 // Work out the address of the stack slot. Unpromoted ints and
1521 // floats are passed as right-justified 8-byte values.
1522 if (!StackPtr
.getNode())
1523 StackPtr
= DAG
.getCopyFromReg(Chain
, DL
, SystemZ::R15D
, PtrVT
);
1524 unsigned Offset
= SystemZMC::CallFrameSize
+ VA
.getLocMemOffset();
1525 if (VA
.getLocVT() == MVT::i32
|| VA
.getLocVT() == MVT::f32
)
1527 SDValue Address
= DAG
.getNode(ISD::ADD
, DL
, PtrVT
, StackPtr
,
1528 DAG
.getIntPtrConstant(Offset
, DL
));
1531 MemOpChains
.push_back(
1532 DAG
.getStore(Chain
, DL
, ArgValue
, Address
, MachinePointerInfo()));
1536 // Join the stores, which are independent of one another.
1537 if (!MemOpChains
.empty())
1538 Chain
= DAG
.getNode(ISD::TokenFactor
, DL
, MVT::Other
, MemOpChains
);
1540 // Accept direct calls by converting symbolic call addresses to the
1541 // associated Target* opcodes. Force %r1 to be used for indirect
1544 if (auto *G
= dyn_cast
<GlobalAddressSDNode
>(Callee
)) {
1545 Callee
= DAG
.getTargetGlobalAddress(G
->getGlobal(), DL
, PtrVT
);
1546 Callee
= DAG
.getNode(SystemZISD::PCREL_WRAPPER
, DL
, PtrVT
, Callee
);
1547 } else if (auto *E
= dyn_cast
<ExternalSymbolSDNode
>(Callee
)) {
1548 Callee
= DAG
.getTargetExternalSymbol(E
->getSymbol(), PtrVT
);
1549 Callee
= DAG
.getNode(SystemZISD::PCREL_WRAPPER
, DL
, PtrVT
, Callee
);
1550 } else if (IsTailCall
) {
1551 Chain
= DAG
.getCopyToReg(Chain
, DL
, SystemZ::R1D
, Callee
, Glue
);
1552 Glue
= Chain
.getValue(1);
1553 Callee
= DAG
.getRegister(SystemZ::R1D
, Callee
.getValueType());
1556 // Build a sequence of copy-to-reg nodes, chained and glued together.
1557 for (unsigned I
= 0, E
= RegsToPass
.size(); I
!= E
; ++I
) {
1558 Chain
= DAG
.getCopyToReg(Chain
, DL
, RegsToPass
[I
].first
,
1559 RegsToPass
[I
].second
, Glue
);
1560 Glue
= Chain
.getValue(1);
1563 // The first call operand is the chain and the second is the target address.
1564 SmallVector
<SDValue
, 8> Ops
;
1565 Ops
.push_back(Chain
);
1566 Ops
.push_back(Callee
);
1568 // Add argument registers to the end of the list so that they are
1569 // known live into the call.
1570 for (unsigned I
= 0, E
= RegsToPass
.size(); I
!= E
; ++I
)
1571 Ops
.push_back(DAG
.getRegister(RegsToPass
[I
].first
,
1572 RegsToPass
[I
].second
.getValueType()));
1574 // Add a register mask operand representing the call-preserved registers.
1575 const TargetRegisterInfo
*TRI
= Subtarget
.getRegisterInfo();
1576 const uint32_t *Mask
= TRI
->getCallPreservedMask(MF
, CallConv
);
1577 assert(Mask
&& "Missing call preserved mask for calling convention");
1578 Ops
.push_back(DAG
.getRegisterMask(Mask
));
1580 // Glue the call to the argument copies, if any.
1582 Ops
.push_back(Glue
);
1585 SDVTList NodeTys
= DAG
.getVTList(MVT::Other
, MVT::Glue
);
1587 return DAG
.getNode(SystemZISD::SIBCALL
, DL
, NodeTys
, Ops
);
1588 Chain
= DAG
.getNode(SystemZISD::CALL
, DL
, NodeTys
, Ops
);
1589 Glue
= Chain
.getValue(1);
1591 // Mark the end of the call, which is glued to the call itself.
1592 Chain
= DAG
.getCALLSEQ_END(Chain
,
1593 DAG
.getConstant(NumBytes
, DL
, PtrVT
, true),
1594 DAG
.getConstant(0, DL
, PtrVT
, true),
1596 Glue
= Chain
.getValue(1);
1598 // Assign locations to each value returned by this call.
1599 SmallVector
<CCValAssign
, 16> RetLocs
;
1600 CCState
RetCCInfo(CallConv
, IsVarArg
, MF
, RetLocs
, *DAG
.getContext());
1601 RetCCInfo
.AnalyzeCallResult(Ins
, RetCC_SystemZ
);
1603 // Copy all of the result registers out of their specified physreg.
1604 for (unsigned I
= 0, E
= RetLocs
.size(); I
!= E
; ++I
) {
1605 CCValAssign
&VA
= RetLocs
[I
];
1607 // Copy the value out, gluing the copy to the end of the call sequence.
1608 SDValue RetValue
= DAG
.getCopyFromReg(Chain
, DL
, VA
.getLocReg(),
1609 VA
.getLocVT(), Glue
);
1610 Chain
= RetValue
.getValue(1);
1611 Glue
= RetValue
.getValue(2);
1613 // Convert the value of the return register into the value that's
1615 InVals
.push_back(convertLocVTToValVT(DAG
, DL
, VA
, Chain
, RetValue
));
bool SystemZTargetLowering::
CanLowerReturn(CallingConv::ID CallConv,
               MachineFunction &MF, bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Special case that we cannot easily detect in RetCC_SystemZ since
  // i128 is not a legal type.
  for (auto &Out : Outs)
    if (Out.ArgVT == MVT::i128)
      return false;

  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
}
SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    Register Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
}
// Return true if Op is an intrinsic node with chain that returns the CC value
// as its only (other) argument.  Provide the associated SystemZISD opcode and
// the mask of valid CC values if so.
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode,
                                      unsigned &CCValid) {
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  switch (Id) {
  case Intrinsic::s390_tbegin:
    Opcode = SystemZISD::TBEGIN;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tbegin_nofloat:
    Opcode = SystemZISD::TBEGIN_NOFLOAT;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tend:
    Opcode = SystemZISD::TEND;
    CCValid = SystemZ::CCMASK_TEND;
    return true;

  default:
    return false;
  }
}
// Return true if Op is an intrinsic node without chain that returns the
// CC value as its final argument.  Provide the associated SystemZISD
// opcode and the mask of valid CC values if so.
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) {
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (Id) {
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
    Opcode = SystemZISD::PACKS_CC;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
    Opcode = SystemZISD::PACKLS_CC;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vceqbs:
  case Intrinsic::s390_vceqhs:
  case Intrinsic::s390_vceqfs:
  case Intrinsic::s390_vceqgs:
    Opcode = SystemZISD::VICMPES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vchbs:
  case Intrinsic::s390_vchhs:
  case Intrinsic::s390_vchfs:
  case Intrinsic::s390_vchgs:
    Opcode = SystemZISD::VICMPHS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vchlbs:
  case Intrinsic::s390_vchlhs:
  case Intrinsic::s390_vchlfs:
  case Intrinsic::s390_vchlgs:
    Opcode = SystemZISD::VICMPHLS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vtm:
    Opcode = SystemZISD::VTM;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfaebs:
  case Intrinsic::s390_vfaehs:
  case Intrinsic::s390_vfaefs:
    Opcode = SystemZISD::VFAE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfaezbs:
  case Intrinsic::s390_vfaezhs:
  case Intrinsic::s390_vfaezfs:
    Opcode = SystemZISD::VFAEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfeebs:
  case Intrinsic::s390_vfeehs:
  case Intrinsic::s390_vfeefs:
    Opcode = SystemZISD::VFEE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfeezbs:
  case Intrinsic::s390_vfeezhs:
  case Intrinsic::s390_vfeezfs:
    Opcode = SystemZISD::VFEEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfenebs:
  case Intrinsic::s390_vfenehs:
  case Intrinsic::s390_vfenefs:
    Opcode = SystemZISD::VFENE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfenezbs:
  case Intrinsic::s390_vfenezhs:
  case Intrinsic::s390_vfenezfs:
    Opcode = SystemZISD::VFENEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vistrbs:
  case Intrinsic::s390_vistrhs:
  case Intrinsic::s390_vistrfs:
    Opcode = SystemZISD::VISTR_CC;
    CCValid = SystemZ::CCMASK_0 | SystemZ::CCMASK_3;
    return true;

  case Intrinsic::s390_vstrcbs:
  case Intrinsic::s390_vstrchs:
  case Intrinsic::s390_vstrcfs:
    Opcode = SystemZISD::VSTRC_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vstrczbs:
  case Intrinsic::s390_vstrczhs:
  case Intrinsic::s390_vstrczfs:
    Opcode = SystemZISD::VSTRCZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vstrsb:
  case Intrinsic::s390_vstrsh:
  case Intrinsic::s390_vstrsf:
    Opcode = SystemZISD::VSTRS_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vstrszb:
  case Intrinsic::s390_vstrszh:
  case Intrinsic::s390_vstrszf:
    Opcode = SystemZISD::VSTRSZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfcedbs:
  case Intrinsic::s390_vfcesbs:
    Opcode = SystemZISD::VFCMPES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfchdbs:
  case Intrinsic::s390_vfchsbs:
    Opcode = SystemZISD::VFCMPHS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfchedbs:
  case Intrinsic::s390_vfchesbs:
    Opcode = SystemZISD::VFCMPHES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vftcidb:
  case Intrinsic::s390_vftcisb:
    Opcode = SystemZISD::VFTCI;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_tdc:
    Opcode = SystemZISD::TDC;
    CCValid = SystemZ::CCMASK_TDC;
    return true;

  default:
    return false;
  }
}
// Emit an intrinsic with chain and an explicit CC register result.
static SDNode *emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op,
                                           unsigned Opcode) {
  // Copy all operands except the intrinsic ID.
  unsigned NumOps = Op.getNumOperands();
  SmallVector<SDValue, 6> Ops;
  Ops.reserve(NumOps - 1);
  Ops.push_back(Op.getOperand(0));
  for (unsigned I = 2; I < NumOps; ++I)
    Ops.push_back(Op.getOperand(I));

  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
  SDVTList RawVTs = DAG.getVTList(MVT::i32, MVT::Other);
  SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops);
  SDValue OldChain = SDValue(Op.getNode(), 1);
  SDValue NewChain = SDValue(Intr.getNode(), 1);
  DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
  return Intr.getNode();
}
// Emit an intrinsic with an explicit CC register result.
static SDNode *emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op,
                                   unsigned Opcode) {
  // Copy all operands except the intrinsic ID.
  unsigned NumOps = Op.getNumOperands();
  SmallVector<SDValue, 6> Ops;
  Ops.reserve(NumOps - 1);
  for (unsigned I = 1; I < NumOps; ++I)
    Ops.push_back(Op.getOperand(I));

  SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), Op->getVTList(), Ops);
  return Intr.getNode();
}
// CC is a comparison that will be implemented using an integer or
// floating-point comparison.  Return the condition code mask for
// a branch on true.  In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones.  In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}
// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
  if (C.ICmpType == SystemZICMP::UnsignedOnly)
    return;

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1)
    return;

  int64_t Value = ConstOp1->getSExtValue();
  if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
      (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
    C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType());
  }
}
// If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
// adjust the operands as necessary.
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL,
                             Comparison &C) {
  // For us to make any changes, it must be a comparison between a single-use
  // load and a constant.
  if (!C.Op0.hasOneUse() ||
      C.Op0.getOpcode() != ISD::LOAD ||
      C.Op1.getOpcode() != ISD::Constant)
    return;

  // We must have an 8- or 16-bit load.
  auto *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
  if (NumBits != 8 && NumBits != 16)
    return;

  // The load must be an extending one and the constant must be within the
  // range of the unextended value.
  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
  uint64_t Value = ConstOp1->getZExtValue();
  uint64_t Mask = (1 << NumBits) - 1;
  if (Load->getExtensionType() == ISD::SEXTLOAD) {
    // Make sure that ConstOp1 is in range of C.Op0.
    int64_t SignedValue = ConstOp1->getSExtValue();
    if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
      return;
    if (C.ICmpType != SystemZICMP::SignedOnly) {
      // Unsigned comparison between two sign-extended values is equivalent
      // to unsigned comparison between two zero-extended values.
      Value &= Mask;
    } else if (NumBits == 8) {
      // Try to treat the comparison as unsigned, so that we can use CLI.
      // Adjust CCMask and Value as necessary.
      if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
        // Test whether the high bit of the byte is set.
        Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
      else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
        // Test whether the high bit of the byte is clear.
        Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
      else
        // No instruction exists for this combination.
        return;
      C.ICmpType = SystemZICMP::UnsignedOnly;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    // If the constant is in range, we can use any comparison.
    C.ICmpType = SystemZICMP::Any;
  } else
    return;

  // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
                              ISD::SEXTLOAD :
                              ISD::ZEXTLOAD);
  if (C.Op0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType) {
    C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(),
                           Load->getBasePtr(), Load->getPointerInfo(),
                           Load->getMemoryVT(), Load->getAlignment(),
                           Load->getMemOperand()->getFlags());
    // Update the chain uses.
    DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), C.Op0.getValue(1));
  }

  // Make sure that the second operand is an i32 with the right value.
  if (C.Op1.getValueType() != MVT::i32 ||
      Value != ConstOp1->getZExtValue())
    C.Op1 = DAG.getConstant(Value, DL, MVT::i32);
}
// Return true if Op is either an unextended load, or a load suitable
// for integer register-memory comparisons of type ICmpType.
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load) {
    // There are no instructions to compare a register with a memory byte.
    if (Load->getMemoryVT() == MVT::i8)
      return false;
    // Otherwise decide on extension type.
    switch (Load->getExtensionType()) {
    case ISD::NON_EXTLOAD:
      return true;
    case ISD::SEXTLOAD:
      return ICmpType != SystemZICMP::UnsignedOnly;
    case ISD::ZEXTLOAD:
      return ICmpType != SystemZICMP::SignedOnly;
    default:
      break;
    }
  }
  return false;
}
// Return true if it is better to swap the operands of C.
static bool shouldSwapCmpOperands(const Comparison &C) {
  // Leave f128 comparisons alone, since they have no memory forms.
  if (C.Op0.getValueType() == MVT::f128)
    return false;

  // Always keep a floating-point constant second, since comparisons with
  // zero can use LOAD TEST and comparisons with other constants make a
  // natural memory operand.
  if (isa<ConstantFPSDNode>(C.Op1))
    return false;

  // Never swap comparisons with zero since there are many ways to optimize
  // those later.
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)
    return false;

  // Also keep natural memory operands second if the loaded value is
  // only used here.  Several comparisons have memory forms.
  if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse())
    return false;

  // Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
  // In that case we generally prefer the memory to be second.
  if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) {
    // The only exceptions are when the second operand is a constant and
    // we can use things like CHHSI.
    if (!ConstOp1)
      return true;
    // The unsigned memory-immediate instructions can handle 16-bit
    // unsigned integers.
    if (C.ICmpType != SystemZICMP::SignedOnly &&
        isUInt<16>(ConstOp1->getZExtValue()))
      return false;
    // The signed memory-immediate instructions can handle 16-bit
    // signed integers.
    if (C.ICmpType != SystemZICMP::UnsignedOnly &&
        isInt<16>(ConstOp1->getSExtValue()))
      return false;
    return true;
  }

  // Try to promote the use of CGFR and CLGFR.
  unsigned Opcode0 = C.Op0.getOpcode();
  if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
    return true;
  if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
    return true;
  if (C.ICmpType != SystemZICMP::SignedOnly &&
      Opcode0 == ISD::AND &&
      C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
    return true;

  return false;
}
// Return a version of comparison CC mask CCMask in which the LT and GT
// actions are swapped.
static unsigned reverseCCMask(unsigned CCMask) {
  return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
          (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
          (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
          (CCMask & SystemZ::CCMASK_CMP_UO));
}
// Check whether C tests for equality between X and Y and whether X - Y
// or Y - X is also computed.  In that case it's better to compare the
// result of the subtraction against zero.
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL,
                                 Comparison &C) {
  if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
      C.CCMask == SystemZ::CCMASK_CMP_NE) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::SUB &&
          ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
           (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
        C.Op0 = SDValue(N, 0);
        C.Op1 = DAG.getConstant(0, DL, N->getValueType(0));
        return;
      }
    }
  }
}
// Check whether C compares a floating-point value with zero and if that
// floating-point value is also negated.  In this case we can use the
// negation to set CC, so avoiding separate LOAD AND TEST and
// LOAD (NEGATIVE/COMPLEMENT) instructions.
static void adjustForFNeg(Comparison &C) {
  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::FNEG) {
        C.Op0 = SDValue(N, 0);
        C.CCMask = reverseCCMask(C.CCMask);
        return;
      }
    }
  }
}
// Check whether C compares (shl X, 32) with 0 and whether X is
// also sign-extended.  In that case it is better to test the result
// of the sign extension using LTGFR.
//
// This case is important because InstCombine transforms a comparison
// with (sext (trunc X)) into a comparison with (shl X, 32).
static void adjustForLTGFR(Comparison &C) {
  // Check for a comparison between (shl X, 32) and 0.
  if (C.Op0.getOpcode() == ISD::SHL &&
      C.Op0.getValueType() == MVT::i64 &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
    if (C1 && C1->getZExtValue() == 32) {
      SDValue ShlOp0 = C.Op0.getOperand(0);
      // See whether X has any SIGN_EXTEND_INREG uses.
      for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
        SDNode *N = *I;
        if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
            cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
          C.Op0 = SDValue(N, 0);
          return;
        }
      }
    }
  }
}
// If C compares the truncation of an extending load, try to compare
// the untruncated value instead.  This exposes more opportunities to
// reuse CC.
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
                               Comparison &C) {
  if (C.Op0.getOpcode() == ISD::TRUNCATE &&
      C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
    if (L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()) {
      unsigned Type = L->getExtensionType();
      if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
          (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
        C.Op0 = C.Op0.getOperand(0);
        C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType());
      }
    }
  }
}
// Return true if shift operation N has an in-range constant shift value.
// Store it in ShiftVal if so.
static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!Shift)
    return false;

  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueSizeInBits())
    return false;

  ShiftVal = Amount;
  return true;
}
// Check whether an AND with Mask is suitable for a TEST UNDER MASK
// instruction and whether the CC value is descriptive enough to handle
// a comparison of type Opcode between the AND result and CmpVal.
// CCMask says which comparison result is being tested and BitSize is
// the number of bits in the operands.  If TEST UNDER MASK can be used,
// return the corresponding CC mask, otherwise return 0.
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
                                     uint64_t Mask, uint64_t CmpVal,
                                     unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
  if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
      !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
    return 0;

  // Work out the masks for the lowest and highest bits.
  unsigned HighShift = 63 - countLeadingZeros(Mask);
  uint64_t High = uint64_t(1) << HighShift;
  uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);

  // Signed ordered comparisons are effectively unsigned if the sign
  // bit is clear.
  bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);

  // Check for equality comparisons with 0, or the equivalent.
  if (CmpVal == 0) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal < Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_SOME_1;
  }

  // Check for equality comparisons with the mask, or the equivalent.
  if (CmpVal == Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_SOME_0;
  }

  // Check for ordered comparisons with the top bit.
  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_MSB_1;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_MSB_1;
  }

  // If there are just two bits, we can do equality checks for Low and High
  // as well.
  if (Mask == Low + High) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
  }

  // Looks like we've exhausted our options.
  return 0;
}
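// For example, with Mask == 0x8000 and CmpVal == 0, the mask is suitable for
// TMLL and an equality test maps to CCMASK_TM_ALL_0 ("all selected bits are
// zero"), while an inequality test maps to CCMASK_TM_SOME_1.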
// See whether C can be implemented as a TEST UNDER MASK instruction.
// Update the arguments with the TM version if so.
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL,
                                   Comparison &C) {
  // Check that we have a comparison with a constant.
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (!ConstOp1)
    return;
  uint64_t CmpVal = ConstOp1->getZExtValue();

  // Check whether the nonconstant input is an AND with a constant mask.
  Comparison NewC(C);
  uint64_t MaskVal;
  ConstantSDNode *Mask = nullptr;
  if (C.Op0.getOpcode() == ISD::AND) {
    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
    if (!Mask)
      return;
    MaskVal = Mask->getZExtValue();
  } else {
    // There is no instruction to compare with a 64-bit immediate
    // so use TMHH instead if possible.  We need an unsigned ordered
    // comparison with an i64 immediate.
    if (NewC.Op0.getValueType() != MVT::i64 ||
        NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
        NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
        NewC.ICmpType == SystemZICMP::SignedOnly)
      return;
    // Convert LE and GT comparisons into LT and GE.
    if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
        NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
      if (CmpVal == uint64_t(-1))
        return;
      CmpVal += 1;
      NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    }
    // If the low N bits of Op1 are zero then the low N bits of Op0 can
    // be masked off without changing the result.
    MaskVal = -(CmpVal & -CmpVal);
    NewC.ICmpType = SystemZICMP::UnsignedOnly;
  }
  if (!MaskVal)
    return;

  // Check whether the combination of mask, comparison value and comparison
  // type are suitable.
  unsigned BitSize = NewC.Op0.getValueSizeInBits();
  unsigned NewCCMask, ShiftVal;
  if (NewC.ICmpType != SystemZICMP::SignedOnly &&
      NewC.Op0.getOpcode() == ISD::SHL &&
      isSimpleShift(NewC.Op0, ShiftVal) &&
      (MaskVal >> ShiftVal != 0) &&
      ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
      (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
                                        MaskVal >> ShiftVal,
                                        CmpVal >> ShiftVal,
                                        SystemZICMP::Any))) {
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal >>= ShiftVal;
  } else if (NewC.ICmpType != SystemZICMP::SignedOnly &&
             NewC.Op0.getOpcode() == ISD::SRL &&
             isSimpleShift(NewC.Op0, ShiftVal) &&
             (MaskVal << ShiftVal != 0) &&
             ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
             (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
                                               MaskVal << ShiftVal,
                                               CmpVal << ShiftVal,
                                               SystemZICMP::UnsignedOnly))) {
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal <<= ShiftVal;
  } else {
    NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal,
                                     NewC.ICmpType);
    if (!NewCCMask)
      return;
  }

  // Go ahead and make the change.
  C.Opcode = SystemZISD::TM;
  C.Op0 = NewC.Op0;
  if (Mask && Mask->getZExtValue() == MaskVal)
    C.Op1 = SDValue(Mask, 0);
  else
    C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType());
  C.CCValid = SystemZ::CCMASK_TM;
  C.CCMask = NewCCMask;
}
// See whether the comparison argument contains a redundant AND
// and remove it if so.  This sometimes happens due to the generic
// BRCOND expansion.
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL,
                                  Comparison &C) {
  if (C.Op0.getOpcode() != ISD::AND)
    return;
  auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
  if (!Mask)
    return;
  KnownBits Known = DAG.computeKnownBits(C.Op0.getOperand(0));
  if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
    return;

  C.Op0 = C.Op0.getOperand(0);
}
// Return a Comparison that tests the condition-code result of intrinsic
// node Call against constant integer CC using comparison code Cond.
// Opcode is the opcode of the SystemZISD operation for the intrinsic
// and CCValid is the set of possible condition-code results.
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode,
                                  SDValue Call, unsigned CCValid, uint64_t CC,
                                  ISD::CondCode Cond) {
  Comparison C(Call, SDValue());
  C.Opcode = Opcode;
  C.CCValid = CCValid;
  if (Cond == ISD::SETEQ)
    // bit 3 for CC==0, bit 0 for CC==3, always false for CC>3.
    C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
  else if (Cond == ISD::SETNE)
    // ...and the inverse of that.
    C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
  else if (Cond == ISD::SETLT || Cond == ISD::SETULT)
    // bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3,
    // always true for CC>3.
    C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
  else if (Cond == ISD::SETGE || Cond == ISD::SETUGE)
    // ...and the inverse of that.
    C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
  else if (Cond == ISD::SETLE || Cond == ISD::SETULE)
    // bit 3 and above for CC==0, bit 0 and above for CC==3 (always true),
    // always true for CC>3.
    C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
  else if (Cond == ISD::SETGT || Cond == ISD::SETUGT)
    // ...and the inverse of that.
    C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
  else
    llvm_unreachable("Unexpected integer comparison type");
  C.CCMask &= CCValid;
  return C;
}
// Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
                         ISD::CondCode Cond, const SDLoc &DL) {
  if (CmpOp1.getOpcode() == ISD::Constant) {
    uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue();
    unsigned Opcode, CCValid;
    if (CmpOp0.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
        isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid))
      return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
    if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 &&
        isIntrinsicWithCC(CmpOp0, Opcode, CCValid))
      return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
  }
  Comparison C(CmpOp0, CmpOp1);
  C.CCMask = CCMaskForCondCode(Cond);
  if (C.Op0.getValueType().isFloatingPoint()) {
    C.CCValid = SystemZ::CCMASK_FCMP;
    C.Opcode = SystemZISD::FCMP;
    adjustForFNeg(C);
  } else {
    C.CCValid = SystemZ::CCMASK_ICMP;
    C.Opcode = SystemZISD::ICMP;
    // Choose the type of comparison.  Equality and inequality tests can
    // use either signed or unsigned comparisons.  The choice also doesn't
    // matter if both sign bits are known to be clear.  In those cases we
    // want to give the main isel code the freedom to choose whichever
    // form fits best.
    if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
        C.CCMask == SystemZ::CCMASK_CMP_NE ||
        (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1)))
      C.ICmpType = SystemZICMP::Any;
    else if (C.CCMask & SystemZ::CCMASK_CMP_UO)
      C.ICmpType = SystemZICMP::UnsignedOnly;
    else
      C.ICmpType = SystemZICMP::SignedOnly;
    C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
    adjustForRedundantAnd(DAG, DL, C);
    adjustZeroCmp(DAG, DL, C);
    adjustSubwordCmp(DAG, DL, C);
    adjustForSubtraction(DAG, DL, C);
    adjustForLTGFR(C);
    adjustICmpTruncate(DAG, DL, C);
  }

  if (shouldSwapCmpOperands(C)) {
    std::swap(C.Op0, C.Op1);
    C.CCMask = reverseCCMask(C.CCMask);
  }

  adjustForTestUnderMask(DAG, DL, C);
  return C;
}
// Emit the comparison instruction described by C.
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
  if (!C.Op1.getNode()) {
    SDNode *Node;
    switch (C.Op0.getOpcode()) {
    case ISD::INTRINSIC_W_CHAIN:
      Node = emitIntrinsicWithCCAndChain(DAG, C.Op0, C.Opcode);
      return SDValue(Node, 0);
    case ISD::INTRINSIC_WO_CHAIN:
      Node = emitIntrinsicWithCC(DAG, C.Op0, C.Opcode);
      return SDValue(Node, Node->getNumValues() - 1);
    default:
      llvm_unreachable("Invalid comparison operands");
    }
  }
  if (C.Opcode == SystemZISD::ICMP)
    return DAG.getNode(SystemZISD::ICMP, DL, MVT::i32, C.Op0, C.Op1,
                       DAG.getConstant(C.ICmpType, DL, MVT::i32));
  if (C.Opcode == SystemZISD::TM) {
    bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
                         bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
    return DAG.getNode(SystemZISD::TM, DL, MVT::i32, C.Op0, C.Op1,
                       DAG.getConstant(RegisterOnly, DL, MVT::i32));
  }
  return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);
}
// Implement a 32-bit *MUL_LOHI operation by extending both operands to
// 64 bits.  Extend is the extension type to use.  Store the high part
// in Hi and the low part in Lo.
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend,
                            SDValue Op0, SDValue Op1, SDValue &Hi,
                            SDValue &Lo) {
  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
  SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
  Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul,
                   DAG.getConstant(32, DL, MVT::i64));
  Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
  Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
}
// Lower a binary operation that produces two VT results, one in each
// half of a GR128 pair.  Op0 and Op1 are the VT operands to the operation,
// and Opcode performs the GR128 operation.  Store the even register result
// in Even and the odd register result in Odd.
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                             unsigned Opcode, SDValue Op0, SDValue Op1,
                             SDValue &Even, SDValue &Odd) {
  SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1);
  bool Is32Bit = is32Bit(VT);
  Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
  Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
}
// Return an i32 value that is 1 if the CC value produced by CCReg is
// in the mask CCMask and 0 otherwise.  CC is known to have a value
// in CCValid, so other values can be ignored.
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg,
                         unsigned CCValid, unsigned CCMask) {
  SDValue Ops[] = {DAG.getConstant(1, DL, MVT::i32),
                   DAG.getConstant(0, DL, MVT::i32),
                   DAG.getConstant(CCValid, DL, MVT::i32),
                   DAG.getConstant(CCMask, DL, MVT::i32), CCReg};
  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, MVT::i32, Ops);
}
// Return the SystemZISD vector comparison operation for CC, or 0 if it cannot
// be done directly.  IsFP is true if CC is for a floating-point rather than
// integer comparison.
static unsigned getVectorComparison(ISD::CondCode CC, bool IsFP) {
  switch (CC) {
  case ISD::SETOEQ:
  case ISD::SETEQ:
    return IsFP ? SystemZISD::VFCMPE : SystemZISD::VICMPE;

  case ISD::SETOGE:
  case ISD::SETGE:
    return IsFP ? SystemZISD::VFCMPHE : static_cast<SystemZISD::NodeType>(0);

  case ISD::SETOGT:
  case ISD::SETGT:
    return IsFP ? SystemZISD::VFCMPH : SystemZISD::VICMPH;

  case ISD::SETUGT:
    return IsFP ? static_cast<SystemZISD::NodeType>(0) : SystemZISD::VICMPHL;

  default:
    return 0;
  }
}
// Return the SystemZISD vector comparison operation for CC or its inverse,
// or 0 if neither can be done directly.  Indicate in Invert whether the
// result is for the inverse of CC.  IsFP is true if CC is for a
// floating-point rather than integer comparison.
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, bool IsFP,
                                            bool &Invert) {
  if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
    Invert = false;
    return Opcode;
  }

  CC = ISD::getSetCCInverse(CC, !IsFP);
  if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
    Invert = true;
    return Opcode;
  }

  return 0;
}
// Return a v2f64 that contains the extended form of elements Start and Start+1
// of v4f32 value Op.
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL,
                                  SDValue Op) {
  int Mask[] = { Start, -1, Start + 1, -1 };
  Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask);
  return DAG.getNode(SystemZISD::VEXTEND, DL, MVT::v2f64, Op);
}
// Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode,
// producing a result of type VT.
SDValue SystemZTargetLowering::getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
                                            const SDLoc &DL, EVT VT,
                                            SDValue CmpOp0,
                                            SDValue CmpOp1) const {
  // There is no hardware support for v4f32 (unless we have the vector
  // enhancements facility 1), so extend the vector into two v2f64s
  // and compare those.
  if (CmpOp0.getValueType() == MVT::v4f32 &&
      !Subtarget.hasVectorEnhancements1()) {
    SDValue H0 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp0);
    SDValue L0 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp0);
    SDValue H1 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp1);
    SDValue L1 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp1);
    SDValue HRes = DAG.getNode(Opcode, DL, MVT::v2i64, H0, H1);
    SDValue LRes = DAG.getNode(Opcode, DL, MVT::v2i64, L0, L1);
    return DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes);
  }
  return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
}
// Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing
// an integer mask of type VT.
SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG,
                                                const SDLoc &DL, EVT VT,
                                                ISD::CondCode CC,
                                                SDValue CmpOp0,
                                                SDValue CmpOp1) const {
  bool IsFP = CmpOp0.getValueType().isFloatingPoint();
  bool Invert = false;
  SDValue Cmp;
  switch (CC) {
    // Handle tests for order using (or (ogt y x) (oge x y)).
  case ISD::SETUO:
    Invert = true;
    LLVM_FALLTHROUGH;
  case ISD::SETO: {
    assert(IsFP && "Unexpected integer comparison");
    SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0);
    SDValue GE = getVectorCmp(DAG, SystemZISD::VFCMPHE, DL, VT, CmpOp0, CmpOp1);
    Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GE);
    break;
  }

    // Handle <> tests using (or (ogt y x) (ogt x y)).
  case ISD::SETUEQ:
    Invert = true;
    LLVM_FALLTHROUGH;
  case ISD::SETONE: {
    assert(IsFP && "Unexpected integer comparison");
    SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0);
    SDValue GT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp0, CmpOp1);
    Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GT);
    break;
  }

    // Otherwise a single comparison is enough.  It doesn't really
    // matter whether we try the inversion or the swap first, since
    // there are no cases where both work.
  default:
    if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert))
      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1);
    else {
      CC = ISD::getSetCCSwappedOperands(CC);
      if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert))
        Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0);
      else
        llvm_unreachable("Unhandled comparison");
    }
    break;
  }

  if (Invert) {
    SDValue Mask =
        DAG.getSplatBuildVector(VT, DL, DAG.getConstant(-1, DL, MVT::i64));
    Cmp = DAG.getNode(ISD::XOR, DL, VT, Cmp, Mask);
  }

  return Cmp;
}
SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDValue CmpOp0 = Op.getOperand(0);
  SDValue CmpOp1 = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  if (VT.isVector())
    return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
  SDValue CCReg = emitCmp(DAG, DL, C);
  return emitSETCC(DAG, DL, CCReg, C.CCValid, C.CCMask);
}
SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue CmpOp0 = Op.getOperand(2);
  SDValue CmpOp1 = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc DL(Op);

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
  SDValue CCReg = emitCmp(DAG, DL, C);
  return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
                     Op.getOperand(0), DAG.getConstant(C.CCValid, DL, MVT::i32),
                     DAG.getConstant(C.CCMask, DL, MVT::i32), Dest, CCReg);
}
// Return true if Pos is CmpOp and Neg is the negative of CmpOp,
// allowing Pos and Neg to be wider than CmpOp.
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {
  return (Neg.getOpcode() == ISD::SUB &&
          Neg.getOperand(0).getOpcode() == ISD::Constant &&
          cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
          Neg.getOperand(1) == Pos &&
          (Pos == CmpOp ||
           (Pos.getOpcode() == ISD::SIGN_EXTEND &&
            Pos.getOperand(0) == CmpOp)));
}
// Return the absolute or negative absolute of Op; IsNegative decides which.
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op,
                           bool IsNegative) {
  Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op);
  if (IsNegative)
    Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(),
                     DAG.getConstant(0, DL, Op.getValueType()), Op);
  return Op;
}
SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue CmpOp0 = Op.getOperand(0);
  SDValue CmpOp1 = Op.getOperand(1);
  SDValue TrueOp = Op.getOperand(2);
  SDValue FalseOp = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));

  // Check for absolute and negative-absolute selections, including those
  // where the comparison value is sign-extended (for LPGFR and LNGFR).
  // This check supplements the one in DAGCombiner.
  if (C.Opcode == SystemZISD::ICMP &&
      C.CCMask != SystemZ::CCMASK_CMP_EQ &&
      C.CCMask != SystemZ::CCMASK_CMP_NE &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    if (isAbsolute(C.Op0, TrueOp, FalseOp))
      return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT);
    if (isAbsolute(C.Op0, FalseOp, TrueOp))
      return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT);
  }

  SDValue CCReg = emitCmp(DAG, DL, C);
  SDValue Ops[] = {TrueOp, FalseOp, DAG.getConstant(C.CCValid, DL, MVT::i32),
                   DAG.getConstant(C.CCMask, DL, MVT::i32), CCReg};

  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, Op.getValueType(), Ops);
}
SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  CodeModel::Model CM = DAG.getTarget().getCodeModel();

  SDValue Result;
  if (Subtarget.isPC32DBLSymbol(GV, CM)) {
    // Assign anchors at 1<<12 byte boundaries.
    uint64_t Anchor = Offset & ~uint64_t(0xfff);
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor);
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);

    // The offset can be folded into the address if it is aligned to a halfword.
    Offset -= Anchor;
    if (Offset != 0 && (Offset & 1) == 0) {
      SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset);
      Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result);
      Offset = 0;
    }
  } else {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  }

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
                         DAG.getConstant(Offset, DL, PtrVT));

  return Result;
}
SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node,
                                                 SelectionDAG &DAG,
                                                 unsigned Opcode,
                                                 SDValue GOTOffset) const {
  SDLoc DL(Node);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Chain = DAG.getEntryNode();
  SDValue Glue;

  // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12.
  SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue);
  Glue = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
  Glue = Chain.getValue(1);

  // The first call operand is the chain and the second is the TLS symbol.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL,
                                           Node->getValueType(0),
                                           0, 0));

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT));
  Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies.
  Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Copy the return value from %r2.
  return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue);
}
SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
                                                  SelectionDAG &DAG) const {
  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // The high part of the thread pointer is in access register 0.
  SDValue TPHi = DAG.getCopyFromReg(Chain, DL, SystemZ::A0, MVT::i32);
  TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);

  // The low part of the thread pointer is in access register 1.
  SDValue TPLo = DAG.getCopyFromReg(Chain, DL, SystemZ::A1, MVT::i32);
  TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);

  // Merge them into a single 64-bit address.
  SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
                                    DAG.getConstant(32, DL, PtrVT));
  return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
}
SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                                     SelectionDAG &DAG) const {
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(Node, DAG);
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  TLSModel::Model model = DAG.getTarget().getTLSModel(GV);

  SDValue TP = lowerThreadPointer(DL, DAG);

  // Get the offset of GA from the thread pointer, based on the TLS model.
  SDValue Offset;
  switch (model) {
  case TLSModel::GeneralDynamic: {
    // Load the GOT offset of the tls_index (module ID / per-symbol offset).
    SystemZConstantPoolValue *CPV =
        SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD);

    Offset = DAG.getConstantPool(CPV, PtrVT, 8);
    Offset = DAG.getLoad(
        PtrVT, DL, DAG.getEntryNode(), Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

    // Call __tls_get_offset to retrieve the offset.
    Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset);
    break;
  }

  case TLSModel::LocalDynamic: {
    // Load the GOT offset of the module ID.
    SystemZConstantPoolValue *CPV =
        SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM);

    Offset = DAG.getConstantPool(CPV, PtrVT, 8);
    Offset = DAG.getLoad(
        PtrVT, DL, DAG.getEntryNode(), Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

    // Call __tls_get_offset to retrieve the module base offset.
    Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset);

    // Note: The SystemZLDCleanupPass will remove redundant computations
    // of the module base offset.  Count total number of local-dynamic
    // accesses to trigger execution of that pass.
    SystemZMachineFunctionInfo* MFI =
        DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>();
    MFI->incNumLocalDynamicTLSAccesses();

    // Add the per-symbol offset.
    CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF);

    SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, 8);
    DTPOffset = DAG.getLoad(
        PtrVT, DL, DAG.getEntryNode(), DTPOffset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

    Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset);
    break;
  }

  case TLSModel::InitialExec: {
    // Load the offset from the GOT.
    Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                        SystemZII::MO_INDNTPOFF);
    Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset);
    Offset =
        DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset,
                    MachinePointerInfo::getGOT(DAG.getMachineFunction()));
    break;
  }

  case TLSModel::LocalExec: {
    // Force the offset into the constant pool and load it from there.
    SystemZConstantPoolValue *CPV =
        SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);

    Offset = DAG.getConstantPool(CPV, PtrVT, 8);
    Offset = DAG.getLoad(
        PtrVT, DL, DAG.getEntryNode(), Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    break;
  }
  }

  // Add the base and offset together.
  return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
}
SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const BlockAddress *BA = Node->getBlockAddress();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
  Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
  return Result;
}
SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
                                              SelectionDAG &DAG) const {
  SDLoc DL(JT);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);

  // Use LARL to load the address of the table.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}
SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(CP);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Result;
  if (CP->isMachineConstantPoolEntry())
    Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                       CP->getAlignment());
  else
    Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                       CP->getAlignment(), CP->getOffset());

  // Use LARL to load the address of the constant pool entry.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}
SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // If the back chain frame index has not been allocated yet, do so.
  SystemZMachineFunctionInfo *FI = MF.getInfo<SystemZMachineFunctionInfo>();
  int BackChainIdx = FI->getFramePointerSaveIndex();
  if (!BackChainIdx) {
    // By definition, the frame address is the address of the back chain.
    BackChainIdx = MFI.CreateFixedObject(8, -SystemZMC::CallFrameSize, false);
    FI->setFramePointerSaveIndex(BackChainIdx);
  }
  SDValue BackChain = DAG.getFrameIndex(BackChainIdx, PtrVT);

  // FIXME The frontend should detect this case.
  if (Depth > 0)
    report_fatal_error("Unsupported stack frame traversal count");

  return BackChain;
}
SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op,
                                               SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // FIXME The frontend should detect this case.
  if (Depth > 0)
    report_fatal_error("Unsupported stack frame traversal count");

  // Return R14D, which has the return address. Mark it an implicit live-in.
  unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT);
}
SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue In = Op.getOperand(0);
  EVT InVT = In.getValueType();
  EVT ResVT = Op.getValueType();

  // Convert loads directly. This is normally done by DAGCombiner,
  // but we need this case for bitcasts that are created during lowering
  // and which are then lowered themselves.
  if (auto *LoadN = dyn_cast<LoadSDNode>(In))
    if (ISD::isNormalLoad(LoadN)) {
      SDValue NewLoad = DAG.getLoad(ResVT, DL, LoadN->getChain(),
                                    LoadN->getBasePtr(), LoadN->getMemOperand());
      // Update the chain uses.
      DAG.ReplaceAllUsesOfValueWith(SDValue(LoadN, 1), NewLoad.getValue(1));
      return NewLoad;
    }

  if (InVT == MVT::i32 && ResVT == MVT::f32) {
    SDValue In64;
    if (Subtarget.hasHighWord()) {
      SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL,
                                       MVT::i64);
      In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
                                       MVT::i64, SDValue(U64, 0), In);
    } else {
      In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
      In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64,
                         DAG.getConstant(32, DL, MVT::i64));
    }
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64);
    return DAG.getTargetExtractSubreg(SystemZ::subreg_h32,
                                      DL, MVT::f32, Out64);
  }
  if (InVT == MVT::f32 && ResVT == MVT::i32) {
    SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
    SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
                                             MVT::f64, SDValue(U64, 0), In);
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64);
    if (Subtarget.hasHighWord())
      return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL,
                                        MVT::i32, Out64);
    SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64,
                                DAG.getConstant(32, DL, MVT::i64));
    return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
  }
  llvm_unreachable("Unexpected bitcast combination");
}
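// Added commentary (not from the original source): the subreg_h32 insertions
// and extractions above reflect the fact that on SystemZ a 32-bit FP value is
// assumed to live in the high half of a 64-bit FP/vector register, so an
// i32<->f32 bitcast is modelled as moving through the high word of an
// i64/f64 container.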
SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
                                            SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Chain   = Op.getOperand(0);
  SDValue Addr    = Op.getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDLoc DL(Op);

  // The initial values of each field.
  const unsigned NumFields = 4;
  SDValue Fields[NumFields] = {
    DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT),
    DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT),
    DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
    DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
  };

  // Store each field into its respective slot.
  SDValue MemOps[NumFields];
  unsigned Offset = 0;
  for (unsigned I = 0; I < NumFields; ++I) {
    SDValue FieldAddr = Addr;
    if (Offset != 0)
      FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
                              DAG.getIntPtrConstant(Offset, DL));
    MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
                             MachinePointerInfo(SV, Offset));
    Offset += 8;
  }
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}
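// Added commentary (assumption based on the SystemZ ELF ABI, not spelled out
// in the original source): the four fields written above correspond to the
// va_list layout { long __gpr; long __fpr; void *__overflow_arg_area;
// void *__reg_save_area; }, stored at offsets 0, 8, 16 and 24, which is why
// the loop advances Offset by 8 per field.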
SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue Chain      = Op.getOperand(0);
  SDValue DstPtr     = Op.getOperand(1);
  SDValue SrcPtr     = Op.getOperand(2);
  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);

  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL),
                       /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
                       /*isTailCall*/false,
                       MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}
SDValue SystemZTargetLowering::
lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  MachineFunction &MF = DAG.getMachineFunction();
  bool RealignOpt = !MF.getFunction().hasFnAttribute("no-realign-stack");
  bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");

  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);
  SDValue Align = Op.getOperand(2);
  SDLoc DL(Op);

  // If user has set the no alignment function attribute, ignore
  // alloca alignments.
  uint64_t AlignVal = (RealignOpt ?
                       dyn_cast<ConstantSDNode>(Align)->getZExtValue() : 0);

  uint64_t StackAlign = TFI->getStackAlignment();
  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
  uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;

  unsigned SPReg = getStackPointerRegisterToSaveRestore();
  SDValue NeededSpace = Size;

  // Get a reference to the stack pointer.
  SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);

  // If we need a backchain, save it now.
  SDValue Backchain;
  if (StoreBackchain)
    Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());

  // Add extra space for alignment if needed.
  if (ExtraAlignSpace)
    NeededSpace = DAG.getNode(ISD::ADD, DL, MVT::i64, NeededSpace,
                              DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));

  // Get the new stack pointer value.
  SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, NeededSpace);

  // Copy the new stack pointer back.
  Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);

  // The allocated data lives above the 160 bytes allocated for the standard
  // frame, plus any outgoing stack arguments. We don't know how much that
  // amounts to yet, so emit a special ADJDYNALLOC placeholder.
  SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
  SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);

  // Dynamically realign if needed.
  if (RequiredAlign > StackAlign) {
    Result =
      DAG.getNode(ISD::ADD, DL, MVT::i64, Result,
                  DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
    Result =
      DAG.getNode(ISD::AND, DL, MVT::i64, Result,
                  DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64));
  }

  if (StoreBackchain)
    Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());

  SDValue Ops[2] = { Result, Chain };
  return DAG.getMergeValues(Ops, DL);
}
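// Illustrative example (added, not from the original source): for an alloca
// requesting 32-byte alignment with the default 8-byte stack alignment,
// ExtraAlignSpace is 24, so Size + 24 bytes are allocated and the
// ADJDYNALLOC-adjusted address is then rounded up with the ADD 24 / AND ~31
// pair above to land on a 32-byte boundary inside the over-allocation.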
SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);

  return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
}
SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue Ops[2];
  if (is32Bit(VT))
    // Just do a normal 64-bit multiplication and extract the results.
    // We define this so that it can be used for constant division.
    lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0),
                    Op.getOperand(1), Ops[1], Ops[0]);
  else if (Subtarget.hasMiscellaneousExtensions2())
    // SystemZISD::SMUL_LOHI returns the low result in the odd register and
    // the high result in the even register. ISD::SMUL_LOHI is defined to
    // return the low half first, so the results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZISD::SMUL_LOHI,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  else {
    // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI:
    //
    //   (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
    //
    // but using the fact that the upper halves are either all zeros
    // or all ones:
    //
    //   (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
    //
    // and grouping the right terms together since they are quicker than the
    // multiplication:
    //
    //   (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
    SDValue C63 = DAG.getConstant(63, DL, MVT::i64);
    SDValue LL = Op.getOperand(0);
    SDValue RL = Op.getOperand(1);
    SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
    SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
    // SystemZISD::UMUL_LOHI returns the low result in the odd register and
    // the high result in the even register. ISD::SMUL_LOHI is defined to
    // return the low half first, so the results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
                     LL, RL, Ops[1], Ops[0]);
    SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
    SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
    SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
    Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
  }
  return DAG.getMergeValues(Ops, DL);
}
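// Added commentary (worked form of the identity used above, not from the
// original source): writing the signed 64-bit value a as al - 2^64 * (a < 0),
// the 128-bit product satisfies
//   a * b = al*bl - 2^64 * (al*(b < 0) + bl*(a < 0))   (mod 2^128),
// and since lh = a >> 63 is all ones exactly when a < 0, the masked terms
// (ll & rh) and (lh & rl) are those correction products, which is what gets
// subtracted from the unsigned high half produced by UMUL_LOHI.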
SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue Ops[2];
  if (is32Bit(VT))
    // Just do a normal 64-bit multiplication and extract the results.
    // We define this so that it can be used for constant division.
    lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
                    Op.getOperand(1), Ops[1], Ops[0]);
  else
    // SystemZISD::UMUL_LOHI returns the low result in the odd register and
    // the high result in the even register. ISD::UMUL_LOHI is defined to
    // return the low half first, so the results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  // We use DSGF for 32-bit division. This means the first operand must
  // always be 64-bit, and the second operand should be 32-bit whenever
  // that is possible, to improve performance.
  if (is32Bit(VT))
    Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
  else if (DAG.ComputeNumSignBits(Op1) > 32)
    Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);

  // DSG(F) returns the remainder in the even register and the
  // quotient in the odd register.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  // DL(G) returns the remainder in the even register and the
  // quotient in the odd register.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM,
                   Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");

  // Get the known-zero masks for each operand.
  SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)};
  KnownBits Known[2] = {DAG.computeKnownBits(Ops[0]),
                        DAG.computeKnownBits(Ops[1])};

  // See if the upper 32 bits of one operand and the lower 32 bits of the
  // other are known zero. They are the low and high operands respectively.
  uint64_t Masks[] = { Known[0].Zero.getZExtValue(),
                       Known[1].Zero.getZExtValue() };

  unsigned High, Low;
  if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
    High = 1, Low = 0;
  else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
    High = 0, Low = 1;
  else
    return Op;

  SDValue LowOp = Ops[Low];
  SDValue HighOp = Ops[High];

  // If the high part is a constant, we're better off using IILH.
  if (HighOp.getOpcode() == ISD::Constant)
    return Op;

  // If the low part is a constant that is outside the range of LHI,
  // then we're better off using IILF.
  if (LowOp.getOpcode() == ISD::Constant) {
    int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
    if (!isInt<16>(Value))
      return Op;
  }

  // Check whether the high part is an AND that doesn't change the
  // high 32 bits and just masks out low bits. We can skip it if so.
  if (HighOp.getOpcode() == ISD::AND &&
      HighOp.getOperand(1).getOpcode() == ISD::Constant) {
    SDValue HighOp0 = HighOp.getOperand(0);
    uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
    if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff))))
      HighOp = HighOp0;
  }

  // Take advantage of the fact that all GR32 operations only change the
  // low 32 bits by truncating Low to an i32 and inserting it directly
  // using a subreg. The interesting cases are those where the truncation
  // can be folded.
  SDLoc DL(Op);
  SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
  return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
                                   MVT::i64, HighOp, Low32);
}
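// Illustrative example (added, not from the original source): for
//   (or (and %x, 0xffffffff00000000), (zext i32 %y to i64))
// the known-zero analysis above picks the AND as the high operand and the
// zero-extension as the low operand, so the OR becomes a truncate of %y
// inserted into the low 32-bit subregister of %x, which later folds into a
// plain GR32 operation.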
// Lower SADDO/SSUBO/UADDO/USUBO nodes.
SDValue SystemZTargetLowering::lowerXALUO(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDNode *N = Op.getNode();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDLoc DL(N);
  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown instruction!");
  case ISD::SADDO:
    BaseOp = SystemZISD::SADDO;
    CCValid = SystemZ::CCMASK_ARITH;
    CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
    break;
  case ISD::SSUBO:
    BaseOp = SystemZISD::SSUBO;
    CCValid = SystemZ::CCMASK_ARITH;
    CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
    break;
  case ISD::UADDO:
    BaseOp = SystemZISD::UADDO;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
    break;
  case ISD::USUBO:
    BaseOp = SystemZISD::USUBO;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
    break;
  }

  SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
  SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);

  SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
  if (N->getValueType(1) == MVT::i1)
    SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);

  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
}
static bool isAddCarryChain(SDValue Carry) {
  while (Carry.getOpcode() == ISD::ADDCARRY)
    Carry = Carry.getOperand(2);
  return Carry.getOpcode() == ISD::UADDO;
}

static bool isSubBorrowChain(SDValue Carry) {
  while (Carry.getOpcode() == ISD::SUBCARRY)
    Carry = Carry.getOperand(2);
  return Carry.getOpcode() == ISD::USUBO;
}
// Lower ADDCARRY/SUBCARRY nodes.
SDValue SystemZTargetLowering::lowerADDSUBCARRY(SDValue Op,
                                                SelectionDAG &DAG) const {

  SDNode *N = Op.getNode();
  MVT VT = N->getSimpleValueType(0);

  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue Carry = Op.getOperand(2);
  SDLoc DL(N);
  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown instruction!");
  case ISD::ADDCARRY:
    if (!isAddCarryChain(Carry))
      return SDValue();

    BaseOp = SystemZISD::ADDCARRY;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
    break;
  case ISD::SUBCARRY:
    if (!isSubBorrowChain(Carry))
      return SDValue();

    BaseOp = SystemZISD::SUBCARRY;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
    break;
  }

  // Set the condition code from the carry flag.
  Carry = DAG.getNode(SystemZISD::GET_CCMASK, DL, MVT::i32, Carry,
                      DAG.getConstant(CCValid, DL, MVT::i32),
                      DAG.getConstant(CCMask, DL, MVT::i32));

  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
  SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS, Carry);

  SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
  if (N->getValueType(1) == MVT::i1)
    SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);

  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
}
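// Added commentary (not from the original source): a 128-bit addition is
// typically legalized as UADDO on the low halves followed by ADDCARRY on the
// high halves, with the carry threaded through operand 2. The
// isAddCarryChain/isSubBorrowChain checks above make sure the chain really
// bottoms out at a UADDO/USUBO, since only then does the incoming carry live
// in the CC logical-carry/borrow bits that the add/subtract-with-carry
// instructions consume.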
SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
                                          SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  Op = Op.getOperand(0);

  // Handle vector types via VPOPCT.
  if (VT.isVector()) {
    Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op);
    Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op);
    switch (VT.getScalarSizeInBits()) {
    case 8:
      break;
    case 16: {
      Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
      SDValue Shift = DAG.getConstant(8, DL, MVT::i32);
      SDValue Tmp = DAG.getNode(SystemZISD::VSHL_BY_SCALAR, DL, VT, Op, Shift);
      Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
      Op = DAG.getNode(SystemZISD::VSRL_BY_SCALAR, DL, VT, Op, Shift);
      break;
    }
    case 32: {
      SDValue Tmp = DAG.getSplatBuildVector(MVT::v16i8, DL,
                                            DAG.getConstant(0, DL, MVT::i32));
      Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
      break;
    }
    case 64: {
      SDValue Tmp = DAG.getSplatBuildVector(MVT::v16i8, DL,
                                            DAG.getConstant(0, DL, MVT::i32));
      Op = DAG.getNode(SystemZISD::VSUM, DL, MVT::v4i32, Op, Tmp);
      Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
      break;
    }
    default:
      llvm_unreachable("Unexpected type");
    }
    return Op;
  }

  // Get the known-zero mask for the operand.
  KnownBits Known = DAG.computeKnownBits(Op);
  unsigned NumSignificantBits = (~Known.Zero).getActiveBits();
  if (NumSignificantBits == 0)
    return DAG.getConstant(0, DL, VT);

  // Skip known-zero high parts of the operand.
  int64_t OrigBitSize = VT.getSizeInBits();
  int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits);
  BitSize = std::min(BitSize, OrigBitSize);

  // The POPCNT instruction counts the number of bits in each byte.
  Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op);
  Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::i64, Op);
  Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);

  // Add up per-byte counts in a binary tree. All bits of Op at
  // position larger than BitSize remain zero throughout.
  for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
    SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT));
    if (BitSize != OrigBitSize)
      Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp,
                        DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT));
    Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
  }

  // Extract overall result from high byte.
  if (BitSize > 8)
    Op = DAG.getNode(ISD::SRL, DL, VT, Op,
                     DAG.getConstant(BitSize - 8, DL, VT));

  return Op;
}
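// Worked example (added, not from the original source): for a full 64-bit
// operand, POPCNT leaves one per-byte count in each of the 8 bytes. The loop
// above then adds Op to (Op << 32), (Op << 16) and (Op << 8) in turn, so the
// top byte accumulates the sum of all per-byte counts, and the final shift by
// BitSize - 8 == 56 extracts that byte as the overall population count.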
SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
    cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
  SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
    cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());

  // The only fence that needs an instruction is a sequentially-consistent
  // cross-thread fence.
  if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
      FenceSSID == SyncScope::System) {
    return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
                                      Op.getOperand(0)),
                   0);
  }

  // MEMBARRIER is a compiler barrier; it codegens to a no-op.
  return DAG.getNode(SystemZISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}
// Op is an atomic load. Lower it into a normal volatile load.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
                                                SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(),
                        Node->getChain(), Node->getBasePtr(),
                        Node->getMemoryVT(), Node->getMemOperand());
}
// Op is an atomic store. Lower it into a normal volatile store.
SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(),
                                    Node->getBasePtr(), Node->getMemoryVT(),
                                    Node->getMemOperand());
  // We have to enforce sequential consistency by performing a
  // serialization operation after the store.
  if (Node->getOrdering() == AtomicOrdering::SequentiallyConsistent)
    Chain = SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op),
                                       MVT::Other, Chain), 0);
  return Chain;
}
// Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first
// two into the fullword ATOMIC_LOADW_* operation given by Opcode.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
                                                   SelectionDAG &DAG,
                                                   unsigned Opcode) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());

  // 32-bit operations need no code outside the main loop.
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = MVT::i32;
  if (NarrowVT == WideVT)
    return Op;

  int64_t BitSize = NarrowVT.getSizeInBits();
  SDValue ChainIn = Node->getChain();
  SDValue Addr = Node->getBasePtr();
  SDValue Src2 = Node->getVal();
  MachineMemOperand *MMO = Node->getMemOperand();
  SDLoc DL(Node);
  EVT PtrVT = Addr.getValueType();

  // Convert atomic subtracts of constants into additions.
  if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
    if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
      Opcode = SystemZISD::ATOMIC_LOADW_ADD;
      Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType());
    }

  // Get the address of the containing word.
  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
                                    DAG.getConstant(-4, DL, PtrVT));

  // Get the number of bits that the word must be rotated left in order
  // to bring the field to the top bits of a GR32.
  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
                                 DAG.getConstant(3, DL, PtrVT));
  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);

  // Get the complementing shift amount, for rotating a field in the top
  // bits back to its proper position.
  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
                                    DAG.getConstant(0, DL, WideVT), BitShift);

  // Extend the source operand to 32 bits and prepare it for the inner loop.
  // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
  // operations require the source to be shifted in advance. (This shift
  // can be folded if the source is constant.) For AND and NAND, the lower
  // bits must be set, while for other opcodes they should be left clear.
  if (Opcode != SystemZISD::ATOMIC_SWAPW)
    Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
                       DAG.getConstant(32 - BitSize, DL, WideVT));
  if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
      Opcode == SystemZISD::ATOMIC_LOADW_NAND)
    Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
                       DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT));

  // Construct the ATOMIC_LOADW_* node.
  SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
  SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
                    DAG.getConstant(BitSize, DL, WideVT) };
  SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
                                             NarrowVT, MMO);

  // Rotate the result of the final CS so that the field is in the lower
  // bits of a GR32, then truncate it.
  SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
                                    DAG.getConstant(BitSize, DL, WideVT));
  SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);

  SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
  return DAG.getMergeValues(RetOps, DL);
}
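// Illustrative example (added, not from the original source): for an 8-bit
// atomic operation on address A, AlignedAddr is A & -4 and the effective
// rotate amount is (A & 3) * 8, so the later-expanded CS loop rotates the
// containing word left until the addressed byte sits in the top 8 bits of the
// GR32, applies the 32-bit operation there, and rotates back with
// NegBitShift.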
// Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations
// into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit
// operations into additions.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
                                                    SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  EVT MemVT = Node->getMemoryVT();
  if (MemVT == MVT::i32 || MemVT == MVT::i64) {
    // A full-width operation.
    assert(Op.getValueType() == MemVT && "Mismatched VTs");
    SDValue Src2 = Node->getVal();
    SDValue NegSrc2;
    SDLoc DL(Src2);

    if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
      // Use an addition if the operand is constant and either LAA(G) is
      // available or the negative value is in the range of A(G)FHI.
      int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
      if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
        NegSrc2 = DAG.getConstant(Value, DL, MemVT);
    } else if (Subtarget.hasInterlockedAccess1())
      // Use LAA(G) if available.
      NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT),
                            Src2);

    if (NegSrc2.getNode())
      return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,
                           Node->getChain(), Node->getBasePtr(), NegSrc2,
                           Node->getMemOperand());

    // Use the node as-is.
    return Op;
  }

  return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
}
// Lower 8/16/32/64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS node.
SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
                                                    SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  SDValue ChainIn = Node->getOperand(0);
  SDValue Addr = Node->getOperand(1);
  SDValue CmpVal = Node->getOperand(2);
  SDValue SwapVal = Node->getOperand(3);
  MachineMemOperand *MMO = Node->getMemOperand();
  SDLoc DL(Node);

  // We have native support for 32-bit and 64-bit compare and swap, but we
  // still need to expand extracting the "success" result from the CC.
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
  if (NarrowVT == WideVT) {
    SDVTList Tys = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
    SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };
    SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP,
                                               DL, Tys, Ops, NarrowVT, MMO);
    SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1),
                                SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ);

    DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0));
    DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
    DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2));
    return SDValue();
  }

  // Convert 8-bit and 16-bit compare and swap to a loop, implemented
  // via a fullword ATOMIC_CMP_SWAPW operation.
  int64_t BitSize = NarrowVT.getSizeInBits();
  EVT PtrVT = Addr.getValueType();

  // Get the address of the containing word.
  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
                                    DAG.getConstant(-4, DL, PtrVT));

  // Get the number of bits that the word must be rotated left in order
  // to bring the field to the top bits of a GR32.
  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
                                 DAG.getConstant(3, DL, PtrVT));
  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);

  // Get the complementing shift amount, for rotating a field in the top
  // bits back to its proper position.
  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
                                    DAG.getConstant(0, DL, WideVT), BitShift);

  // Construct the ATOMIC_CMP_SWAPW node.
  SDVTList VTList = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
  SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
                    NegBitShift, DAG.getConstant(BitSize, DL, WideVT) };
  SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
                                             VTList, Ops, NarrowVT, MMO);
  SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1),
                              SystemZ::CCMASK_ICMP, SystemZ::CCMASK_CMP_EQ);

  DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0));
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2));
  return SDValue();
}
MachineMemOperand::Flags
SystemZTargetLowering::getMMOFlags(const Instruction &I) const {
  // Because of how we convert atomic_load and atomic_store to normal loads and
  // stores in the DAG, we need to ensure that the MMOs are marked volatile
  // since DAGCombine hasn't been updated to account for atomic, but non
  // volatile loads. (See D57601)
  if (auto *SI = dyn_cast<StoreInst>(&I))
    if (SI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *LI = dyn_cast<LoadInst>(&I))
    if (LI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
    if (AI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    if (AI->isAtomic())
      return MachineMemOperand::MOVolatile;
  return MachineMemOperand::MONone;
}
SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
  return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
                            SystemZ::R15D, Op.getValueType());
}
SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
  bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");

  SDValue Chain = Op.getOperand(0);
  SDValue NewSP = Op.getOperand(1);
  SDValue Backchain;
  SDLoc DL(Op);

  if (StoreBackchain) {
    SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, MVT::i64);
    Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
  }

  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP);

  if (StoreBackchain)
    Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());

  return Chain;
}
SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
                                             SelectionDAG &DAG) const {
  bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
  if (!IsData)
    // Just preserve the chain.
    return Op.getOperand(0);

  SDLoc DL(Op);
  bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
  unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
  auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
  SDValue Ops[] = {
    Op.getOperand(0),
    DAG.getConstant(Code, DL, MVT::i32),
    Op.getOperand(1)
  };
  return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL,
                                 Node->getVTList(), Ops,
                                 Node->getMemoryVT(), Node->getMemOperand());
}
// Convert condition code in CCReg to an i32 value.
static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg) {
  SDLoc DL(CCReg);
  SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, CCReg);
  return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
                     DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
}
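// Added commentary (assumption about the named constant, not spelled out
// here): IPM deposits the condition code in bits 29:28 of the result
// register, and SystemZ::IPM_CC names that bit position, so the shift above
// leaves the raw CC value (0-3) in the low two bits of the returned i32.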
SDValue
SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
                                              SelectionDAG &DAG) const {
  unsigned Opcode, CCValid;
  if (isIntrinsicWithCCAndChain(Op, Opcode, CCValid)) {
    assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
    SDNode *Node = emitIntrinsicWithCCAndChain(DAG, Op, Opcode);
    SDValue CC = getCCResult(DAG, SDValue(Node, 0));
    DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), CC);
    return SDValue();
  }

  return SDValue();
}
SDValue
SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
                                               SelectionDAG &DAG) const {
  unsigned Opcode, CCValid;
  if (isIntrinsicWithCC(Op, Opcode, CCValid)) {
    SDNode *Node = emitIntrinsicWithCC(DAG, Op, Opcode);
    if (Op->getNumValues() == 1)
      return getCCResult(DAG, SDValue(Node, 0));
    assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");
    return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), Op->getVTList(),
                       SDValue(Node, 0), getCCResult(DAG, SDValue(Node, 1)));
  }

  unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (Id) {
  case Intrinsic::thread_pointer:
    return lowerThreadPointer(SDLoc(Op), DAG);

  case Intrinsic::s390_vpdi:
    return DAG.getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vperm:
    return DAG.getNode(SystemZISD::PERMUTE, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
    return DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:
    return DAG.getNode(SystemZISD::UNPACKL_HIGH, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf:
    return DAG.getNode(SystemZISD::UNPACK_LOW, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:
    return DAG.getNode(SystemZISD::UNPACKL_LOW, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vsumb:
  case Intrinsic::s390_vsumh:
  case Intrinsic::s390_vsumgh:
  case Intrinsic::s390_vsumgf:
  case Intrinsic::s390_vsumqf:
  case Intrinsic::s390_vsumqg:
    return DAG.getNode(SystemZISD::VSUM, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }

  return SDValue();
}
// Says that SystemZISD operation Opcode can be used to perform the equivalent
// of a VPERM with permute vector Bytes. If Opcode takes three operands,
// Operand is the constant third operand, otherwise it is the number of
// bytes in each element of the result.
struct Permute {
  unsigned Opcode;
  unsigned Operand;
  unsigned char Bytes[SystemZ::VectorBytes];
};

static const Permute PermuteForms[] = {
  // VMRHG
  { SystemZISD::MERGE_HIGH, 8,
    { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
  // VMRHF
  { SystemZISD::MERGE_HIGH, 4,
    { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
  // VMRHH
  { SystemZISD::MERGE_HIGH, 2,
    { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
  // VMRHB
  { SystemZISD::MERGE_HIGH, 1,
    { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
  // VMRLG
  { SystemZISD::MERGE_LOW, 8,
    { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
  // VMRLF
  { SystemZISD::MERGE_LOW, 4,
    { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
  // VMRLH
  { SystemZISD::MERGE_LOW, 2,
    { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
  // VMRLB
  { SystemZISD::MERGE_LOW, 1,
    { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
  // VPKG
  { SystemZISD::PACK, 4,
    { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
  // VPKF
  { SystemZISD::PACK, 2,
    { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
  // VPKH
  { SystemZISD::PACK, 1,
    { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
  // VPDI V1, V2, 4  (low half of V1, high half of V2)
  { SystemZISD::PERMUTE_DWORDS, 4,
    { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
  // VPDI V1, V2, 1  (high half of V1, low half of V2)
  { SystemZISD::PERMUTE_DWORDS, 1,
    { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
};
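// Added commentary (not from the original source): in these tables, byte
// selectors 0-15 refer to bytes of the first operand and 16-31 to bytes of
// the second, in VPERM fashion. For example, the VMRHG entry
// { 0..7, 16..23 } takes the high doubleword of each operand, which is
// exactly what a 64-bit merge-high does.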
4035 // Called after matching a vector shuffle against a particular pattern.
4036 // Both the original shuffle and the pattern have two vector operands.
4037 // OpNos[0] is the operand of the original shuffle that should be used for
4038 // operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything.
4039 // OpNos[1] is the same for operand 1 of the pattern. Resolve these -1s and
4040 // set OpNo0 and OpNo1 to the shuffle operands that should actually be used
4041 // for operands 0 and 1 of the pattern.
4042 static bool chooseShuffleOpNos(int *OpNos
, unsigned &OpNo0
, unsigned &OpNo1
) {
4046 OpNo0
= OpNo1
= OpNos
[1];
4047 } else if (OpNos
[1] < 0) {
4048 OpNo0
= OpNo1
= OpNos
[0];
4056 // Bytes is a VPERM-like permute vector, except that -1 is used for
4057 // undefined bytes. Return true if the VPERM can be implemented using P.
4058 // When returning true set OpNo0 to the VPERM operand that should be
4059 // used for operand 0 of P and likewise OpNo1 for operand 1 of P.
4061 // For example, if swapping the VPERM operands allows P to match, OpNo0
4062 // will be 1 and OpNo1 will be 0. If instead Bytes only refers to one
4063 // operand, but rewriting it to use two duplicated operands allows it to
4064 // match P, then OpNo0 and OpNo1 will be the same.
4065 static bool matchPermute(const SmallVectorImpl
<int> &Bytes
, const Permute
&P
,
4066 unsigned &OpNo0
, unsigned &OpNo1
) {
4067 int OpNos
[] = { -1, -1 };
4068 for (unsigned I
= 0; I
< SystemZ::VectorBytes
; ++I
) {
4071 // Make sure that the two permute vectors use the same suboperand
4072 // byte number. Only the operand numbers (the high bits) are
4073 // allowed to differ.
4074 if ((Elt
^ P
.Bytes
[I
]) & (SystemZ::VectorBytes
- 1))
4076 int ModelOpNo
= P
.Bytes
[I
] / SystemZ::VectorBytes
;
4077 int RealOpNo
= unsigned(Elt
) / SystemZ::VectorBytes
;
4078 // Make sure that the operand mappings are consistent with previous
4080 if (OpNos
[ModelOpNo
] == 1 - RealOpNo
)
4082 OpNos
[ModelOpNo
] = RealOpNo
;
4085 return chooseShuffleOpNos(OpNos
, OpNo0
, OpNo1
);
4088 // As above, but search for a matching permute.
4089 static const Permute
*matchPermute(const SmallVectorImpl
<int> &Bytes
,
4090 unsigned &OpNo0
, unsigned &OpNo1
) {
4091 for (auto &P
: PermuteForms
)
4092 if (matchPermute(Bytes
, P
, OpNo0
, OpNo1
))
4097 // Bytes is a VPERM-like permute vector, except that -1 is used for
4098 // undefined bytes. This permute is an operand of an outer permute.
4099 // See whether redistributing the -1 bytes gives a shuffle that can be
4100 // implemented using P. If so, set Transform to a VPERM-like permute vector
4101 // that, when applied to the result of P, gives the original permute in Bytes.
4102 static bool matchDoublePermute(const SmallVectorImpl
<int> &Bytes
,
4104 SmallVectorImpl
<int> &Transform
) {
4106 for (unsigned From
= 0; From
< SystemZ::VectorBytes
; ++From
) {
4107 int Elt
= Bytes
[From
];
4109 // Byte number From of the result is undefined.
4110 Transform
[From
] = -1;
4112 while (P
.Bytes
[To
] != Elt
) {
4114 if (To
== SystemZ::VectorBytes
)
4117 Transform
[From
] = To
;
4123 // As above, but search for a matching permute.
4124 static const Permute
*matchDoublePermute(const SmallVectorImpl
<int> &Bytes
,
4125 SmallVectorImpl
<int> &Transform
) {
4126 for (auto &P
: PermuteForms
)
4127 if (matchDoublePermute(Bytes
, P
, Transform
))
4132 // Convert the mask of the given shuffle op into a byte-level mask,
4133 // as if it had type vNi8.
4134 static bool getVPermMask(SDValue ShuffleOp
,
4135 SmallVectorImpl
<int> &Bytes
) {
4136 EVT VT
= ShuffleOp
.getValueType();
4137 unsigned NumElements
= VT
.getVectorNumElements();
4138 unsigned BytesPerElement
= VT
.getVectorElementType().getStoreSize();
4140 if (auto *VSN
= dyn_cast
<ShuffleVectorSDNode
>(ShuffleOp
)) {
4141 Bytes
.resize(NumElements
* BytesPerElement
, -1);
4142 for (unsigned I
= 0; I
< NumElements
; ++I
) {
4143 int Index
= VSN
->getMaskElt(I
);
4145 for (unsigned J
= 0; J
< BytesPerElement
; ++J
)
4146 Bytes
[I
* BytesPerElement
+ J
] = Index
* BytesPerElement
+ J
;
4150 if (SystemZISD::SPLAT
== ShuffleOp
.getOpcode() &&
4151 isa
<ConstantSDNode
>(ShuffleOp
.getOperand(1))) {
4152 unsigned Index
= ShuffleOp
.getConstantOperandVal(1);
4153 Bytes
.resize(NumElements
* BytesPerElement
, -1);
4154 for (unsigned I
= 0; I
< NumElements
; ++I
)
4155 for (unsigned J
= 0; J
< BytesPerElement
; ++J
)
4156 Bytes
[I
* BytesPerElement
+ J
] = Index
* BytesPerElement
+ J
;
4162 // Bytes is a VPERM-like permute vector, except that -1 is used for
4163 // undefined bytes. See whether bytes [Start, Start + BytesPerElement) of
4164 // the result come from a contiguous sequence of bytes from one input.
4165 // Set Base to the selector for the first byte if so.
4166 static bool getShuffleInput(const SmallVectorImpl
<int> &Bytes
, unsigned Start
,
4167 unsigned BytesPerElement
, int &Base
) {
4169 for (unsigned I
= 0; I
< BytesPerElement
; ++I
) {
4170 if (Bytes
[Start
+ I
] >= 0) {
4171 unsigned Elem
= Bytes
[Start
+ I
];
4174 // Make sure the bytes would come from one input operand.
4175 if (unsigned(Base
) % Bytes
.size() + BytesPerElement
> Bytes
.size())
4177 } else if (unsigned(Base
) != Elem
- I
)
4184 // Bytes is a VPERM-like permute vector, except that -1 is used for
4185 // undefined bytes. Return true if it can be performed using VSLDI.
4186 // When returning true, set StartIndex to the shift amount and OpNo0
4187 // and OpNo1 to the VPERM operands that should be used as the first
4188 // and second shift operand respectively.
4189 static bool isShlDoublePermute(const SmallVectorImpl
<int> &Bytes
,
4190 unsigned &StartIndex
, unsigned &OpNo0
,
4192 int OpNos
[] = { -1, -1 };
4194 for (unsigned I
= 0; I
< 16; ++I
) {
4195 int Index
= Bytes
[I
];
4197 int ExpectedShift
= (Index
- I
) % SystemZ::VectorBytes
;
4198 int ModelOpNo
= unsigned(ExpectedShift
+ I
) / SystemZ::VectorBytes
;
4199 int RealOpNo
= unsigned(Index
) / SystemZ::VectorBytes
;
4201 Shift
= ExpectedShift
;
4202 else if (Shift
!= ExpectedShift
)
4204 // Make sure that the operand mappings are consistent with previous
4206 if (OpNos
[ModelOpNo
] == 1 - RealOpNo
)
4208 OpNos
[ModelOpNo
] = RealOpNo
;
4212 return chooseShuffleOpNos(OpNos
, OpNo0
, OpNo1
);
4215 // Create a node that performs P on operands Op0 and Op1, casting the
4216 // operands to the appropriate type. The type of the result is determined by P.
4217 static SDValue
getPermuteNode(SelectionDAG
&DAG
, const SDLoc
&DL
,
4218 const Permute
&P
, SDValue Op0
, SDValue Op1
) {
4219 // VPDI (PERMUTE_DWORDS) always operates on v2i64s. The input
4220 // elements of a PACK are twice as wide as the outputs.
4221 unsigned InBytes
= (P
.Opcode
== SystemZISD::PERMUTE_DWORDS
? 8 :
4222 P
.Opcode
== SystemZISD::PACK
? P
.Operand
* 2 :
4224 // Cast both operands to the appropriate type.
4225 MVT InVT
= MVT::getVectorVT(MVT::getIntegerVT(InBytes
* 8),
4226 SystemZ::VectorBytes
/ InBytes
);
4227 Op0
= DAG
.getNode(ISD::BITCAST
, DL
, InVT
, Op0
);
4228 Op1
= DAG
.getNode(ISD::BITCAST
, DL
, InVT
, Op1
);
4230 if (P
.Opcode
== SystemZISD::PERMUTE_DWORDS
) {
4231 SDValue Op2
= DAG
.getConstant(P
.Operand
, DL
, MVT::i32
);
4232 Op
= DAG
.getNode(SystemZISD::PERMUTE_DWORDS
, DL
, InVT
, Op0
, Op1
, Op2
);
4233 } else if (P
.Opcode
== SystemZISD::PACK
) {
4234 MVT OutVT
= MVT::getVectorVT(MVT::getIntegerVT(P
.Operand
* 8),
4235 SystemZ::VectorBytes
/ P
.Operand
);
4236 Op
= DAG
.getNode(SystemZISD::PACK
, DL
, OutVT
, Op0
, Op1
);
4238 Op
= DAG
.getNode(P
.Opcode
, DL
, InVT
, Op0
, Op1
);
4243 // Bytes is a VPERM-like permute vector, except that -1 is used for
4244 // undefined bytes. Implement it on operands Ops[0] and Ops[1] using
4246 static SDValue
getGeneralPermuteNode(SelectionDAG
&DAG
, const SDLoc
&DL
,
4248 const SmallVectorImpl
<int> &Bytes
) {
4249 for (unsigned I
= 0; I
< 2; ++I
)
4250 Ops
[I
] = DAG
.getNode(ISD::BITCAST
, DL
, MVT::v16i8
, Ops
[I
]);
4252 // First see whether VSLDI can be used.
4253 unsigned StartIndex
, OpNo0
, OpNo1
;
4254 if (isShlDoublePermute(Bytes
, StartIndex
, OpNo0
, OpNo1
))
4255 return DAG
.getNode(SystemZISD::SHL_DOUBLE
, DL
, MVT::v16i8
, Ops
[OpNo0
],
4256 Ops
[OpNo1
], DAG
.getConstant(StartIndex
, DL
, MVT::i32
));
4258 // Fall back on VPERM. Construct an SDNode for the permute vector.
4259 SDValue IndexNodes
[SystemZ::VectorBytes
];
4260 for (unsigned I
= 0; I
< SystemZ::VectorBytes
; ++I
)
4262 IndexNodes
[I
] = DAG
.getConstant(Bytes
[I
], DL
, MVT::i32
);
4264 IndexNodes
[I
] = DAG
.getUNDEF(MVT::i32
);
4265 SDValue Op2
= DAG
.getBuildVector(MVT::v16i8
, DL
, IndexNodes
);
4266 return DAG
.getNode(SystemZISD::PERMUTE
, DL
, MVT::v16i8
, Ops
[0], Ops
[1], Op2
);
4270 // Describes a general N-operand vector shuffle.
4271 struct GeneralShuffle
{
4272 GeneralShuffle(EVT vt
) : VT(vt
) {}
4274 bool add(SDValue
, unsigned);
4275 SDValue
getNode(SelectionDAG
&, const SDLoc
&);
4277 // The operands of the shuffle.
4278 SmallVector
<SDValue
, SystemZ::VectorBytes
> Ops
;
4280 // Index I is -1 if byte I of the result is undefined. Otherwise the
4281 // result comes from byte Bytes[I] % SystemZ::VectorBytes of operand
4282 // Bytes[I] / SystemZ::VectorBytes.
4283 SmallVector
<int, SystemZ::VectorBytes
> Bytes
;
4285 // The type of the shuffle result.
4290 // Add an extra undefined element to the shuffle.
4291 void GeneralShuffle::addUndef() {
4292 unsigned BytesPerElement
= VT
.getVectorElementType().getStoreSize();
4293 for (unsigned I
= 0; I
< BytesPerElement
; ++I
)
4294 Bytes
.push_back(-1);
4297 // Add an extra element to the shuffle, taking it from element Elem of Op.
4298 // A null Op indicates a vector input whose value will be calculated later;
4299 // there is at most one such input per shuffle and it always has the same
4300 // type as the result. Aborts and returns false if the source vector elements
4301 // of an EXTRACT_VECTOR_ELT are smaller than the destination elements. Per
4302 // LLVM they become implicitly extended, but this is rare and not optimized.
4303 bool GeneralShuffle::add(SDValue Op
, unsigned Elem
) {
4304 unsigned BytesPerElement
= VT
.getVectorElementType().getStoreSize();
4306 // The source vector can have wider elements than the result,
4307 // either through an explicit TRUNCATE or because of type legalization.
4308 // We want the least significant part.
4309 EVT FromVT
= Op
.getNode() ? Op
.getValueType() : VT
;
4310 unsigned FromBytesPerElement
= FromVT
.getVectorElementType().getStoreSize();
4312 // Return false if the source elements are smaller than their destination
4314 if (FromBytesPerElement
< BytesPerElement
)
4317 unsigned Byte
= ((Elem
* FromBytesPerElement
) % SystemZ::VectorBytes
+
4318 (FromBytesPerElement
- BytesPerElement
));
4320 // Look through things like shuffles and bitcasts.
4321 while (Op
.getNode()) {
4322 if (Op
.getOpcode() == ISD::BITCAST
)
4323 Op
= Op
.getOperand(0);
4324 else if (Op
.getOpcode() == ISD::VECTOR_SHUFFLE
&& Op
.hasOneUse()) {
4325 // See whether the bytes we need come from a contiguous part of one
4327 SmallVector
<int, SystemZ::VectorBytes
> OpBytes
;
4328 if (!getVPermMask(Op
, OpBytes
))
4331 if (!getShuffleInput(OpBytes
, Byte
, BytesPerElement
, NewByte
))
4337 Op
= Op
.getOperand(unsigned(NewByte
) / SystemZ::VectorBytes
);
4338 Byte
= unsigned(NewByte
) % SystemZ::VectorBytes
;
4339 } else if (Op
.isUndef()) {
4346 // Make sure that the source of the extraction is in Ops.
4348 for (; OpNo
< Ops
.size(); ++OpNo
)
4349 if (Ops
[OpNo
] == Op
)
4351 if (OpNo
== Ops
.size())
4354 // Add the element to Bytes.
4355 unsigned Base
= OpNo
* SystemZ::VectorBytes
+ Byte
;
4356 for (unsigned I
= 0; I
< BytesPerElement
; ++I
)
4357 Bytes
.push_back(Base
+ I
);
4362 // Return SDNodes for the completed shuffle.
4363 SDValue
GeneralShuffle::getNode(SelectionDAG
&DAG
, const SDLoc
&DL
) {
4364 assert(Bytes
.size() == SystemZ::VectorBytes
&& "Incomplete vector");
4366 if (Ops
.size() == 0)
4367 return DAG
.getUNDEF(VT
);
4369 // Make sure that there are at least two shuffle operands.
4370 if (Ops
.size() == 1)
4371 Ops
.push_back(DAG
.getUNDEF(MVT::v16i8
));
4373 // Create a tree of shuffles, deferring root node until after the loop.
4374 // Try to redistribute the undefined elements of non-root nodes so that
4375 // the non-root shuffles match something like a pack or merge, then adjust
4376 // the parent node's permute vector to compensate for the new order.
4377 // Among other things, this copes with vectors like <2 x i16> that were
4378 // padded with undefined elements during type legalization.
4380 // In the best case this redistribution will lead to the whole tree
4381 // using packs and merges. It should rarely be a loss in other cases.
4382 unsigned Stride
= 1;
4383 for (; Stride
* 2 < Ops
.size(); Stride
*= 2) {
4384 for (unsigned I
= 0; I
< Ops
.size() - Stride
; I
+= Stride
* 2) {
4385 SDValue SubOps
[] = { Ops
[I
], Ops
[I
+ Stride
] };
4387 // Create a mask for just these two operands.
4388 SmallVector
<int, SystemZ::VectorBytes
> NewBytes(SystemZ::VectorBytes
);
4389 for (unsigned J
= 0; J
< SystemZ::VectorBytes
; ++J
) {
4390 unsigned OpNo
= unsigned(Bytes
[J
]) / SystemZ::VectorBytes
;
4391 unsigned Byte
= unsigned(Bytes
[J
]) % SystemZ::VectorBytes
;
4394 else if (OpNo
== I
+ Stride
)
4395 NewBytes
[J
] = SystemZ::VectorBytes
+ Byte
;
4399 // See if it would be better to reorganize NewMask to avoid using VPERM.
4400 SmallVector
<int, SystemZ::VectorBytes
> NewBytesMap(SystemZ::VectorBytes
);
4401 if (const Permute
*P
= matchDoublePermute(NewBytes
, NewBytesMap
)) {
4402 Ops
[I
] = getPermuteNode(DAG
, DL
, *P
, SubOps
[0], SubOps
[1]);
4403 // Applying NewBytesMap to Ops[I] gets back to NewBytes.
4404 for (unsigned J
= 0; J
< SystemZ::VectorBytes
; ++J
) {
4405 if (NewBytes
[J
] >= 0) {
4406 assert(unsigned(NewBytesMap
[J
]) < SystemZ::VectorBytes
&&
4407 "Invalid double permute");
4408 Bytes
[J
] = I
* SystemZ::VectorBytes
+ NewBytesMap
[J
];
4410 assert(NewBytesMap
[J
] < 0 && "Invalid double permute");
4413 // Just use NewBytes on the operands.
4414 Ops
[I
] = getGeneralPermuteNode(DAG
, DL
, SubOps
, NewBytes
);
4415 for (unsigned J
= 0; J
< SystemZ::VectorBytes
; ++J
)
4416 if (NewBytes
[J
] >= 0)
4417 Bytes
[J
] = I
* SystemZ::VectorBytes
+ J
;
4422 // Now we just have 2 inputs. Put the second operand in Ops[1].
4424 Ops
[1] = Ops
[Stride
];
4425 for (unsigned I
= 0; I
< SystemZ::VectorBytes
; ++I
)
4426 if (Bytes
[I
] >= int(SystemZ::VectorBytes
))
4427 Bytes
[I
] -= (Stride
- 1) * SystemZ::VectorBytes
;
4430 // Look for an instruction that can do the permute without resorting
4432 unsigned OpNo0
, OpNo1
;
4434 if (const Permute
*P
= matchPermute(Bytes
, OpNo0
, OpNo1
))
4435 Op
= getPermuteNode(DAG
, DL
, *P
, Ops
[OpNo0
], Ops
[OpNo1
]);
4437 Op
= getGeneralPermuteNode(DAG
, DL
, &Ops
[0], Bytes
);
4438 return DAG
.getNode(ISD::BITCAST
, DL
, VT
, Op
);
// Return true if the given BUILD_VECTOR is a scalar-to-vector conversion.
static bool isScalarToVector(SDValue Op) {
  for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I)
    if (!Op.getOperand(I).isUndef())
      return false;
  return true;
}
// Return a vector of type VT that contains Value in the first element.
// The other elements don't matter.
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                   SDValue Value) {
  // If we have a constant, replicate it to all elements and let the
  // BUILD_VECTOR lowering take care of it.
  if (Value.getOpcode() == ISD::Constant ||
      Value.getOpcode() == ISD::ConstantFP) {
    SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value);
    return DAG.getBuildVector(VT, DL, Ops);
  }
  if (Value.isUndef())
    return DAG.getUNDEF(VT);
  return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
}
// Return a vector of type VT in which Op0 is in element 0 and Op1 is in
// element 1. Used for cases in which replication is cheap.
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                 SDValue Op0, SDValue Op1) {
  if (Op0.isUndef()) {
    if (Op1.isUndef())
      return DAG.getUNDEF(VT);
    return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1);
  }
  if (Op1.isUndef())
    return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0);
  return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT,
                     buildScalarToVector(DAG, DL, VT, Op0),
                     buildScalarToVector(DAG, DL, VT, Op1));
}
// Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64
// vector for them.
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0,
                          SDValue Op1) {
  if (Op0.isUndef() && Op1.isUndef())
    return DAG.getUNDEF(MVT::v2i64);
  // If one of the two inputs is undefined then replicate the other one,
  // in order to avoid using another register unnecessarily.
  if (Op0.isUndef())
    Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
  else if (Op1.isUndef())
    Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
  else {
    Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
    Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
  }
  return DAG.getNode(SystemZISD::JOIN_DWORDS, DL, MVT::v2i64, Op0, Op1);
}
4500 // If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually
4501 // better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for
4502 // the non-EXTRACT_VECTOR_ELT elements. See if the given BUILD_VECTOR
4503 // would benefit from this representation and return it if so.
4504 static SDValue
tryBuildVectorShuffle(SelectionDAG
&DAG
,
4505 BuildVectorSDNode
*BVN
) {
4506 EVT VT
= BVN
->getValueType(0);
4507 unsigned NumElements
= VT
.getVectorNumElements();
4509 // Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation
4510 // on byte vectors. If there are non-EXTRACT_VECTOR_ELT elements that still
4511 // need a BUILD_VECTOR, add an additional placeholder operand for that
4512 // BUILD_VECTOR and store its operands in ResidueOps.
4513 GeneralShuffle
GS(VT
);
4514 SmallVector
<SDValue
, SystemZ::VectorBytes
> ResidueOps
;
4515 bool FoundOne
= false;
4516 for (unsigned I
= 0; I
< NumElements
; ++I
) {
4517 SDValue Op
= BVN
->getOperand(I
);
4518 if (Op
.getOpcode() == ISD::TRUNCATE
)
4519 Op
= Op
.getOperand(0);
4520 if (Op
.getOpcode() == ISD::EXTRACT_VECTOR_ELT
&&
4521 Op
.getOperand(1).getOpcode() == ISD::Constant
) {
4522 unsigned Elem
= cast
<ConstantSDNode
>(Op
.getOperand(1))->getZExtValue();
4523 if (!GS
.add(Op
.getOperand(0), Elem
))
4526 } else if (Op
.isUndef()) {
4529 if (!GS
.add(SDValue(), ResidueOps
.size()))
4531 ResidueOps
.push_back(BVN
->getOperand(I
));
4535 // Nothing to do if there are no EXTRACT_VECTOR_ELTs.
4539 // Create the BUILD_VECTOR for the remaining elements, if any.
4540 if (!ResidueOps
.empty()) {
4541 while (ResidueOps
.size() < NumElements
)
4542 ResidueOps
.push_back(DAG
.getUNDEF(ResidueOps
[0].getValueType()));
4543 for (auto &Op
: GS
.Ops
) {
4544 if (!Op
.getNode()) {
4545 Op
= DAG
.getBuildVector(VT
, SDLoc(BVN
), ResidueOps
);
4550 return GS
.getNode(DAG
, SDLoc(BVN
));
bool SystemZTargetLowering::isVectorElementLoad(SDValue Op) const {
  if (Op.getOpcode() == ISD::LOAD && cast<LoadSDNode>(Op)->isUnindexed())
    return true;
  if (Subtarget.hasVectorEnhancements2() && Op.getOpcode() == SystemZISD::LRV)
    return true;
  return false;
}
// Combine GPR scalar values Elems into a vector of type VT.
SDValue
SystemZTargetLowering::buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                   SmallVectorImpl<SDValue> &Elems) const {
  // See whether there is a single replicated value.
  SDValue Single;
  unsigned int NumElements = Elems.size();
  unsigned int Count = 0;
  for (auto Elem : Elems) {
    if (!Elem.isUndef()) {
      if (!Single.getNode())
        Single = Elem;
      else if (Elem != Single) {
        Single = SDValue();
        break;
      }
      Count += 1;
    }
  }
  // There are three cases here:
  //
  // - if the only defined element is a loaded one, the best sequence
  //   is a replicating load.
  //
  // - otherwise, if the only defined element is an i64 value, we will
  //   end up with the same VLVGP sequence regardless of whether we short-cut
  //   for replication or fall through to the later code.
  //
  // - otherwise, if the only defined element is an i32 or smaller value,
  //   we would need 2 instructions to replicate it: VLVGP followed by VREPx.
  //   This is only a win if the single defined element is used more than once.
  //   In other cases we're better off using a single VLVGx.
  if (Single.getNode() && (Count > 1 || isVectorElementLoad(Single)))
    return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Single);

  // If all elements are loads, use VLREP/VLEs (below).
  bool AllLoads = true;
  for (auto Elem : Elems)
    if (!isVectorElementLoad(Elem)) {
      AllLoads = false;
      break;
    }

  // The best way of building a v2i64 from two i64s is to use VLVGP.
  if (VT == MVT::v2i64 && !AllLoads)
    return joinDwords(DAG, DL, Elems[0], Elems[1]);

  // Use a 64-bit merge high to combine two doubles.
  if (VT == MVT::v2f64 && !AllLoads)
    return buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);

  // Build v4f32 values directly from the FPRs:
  //
  //   <Axxx> <Bxxx> <Cxxxx> <Dxxx>
  //         V              V         VMRHF
  //      <ABxx>         <CDxx>
  //                V                 VMRHG
  //              <ABCD>
  if (VT == MVT::v4f32 && !AllLoads) {
    SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);
    SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]);
    // Avoid unnecessary undefs by reusing the other operand.
    if (Op01.isUndef())
      Op01 = Op23;
    else if (Op23.isUndef())
      Op23 = Op01;
    // Merging identical replications is a no-op.
    if (Op01.getOpcode() == SystemZISD::REPLICATE && Op01 == Op23)
      return Op01;
    Op01 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op01);
    Op23 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op23);
    SDValue Op = DAG.getNode(SystemZISD::MERGE_HIGH,
                             DL, MVT::v2i64, Op01, Op23);
    return DAG.getNode(ISD::BITCAST, DL, VT, Op);
  }

  // Collect the constant terms.
  SmallVector<SDValue, SystemZ::VectorBytes> Constants(NumElements, SDValue());
  SmallVector<bool, SystemZ::VectorBytes> Done(NumElements, false);

  unsigned NumConstants = 0;
  for (unsigned I = 0; I < NumElements; ++I) {
    SDValue Elem = Elems[I];
    if (Elem.getOpcode() == ISD::Constant ||
        Elem.getOpcode() == ISD::ConstantFP) {
      NumConstants += 1;
      Constants[I] = Elem;
      Done[I] = true;
    }
  }
  // If there was at least one constant, fill in the other elements of
  // Constants with undefs to get a full vector constant and use that
  // as the starting point.
  SDValue Result;
  SDValue ReplicatedVal;
  if (NumConstants > 0) {
    for (unsigned I = 0; I < NumElements; ++I)
      if (!Constants[I].getNode())
        Constants[I] = DAG.getUNDEF(Elems[I].getValueType());
    Result = DAG.getBuildVector(VT, DL, Constants);
  } else {
    // Otherwise try to use VLREP or VLVGP to start the sequence in order to
    // avoid a false dependency on any previous contents of the vector
    // register.

    // Use a VLREP if at least one element is a load. Make sure to replicate
    // the load with the most elements having its value.
    std::map<const SDNode*, unsigned> UseCounts;
    SDNode *LoadMaxUses = nullptr;
    for (unsigned I = 0; I < NumElements; ++I)
      if (isVectorElementLoad(Elems[I])) {
        SDNode *Ld = Elems[I].getNode();
        UseCounts[Ld]++;
        if (LoadMaxUses == nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld])
          LoadMaxUses = Ld;
      }
    if (LoadMaxUses != nullptr) {
      ReplicatedVal = SDValue(LoadMaxUses, 0);
      Result = DAG.getNode(SystemZISD::REPLICATE, DL, VT, ReplicatedVal);
    } else {
      // Try to use VLVGP.
      unsigned I1 = NumElements / 2 - 1;
      unsigned I2 = NumElements - 1;
      bool Def1 = !Elems[I1].isUndef();
      bool Def2 = !Elems[I2].isUndef();
      if (Def1 || Def2) {
        SDValue Elem1 = Elems[Def1 ? I1 : I2];
        SDValue Elem2 = Elems[Def2 ? I2 : I1];
        Result = DAG.getNode(ISD::BITCAST, DL, VT,
                             joinDwords(DAG, DL, Elem1, Elem2));
        Done[I1] = true;
        Done[I2] = true;
      } else
        Result = DAG.getUNDEF(VT);
    }
  }

  // Use VLVGx to insert the other elements.
  for (unsigned I = 0; I < NumElements; ++I)
    if (!Done[I] && !Elems[I].isUndef() && Elems[I] != ReplicatedVal)
      Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Result, Elems[I],
                           DAG.getConstant(I, DL, MVT::i32));
  return Result;
}
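// Illustrative example of the non-constant path: building a v4i32 from
// {load A, X, load A, undef} (X a non-constant GPR value) replicates the
// load of A first, since it is the loaded element with the most uses, and
// then inserts X with a single INSERT_VECTOR_ELT; elements equal to the
// replicated load need no further insertion.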
SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  auto *BVN = cast<BuildVectorSDNode>(Op.getNode());
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  if (BVN->isConstant()) {
    if (SystemZVectorConstantInfo(BVN).isVectorConstantLegal(Subtarget))
      return Op;

    // Fall back to loading it from memory.
    return SDValue();
  }

  // See if we should use shuffles to construct the vector from other vectors.
  if (SDValue Res = tryBuildVectorShuffle(DAG, BVN))
    return Res;

  // Detect SCALAR_TO_VECTOR conversions.
  if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op))
    return buildScalarToVector(DAG, DL, VT, Op.getOperand(0));

  // Otherwise use buildVector to build the vector up from GPRs.
  unsigned NumElements = Op.getNumOperands();
  SmallVector<SDValue, SystemZ::VectorBytes> Ops(NumElements);
  for (unsigned I = 0; I < NumElements; ++I)
    Ops[I] = Op.getOperand(I);
  return buildVector(DAG, DL, VT, Ops);
}
SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
                                                   SelectionDAG &DAG) const {
  auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode());
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  unsigned NumElements = VT.getVectorNumElements();

  if (VSN->isSplat()) {
    SDValue Op0 = Op.getOperand(0);
    unsigned Index = VSN->getSplatIndex();
    assert(Index < VT.getVectorNumElements() &&
           "Splat index should be defined and in first operand");
    // See whether the value we're splatting is directly available as a scalar.
    if ((Index == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
        Op0.getOpcode() == ISD::BUILD_VECTOR)
      return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0.getOperand(Index));
    // Otherwise keep it as a vector-to-vector operation.
    return DAG.getNode(SystemZISD::SPLAT, DL, VT, Op.getOperand(0),
                       DAG.getConstant(Index, DL, MVT::i32));
  }

  GeneralShuffle GS(VT);
  for (unsigned I = 0; I < NumElements; ++I) {
    int Elt = VSN->getMaskElt(I);
    if (Elt < 0)
      GS.addUndef();
    else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
                     unsigned(Elt) % NumElements))
      return SDValue();
  }
  return GS.getNode(DAG, SDLoc(VSN));
}
SDValue SystemZTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // Just insert the scalar into element 0 of an undefined vector.
  return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL,
                     Op.getValueType(), DAG.getUNDEF(Op.getValueType()),
                     Op.getOperand(0), DAG.getConstant(0, DL, MVT::i32));
}
SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                      SelectionDAG &DAG) const {
  // Handle insertions of floating-point values.
  SDLoc DL(Op);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);
  EVT VT = Op.getValueType();

  // Insertions into constant indices of a v2f64 can be done using VPDI.
  // However, if the inserted value is a bitcast or a constant then it's
  // better to use GPRs, as below.
  if (VT == MVT::v2f64 &&
      Op1.getOpcode() != ISD::BITCAST &&
      Op1.getOpcode() != ISD::ConstantFP &&
      Op2.getOpcode() == ISD::Constant) {
    uint64_t Index = cast<ConstantSDNode>(Op2)->getZExtValue();
    unsigned Mask = VT.getVectorNumElements() - 1;
    if (Index <= Mask)
      return Op;
  }

  // Otherwise bitcast to the equivalent integer form and insert via a GPR.
  MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
  MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements());
  SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT,
                            DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0),
                            DAG.getNode(ISD::BITCAST, DL, IntVT, Op1), Op2);
  return DAG.getNode(ISD::BITCAST, DL, VT, Res);
}
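// For example, an f32 insertion into a v4f32 is rewritten here as an i32
// insertion into the bitcast v4i32 value, so that instruction selection can
// use a GPR-based element insert.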
SDValue
SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                               SelectionDAG &DAG) const {
  // Handle extractions of floating-point values.
  SDLoc DL(Op);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  EVT VecVT = Op0.getValueType();

  // Extractions of constant indices can be done directly.
  if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {
    uint64_t Index = CIndexN->getZExtValue();
    unsigned Mask = VecVT.getVectorNumElements() - 1;
    if (Index <= Mask)
      return Op;
  }

  // Otherwise bitcast to the equivalent integer form and extract via a GPR.
  MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits());
  MVT IntVecVT = MVT::getVectorVT(IntVT, VecVT.getVectorNumElements());
  SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntVT,
                            DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), Op1);
  return DAG.getNode(ISD::BITCAST, DL, VT, Res);
}
SDValue
SystemZTargetLowering::lowerExtendVectorInreg(SDValue Op, SelectionDAG &DAG,
                                              unsigned UnpackHigh) const {
  SDValue PackedOp = Op.getOperand(0);
  EVT OutVT = Op.getValueType();
  EVT InVT = PackedOp.getValueType();
  unsigned ToBits = OutVT.getScalarSizeInBits();
  unsigned FromBits = InVT.getScalarSizeInBits();
  do {
    FromBits *= 2;
    EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits),
                                 SystemZ::VectorBits / FromBits);
    PackedOp = DAG.getNode(UnpackHigh, SDLoc(PackedOp), OutVT, PackedOp);
  } while (FromBits != ToBits);
  return PackedOp;
}
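// The loop above widens the elements by one power of two per iteration, so
// e.g. a sign extension from the high half of a v16i8 to v4i32 issues two
// UNPACK_HIGH steps: v16i8 -> v8i16 -> v4i32.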
SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG,
                                          unsigned ByScalar) const {
  // Look for cases where a vector shift can use the *_BY_SCALAR form.
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  unsigned ElemBitSize = VT.getScalarSizeInBits();

  // See whether the shift vector is a splat represented as BUILD_VECTOR.
  if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
    APInt SplatBits, SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;
    // Check for constant splats.  Use ElemBitSize as the minimum element
    // width and reject splats that need wider elements.
    if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
                             ElemBitSize, true) &&
        SplatBitSize == ElemBitSize) {
      SDValue Shift = DAG.getConstant(SplatBits.getZExtValue() & 0xfff,
                                      DL, MVT::i32);
      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
    }
    // Check for variable splats.
    BitVector UndefElements;
    SDValue Splat = BVN->getSplatValue(&UndefElements);
    if (Splat) {
      // Since i32 is the smallest legal type, we either need a no-op
      // or a truncation.
      SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Splat);
      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
    }
  }

  // See whether the shift vector is a splat represented as SHUFFLE_VECTOR,
  // and the shift amount is directly available in a GPR.
  if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
    if (VSN->isSplat()) {
      SDValue VSNOp0 = VSN->getOperand(0);
      unsigned Index = VSN->getSplatIndex();
      assert(Index < VT.getVectorNumElements() &&
             "Splat index should be defined and in first operand");
      if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
          VSNOp0.getOpcode() == ISD::BUILD_VECTOR) {
        // Since i32 is the smallest legal type, we either need a no-op
        // or a truncation.
        SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
                                    VSNOp0.getOperand(Index));
        return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
      }
    }
  }

  // Otherwise just treat the current form as legal.
  return Op;
}
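// In both splat cases above the shift amount ends up as a scalar i32
// (truncating if necessary), which is the form the *_BY_SCALAR nodes expect;
// genuinely per-element shift vectors are left in their generic form.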
SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
                                              SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::BR_CC:
    return lowerBR_CC(Op, DAG);
  case ISD::SELECT_CC:
    return lowerSELECT_CC(Op, DAG);
  case ISD::SETCC:
    return lowerSETCC(Op, DAG);
  case ISD::GlobalAddress:
    return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
  case ISD::JumpTable:
    return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
  case ISD::ConstantPool:
    return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
  case ISD::BITCAST:
    return lowerBITCAST(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::VACOPY:
    return lowerVACOPY(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    return lowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::GET_DYNAMIC_AREA_OFFSET:
    return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
  case ISD::SMUL_LOHI:
    return lowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:
    return lowerUMUL_LOHI(Op, DAG);
  case ISD::SDIVREM:
    return lowerSDIVREM(Op, DAG);
  case ISD::UDIVREM:
    return lowerUDIVREM(Op, DAG);
  case ISD::SADDO:
  case ISD::SSUBO:
  case ISD::UADDO:
  case ISD::USUBO:
    return lowerXALUO(Op, DAG);
  case ISD::ADDCARRY:
  case ISD::SUBCARRY:
    return lowerADDSUBCARRY(Op, DAG);
  case ISD::OR:
    return lowerOR(Op, DAG);
  case ISD::CTPOP:
    return lowerCTPOP(Op, DAG);
  case ISD::ATOMIC_FENCE:
    return lowerATOMIC_FENCE(Op, DAG);
  case ISD::ATOMIC_SWAP:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW);
  case ISD::ATOMIC_STORE:
    return lowerATOMIC_STORE(Op, DAG);
  case ISD::ATOMIC_LOAD:
    return lowerATOMIC_LOAD(Op, DAG);
  case ISD::ATOMIC_LOAD_ADD:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
  case ISD::ATOMIC_LOAD_SUB:
    return lowerATOMIC_LOAD_SUB(Op, DAG);
  case ISD::ATOMIC_LOAD_AND:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
  case ISD::ATOMIC_LOAD_OR:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
  case ISD::ATOMIC_LOAD_XOR:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
  case ISD::ATOMIC_LOAD_NAND:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
  case ISD::ATOMIC_LOAD_MIN:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
  case ISD::ATOMIC_LOAD_MAX:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
  case ISD::ATOMIC_LOAD_UMIN:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
  case ISD::ATOMIC_LOAD_UMAX:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    return lowerATOMIC_CMP_SWAP(Op, DAG);
  case ISD::STACKSAVE:
    return lowerSTACKSAVE(Op, DAG);
  case ISD::STACKRESTORE:
    return lowerSTACKRESTORE(Op, DAG);
  case ISD::PREFETCH:
    return lowerPREFETCH(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN:
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN:
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::BUILD_VECTOR:
    return lowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return lowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:
    return lowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return lowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACK_HIGH);
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACKL_HIGH);
  case ISD::SHL:
    return lowerShift(Op, DAG, SystemZISD::VSHL_BY_SCALAR);
  case ISD::SRL:
    return lowerShift(Op, DAG, SystemZISD::VSRL_BY_SCALAR);
  case ISD::SRA:
    return lowerShift(Op, DAG, SystemZISD::VSRA_BY_SCALAR);
  default:
    llvm_unreachable("Unexpected node to lower");
  }
}
// Lower operations with invalid operand or result types (currently used
// only for 128-bit integer types).

static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In) {
  SDLoc DL(In);
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
                           DAG.getIntPtrConstant(0, DL));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
                           DAG.getIntPtrConstant(1, DL));
  SDNode *Pair = DAG.getMachineNode(SystemZ::PAIR128, DL,
                                    MVT::Untyped, Hi, Lo);
  return SDValue(Pair, 0);
}

static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In) {
  SDLoc DL(In);
  SDValue Hi = DAG.getTargetExtractSubreg(SystemZ::subreg_h64,
                                          DL, MVT::i64, In);
  SDValue Lo = DAG.getTargetExtractSubreg(SystemZ::subreg_l64,
                                          DL, MVT::i64, In);
  return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, Lo, Hi);
}
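// These helpers convert between the legal i128 representation (built from two
// i64 halves) and the untyped 128-bit register pair consumed by the 128-bit
// atomic pseudo instructions: PAIR128 glues the high and low halves into a
// GR128 even/odd pair, and the subreg_h64/subreg_l64 extractions undo it.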
void
SystemZTargetLowering::LowerOperationWrapper(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::ATOMIC_LOAD: {
    SDLoc DL(N);
    SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::Other);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
    SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_LOAD_128,
                                          DL, Tys, Ops, MVT::i128, MMO);
    Results.push_back(lowerGR128ToI128(DAG, Res));
    Results.push_back(Res.getValue(1));
    break;
  }
  case ISD::ATOMIC_STORE: {
    SDLoc DL(N);
    SDVTList Tys = DAG.getVTList(MVT::Other);
    SDValue Ops[] = { N->getOperand(0),
                      lowerI128ToGR128(DAG, N->getOperand(2)),
                      N->getOperand(1) };
    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
    SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_STORE_128,
                                          DL, Tys, Ops, MVT::i128, MMO);
    // We have to enforce sequential consistency by performing a
    // serialization operation after the store.
    if (cast<AtomicSDNode>(N)->getOrdering() ==
        AtomicOrdering::SequentiallyConsistent)
      Res = SDValue(DAG.getMachineNode(SystemZ::Serialize, DL,
                                       MVT::Other, Res), 0);
    Results.push_back(Res);
    break;
  }
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
    SDLoc DL(N);
    SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                      lowerI128ToGR128(DAG, N->getOperand(2)),
                      lowerI128ToGR128(DAG, N->getOperand(3)) };
    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
    SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP_128,
                                          DL, Tys, Ops, MVT::i128, MMO);
    SDValue Success = emitSETCC(DAG, DL, Res.getValue(1),
                                SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ);
    Success = DAG.getZExtOrTrunc(Success, DL, N->getValueType(1));
    Results.push_back(lowerGR128ToI128(DAG, Res));
    Results.push_back(Success);
    Results.push_back(Res.getValue(2));
    break;
  }
  default:
    llvm_unreachable("Unexpected node to lower");
  }
}
void
SystemZTargetLowering::ReplaceNodeResults(SDNode *N,
                                          SmallVectorImpl<SDValue> &Results,
                                          SelectionDAG &DAG) const {
  return LowerOperationWrapper(N, Results, DAG);
}
const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
  switch ((SystemZISD::NodeType)Opcode) {
    case SystemZISD::FIRST_NUMBER: break;
    OPCODE(PCREL_WRAPPER);
    OPCODE(PCREL_OFFSET);
    OPCODE(SELECT_CCMASK);
    OPCODE(ADJDYNALLOC);
    OPCODE(SEARCH_STRING);
    OPCODE(TBEGIN_NOFLOAT);
    OPCODE(ROTATE_MASK);
    OPCODE(JOIN_DWORDS);
    OPCODE(PERMUTE_DWORDS);
    OPCODE(UNPACK_HIGH);
    OPCODE(UNPACKL_HIGH);
    OPCODE(UNPACKL_LOW);
    OPCODE(VSHL_BY_SCALAR);
    OPCODE(VSRL_BY_SCALAR);
    OPCODE(VSRA_BY_SCALAR);
    OPCODE(ATOMIC_SWAPW);
    OPCODE(ATOMIC_LOADW_ADD);
    OPCODE(ATOMIC_LOADW_SUB);
    OPCODE(ATOMIC_LOADW_AND);
    OPCODE(ATOMIC_LOADW_OR);
    OPCODE(ATOMIC_LOADW_XOR);
    OPCODE(ATOMIC_LOADW_NAND);
    OPCODE(ATOMIC_LOADW_MIN);
    OPCODE(ATOMIC_LOADW_MAX);
    OPCODE(ATOMIC_LOADW_UMIN);
    OPCODE(ATOMIC_LOADW_UMAX);
    OPCODE(ATOMIC_CMP_SWAPW);
    OPCODE(ATOMIC_CMP_SWAP);
    OPCODE(ATOMIC_LOAD_128);
    OPCODE(ATOMIC_STORE_128);
    OPCODE(ATOMIC_CMP_SWAP_128);
  }
  return nullptr;
#undef OPCODE
}
// Return true if VT is a vector whose elements are a whole number of bytes
// in width. Also check for presence of vector support.
bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const {
  if (!Subtarget.hasVector())
    return false;

  return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0 && VT.isSimple();
}
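// This byte-vector view is what the extraction combines below rely on: any
// vector whose elements are a whole number of bytes wide can be reasoned
// about purely in terms of byte positions within the 128-bit register.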
// Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT
// producing a result of type ResVT.  Op is a possibly bitcast version
// of the input vector and Index is the index (based on type VecVT) that
// should be extracted.  Return the new extraction if a simplification
// was possible or if Force is true.
SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
                                              EVT VecVT, SDValue Op,
                                              unsigned Index,
                                              DAGCombinerInfo &DCI,
                                              bool Force) const {
  SelectionDAG &DAG = DCI.DAG;

  // The number of bytes being extracted.
  unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();

  for (;;) {
    unsigned Opcode = Op.getOpcode();
    if (Opcode == ISD::BITCAST)
      // Look through bitcasts.
      Op = Op.getOperand(0);
    else if ((Opcode == ISD::VECTOR_SHUFFLE || Opcode == SystemZISD::SPLAT) &&
             canTreatAsByteVector(Op.getValueType())) {
      // Get a VPERM-like permute mask and see whether the bytes covered
      // by the extracted element are a contiguous sequence from one
      // input operand.
      SmallVector<int, SystemZ::VectorBytes> Bytes;
      if (!getVPermMask(Op, Bytes))
        break;
      int First;
      if (!getShuffleInput(Bytes, Index * BytesPerElement,
                           BytesPerElement, First))
        break;
      if (First < 0)
        return DAG.getUNDEF(ResVT);
      // Make sure the contiguous sequence starts at a multiple of the
      // original element size.
      unsigned Byte = unsigned(First) % Bytes.size();
      if (Byte % BytesPerElement != 0)
        break;
      // We can get the extracted value directly from an input.
      Index = Byte / BytesPerElement;
      Op = Op.getOperand(unsigned(First) / Bytes.size());
      Force = true;
    } else if (Opcode == ISD::BUILD_VECTOR &&
               canTreatAsByteVector(Op.getValueType())) {
      // We can only optimize this case if the BUILD_VECTOR elements are
      // at least as wide as the extracted value.
      EVT OpVT = Op.getValueType();
      unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
      if (OpBytesPerElement < BytesPerElement)
        break;
      // Make sure that the least-significant bit of the extracted value
      // is the least significant bit of an input.
      unsigned End = (Index + 1) * BytesPerElement;
      if (End % OpBytesPerElement != 0)
        break;
      // We're extracting the low part of one operand of the BUILD_VECTOR.
      Op = Op.getOperand(End / OpBytesPerElement - 1);
      if (!Op.getValueType().isInteger()) {
        EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits());
        Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
        DCI.AddToWorklist(Op.getNode());
      }
      EVT VT = MVT::getIntegerVT(ResVT.getSizeInBits());
      Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
      if (VT != ResVT) {
        DCI.AddToWorklist(Op.getNode());
        Op = DAG.getNode(ISD::BITCAST, DL, ResVT, Op);
      }
      return Op;
    } else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
                Opcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
                Opcode == ISD::ANY_EXTEND_VECTOR_INREG) &&
               canTreatAsByteVector(Op.getValueType()) &&
               canTreatAsByteVector(Op.getOperand(0).getValueType())) {
      // Make sure that only the unextended bits are significant.
      EVT ExtVT = Op.getValueType();
      EVT OpVT = Op.getOperand(0).getValueType();
      unsigned ExtBytesPerElement = ExtVT.getVectorElementType().getStoreSize();
      unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
      unsigned Byte = Index * BytesPerElement;
      unsigned SubByte = Byte % ExtBytesPerElement;
      unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
      if (SubByte < MinSubByte ||
          SubByte + BytesPerElement > ExtBytesPerElement)
        break;
      // Get the byte offset of the unextended element
      Byte = Byte / ExtBytesPerElement * OpBytesPerElement;
      // ...then add the byte offset relative to that element.
      Byte += SubByte - MinSubByte;
      if (Byte % BytesPerElement != 0)
        break;
      Op = Op.getOperand(0);
      Index = Byte / BytesPerElement;
      Force = true;
    } else
      break;
  }
  if (Force) {
    if (Op.getValueType() != VecVT) {
      Op = DAG.getNode(ISD::BITCAST, DL, VecVT, Op);
      DCI.AddToWorklist(Op.getNode());
    }
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Op,
                       DAG.getConstant(Index, DL, MVT::i32));
  }
  return SDValue();
}
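// Illustrative example: extracting element 3 of a v4i32 that is a bitcast of
// a v2i64 BUILD_VECTOR takes the BUILD_VECTOR branch above with End == 16,
// so the whole extraction folds to a TRUNCATE of the second i64 operand and
// no vector extraction is emitted at all.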
// Optimize vector operations in scalar value Op on the basis that Op
// is truncated to TruncVT.
SDValue SystemZTargetLowering::combineTruncateExtract(
    const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const {
  // If we have (trunc (extract_vector_elt X, Y)), try to turn it into
  // (extract_vector_elt (bitcast X), Y'), where (bitcast X) has elements
  // of type TruncVT.
  if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      TruncVT.getSizeInBits() % 8 == 0) {
    SDValue Vec = Op.getOperand(0);
    EVT VecVT = Vec.getValueType();
    if (canTreatAsByteVector(VecVT)) {
      if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
        unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();
        unsigned TruncBytes = TruncVT.getStoreSize();
        if (BytesPerElement % TruncBytes == 0) {
          // Calculate the value of Y' in the above description.  We are
          // splitting the original elements into Scale equal-sized pieces
          // and for truncation purposes want the last (least-significant)
          // of these pieces for IndexN.  This is easiest to do by calculating
          // the start index of the following element and then subtracting 1.
          unsigned Scale = BytesPerElement / TruncBytes;
          unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;

          // Defer the creation of the bitcast from X to combineExtract,
          // which might be able to optimize the extraction.
          VecVT = MVT::getVectorVT(MVT::getIntegerVT(TruncBytes * 8),
                                   VecVT.getStoreSize() / TruncBytes);
          EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
          return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
        }
      }
    }
  }
  return SDValue();
}
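// Worked example: truncating (extract_vector_elt v2i64:X, 1) to i8 gives
// TruncBytes = 1 and BytesPerElement = 8, so Scale = 8 and
// NewIndex = (1 + 1) * 8 - 1 = 15, i.e. the least-significant byte of
// element 1 when X is reinterpreted as v16i8; ResVT is widened to i32
// because the truncated type is narrower than 4 bytes.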
5391 SDValue
SystemZTargetLowering::combineZERO_EXTEND(
5392 SDNode
*N
, DAGCombinerInfo
&DCI
) const {
5393 // Convert (zext (select_ccmask C1, C2)) into (select_ccmask C1', C2')
5394 SelectionDAG
&DAG
= DCI
.DAG
;
5395 SDValue N0
= N
->getOperand(0);
5396 EVT VT
= N
->getValueType(0);
5397 if (N0
.getOpcode() == SystemZISD::SELECT_CCMASK
) {
5398 auto *TrueOp
= dyn_cast
<ConstantSDNode
>(N0
.getOperand(0));
5399 auto *FalseOp
= dyn_cast
<ConstantSDNode
>(N0
.getOperand(1));
5400 if (TrueOp
&& FalseOp
) {
5402 SDValue Ops
[] = { DAG
.getConstant(TrueOp
->getZExtValue(), DL
, VT
),
5403 DAG
.getConstant(FalseOp
->getZExtValue(), DL
, VT
),
5404 N0
.getOperand(2), N0
.getOperand(3), N0
.getOperand(4) };
5405 SDValue NewSelect
= DAG
.getNode(SystemZISD::SELECT_CCMASK
, DL
, VT
, Ops
);
5406 // If N0 has multiple uses, change other uses as well.
5407 if (!N0
.hasOneUse()) {
5408 SDValue TruncSelect
=
5409 DAG
.getNode(ISD::TRUNCATE
, DL
, N0
.getValueType(), NewSelect
);
5410 DCI
.CombineTo(N0
.getNode(), TruncSelect
);
5418 SDValue
SystemZTargetLowering::combineSIGN_EXTEND_INREG(
5419 SDNode
*N
, DAGCombinerInfo
&DCI
) const {
5420 // Convert (sext_in_reg (setcc LHS, RHS, COND), i1)
5421 // and (sext_in_reg (any_extend (setcc LHS, RHS, COND)), i1)
5422 // into (select_cc LHS, RHS, -1, 0, COND)
5423 SelectionDAG
&DAG
= DCI
.DAG
;
5424 SDValue N0
= N
->getOperand(0);
5425 EVT VT
= N
->getValueType(0);
5426 EVT EVT
= cast
<VTSDNode
>(N
->getOperand(1))->getVT();
5427 if (N0
.hasOneUse() && N0
.getOpcode() == ISD::ANY_EXTEND
)
5428 N0
= N0
.getOperand(0);
5429 if (EVT
== MVT::i1
&& N0
.hasOneUse() && N0
.getOpcode() == ISD::SETCC
) {
5431 SDValue Ops
[] = { N0
.getOperand(0), N0
.getOperand(1),
5432 DAG
.getConstant(-1, DL
, VT
), DAG
.getConstant(0, DL
, VT
),
5434 return DAG
.getNode(ISD::SELECT_CC
, DL
, VT
, Ops
);
5439 SDValue
SystemZTargetLowering::combineSIGN_EXTEND(
5440 SDNode
*N
, DAGCombinerInfo
&DCI
) const {
5441 // Convert (sext (ashr (shl X, C1), C2)) to
5442 // (ashr (shl (anyext X), C1'), C2')), since wider shifts are as
5443 // cheap as narrower ones.
5444 SelectionDAG
&DAG
= DCI
.DAG
;
5445 SDValue N0
= N
->getOperand(0);
5446 EVT VT
= N
->getValueType(0);
5447 if (N0
.hasOneUse() && N0
.getOpcode() == ISD::SRA
) {
5448 auto *SraAmt
= dyn_cast
<ConstantSDNode
>(N0
.getOperand(1));
5449 SDValue Inner
= N0
.getOperand(0);
5450 if (SraAmt
&& Inner
.hasOneUse() && Inner
.getOpcode() == ISD::SHL
) {
5451 if (auto *ShlAmt
= dyn_cast
<ConstantSDNode
>(Inner
.getOperand(1))) {
5452 unsigned Extra
= (VT
.getSizeInBits() - N0
.getValueSizeInBits());
5453 unsigned NewShlAmt
= ShlAmt
->getZExtValue() + Extra
;
5454 unsigned NewSraAmt
= SraAmt
->getZExtValue() + Extra
;
5455 EVT ShiftVT
= N0
.getOperand(1).getValueType();
5456 SDValue Ext
= DAG
.getNode(ISD::ANY_EXTEND
, SDLoc(Inner
), VT
,
5457 Inner
.getOperand(0));
5458 SDValue Shl
= DAG
.getNode(ISD::SHL
, SDLoc(Inner
), VT
, Ext
,
5459 DAG
.getConstant(NewShlAmt
, SDLoc(Inner
),
5461 return DAG
.getNode(ISD::SRA
, SDLoc(N0
), VT
, Shl
,
5462 DAG
.getConstant(NewSraAmt
, SDLoc(N0
), ShiftVT
));
5469 SDValue
SystemZTargetLowering::combineMERGE(
5470 SDNode
*N
, DAGCombinerInfo
&DCI
) const {
5471 SelectionDAG
&DAG
= DCI
.DAG
;
5472 unsigned Opcode
= N
->getOpcode();
5473 SDValue Op0
= N
->getOperand(0);
5474 SDValue Op1
= N
->getOperand(1);
5475 if (Op0
.getOpcode() == ISD::BITCAST
)
5476 Op0
= Op0
.getOperand(0);
5477 if (ISD::isBuildVectorAllZeros(Op0
.getNode())) {
5478 // (z_merge_* 0, 0) -> 0. This is mostly useful for using VLLEZF
5480 if (Op1
== N
->getOperand(0))
5482 // (z_merge_? 0, X) -> (z_unpackl_? 0, X).
5483 EVT VT
= Op1
.getValueType();
5484 unsigned ElemBytes
= VT
.getVectorElementType().getStoreSize();
5485 if (ElemBytes
<= 4) {
5486 Opcode
= (Opcode
== SystemZISD::MERGE_HIGH
?
5487 SystemZISD::UNPACKL_HIGH
: SystemZISD::UNPACKL_LOW
);
5488 EVT InVT
= VT
.changeVectorElementTypeToInteger();
5489 EVT OutVT
= MVT::getVectorVT(MVT::getIntegerVT(ElemBytes
* 16),
5490 SystemZ::VectorBytes
/ ElemBytes
/ 2);
5492 Op1
= DAG
.getNode(ISD::BITCAST
, SDLoc(N
), InVT
, Op1
);
5493 DCI
.AddToWorklist(Op1
.getNode());
5495 SDValue Op
= DAG
.getNode(Opcode
, SDLoc(N
), OutVT
, Op1
);
5496 DCI
.AddToWorklist(Op
.getNode());
5497 return DAG
.getNode(ISD::BITCAST
, SDLoc(N
), VT
, Op
);
5503 SDValue
SystemZTargetLowering::combineLOAD(
5504 SDNode
*N
, DAGCombinerInfo
&DCI
) const {
5505 SelectionDAG
&DAG
= DCI
.DAG
;
5506 EVT LdVT
= N
->getValueType(0);
5507 if (LdVT
.isVector() || LdVT
.isInteger())
5509 // Transform a scalar load that is REPLICATEd as well as having other
5510 // use(s) to the form where the other use(s) use the first element of the
5511 // REPLICATE instead of the load. Otherwise instruction selection will not
5512 // produce a VLREP. Avoid extracting to a GPR, so only do this for floating
5516 SmallVector
<SDNode
*, 8> OtherUses
;
5517 for (SDNode::use_iterator UI
= N
->use_begin(), UE
= N
->use_end();
5519 if (UI
->getOpcode() == SystemZISD::REPLICATE
) {
5521 return SDValue(); // Should never happen
5522 Replicate
= SDValue(*UI
, 0);
5524 else if (UI
.getUse().getResNo() == 0)
5525 OtherUses
.push_back(*UI
);
5527 if (!Replicate
|| OtherUses
.empty())
5531 SDValue Extract0
= DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, DL
, LdVT
,
5532 Replicate
, DAG
.getConstant(0, DL
, MVT::i32
));
5533 // Update uses of the loaded Value while preserving old chains.
5534 for (SDNode
*U
: OtherUses
) {
5535 SmallVector
<SDValue
, 8> Ops
;
5536 for (SDValue Op
: U
->ops())
5537 Ops
.push_back((Op
.getNode() == N
&& Op
.getResNo() == 0) ? Extract0
: Op
);
5538 DAG
.UpdateNodeOperands(U
, Ops
);
5540 return SDValue(N
, 0);
bool SystemZTargetLowering::canLoadStoreByteSwapped(EVT VT) const {
  if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)
    return true;
  if (Subtarget.hasVectorEnhancements2())
    if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64)
      return true;
  return false;
}
static bool isVectorElementSwap(ArrayRef<int> M, EVT VT) {
  if (!VT.isVector() || !VT.isSimple() ||
      VT.getSizeInBits() != 128 ||
      VT.getScalarSizeInBits() % 8 != 0)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  for (unsigned i = 0; i < NumElts; ++i) {
    if (M[i] < 0) continue; // ignore UNDEF indices
    if ((unsigned) M[i] != NumElts - 1 - i)
      return false;
  }

  return true;
}
5568 SDValue
SystemZTargetLowering::combineSTORE(
5569 SDNode
*N
, DAGCombinerInfo
&DCI
) const {
5570 SelectionDAG
&DAG
= DCI
.DAG
;
5571 auto *SN
= cast
<StoreSDNode
>(N
);
5572 auto &Op1
= N
->getOperand(1);
5573 EVT MemVT
= SN
->getMemoryVT();
5574 // If we have (truncstoreiN (extract_vector_elt X, Y), Z) then it is better
5575 // for the extraction to be done on a vMiN value, so that we can use VSTE.
5576 // If X has wider elements then convert it to:
5577 // (truncstoreiN (extract_vector_elt (bitcast X), Y2), Z).
5578 if (MemVT
.isInteger() && SN
->isTruncatingStore()) {
5580 combineTruncateExtract(SDLoc(N
), MemVT
, SN
->getValue(), DCI
)) {
5581 DCI
.AddToWorklist(Value
.getNode());
5583 // Rewrite the store with the new form of stored value.
5584 return DAG
.getTruncStore(SN
->getChain(), SDLoc(SN
), Value
,
5585 SN
->getBasePtr(), SN
->getMemoryVT(),
5586 SN
->getMemOperand());
5589 // Combine STORE (BSWAP) into STRVH/STRV/STRVG/VSTBR
5590 if (!SN
->isTruncatingStore() &&
5591 Op1
.getOpcode() == ISD::BSWAP
&&
5592 Op1
.getNode()->hasOneUse() &&
5593 canLoadStoreByteSwapped(Op1
.getValueType())) {
5595 SDValue BSwapOp
= Op1
.getOperand(0);
5597 if (BSwapOp
.getValueType() == MVT::i16
)
5598 BSwapOp
= DAG
.getNode(ISD::ANY_EXTEND
, SDLoc(N
), MVT::i32
, BSwapOp
);
5601 N
->getOperand(0), BSwapOp
, N
->getOperand(2)
5605 DAG
.getMemIntrinsicNode(SystemZISD::STRV
, SDLoc(N
), DAG
.getVTList(MVT::Other
),
5606 Ops
, MemVT
, SN
->getMemOperand());
5608 // Combine STORE (element-swap) into VSTER
5609 if (!SN
->isTruncatingStore() &&
5610 Op1
.getOpcode() == ISD::VECTOR_SHUFFLE
&&
5611 Op1
.getNode()->hasOneUse() &&
5612 Subtarget
.hasVectorEnhancements2()) {
5613 ShuffleVectorSDNode
*SVN
= cast
<ShuffleVectorSDNode
>(Op1
.getNode());
5614 ArrayRef
<int> ShuffleMask
= SVN
->getMask();
5615 if (isVectorElementSwap(ShuffleMask
, Op1
.getValueType())) {
5617 N
->getOperand(0), Op1
.getOperand(0), N
->getOperand(2)
5620 return DAG
.getMemIntrinsicNode(SystemZISD::VSTER
, SDLoc(N
),
5621 DAG
.getVTList(MVT::Other
),
5622 Ops
, MemVT
, SN
->getMemOperand());
5629 SDValue
SystemZTargetLowering::combineVECTOR_SHUFFLE(
5630 SDNode
*N
, DAGCombinerInfo
&DCI
) const {
5631 SelectionDAG
&DAG
= DCI
.DAG
;
5632 // Combine element-swap (LOAD) into VLER
5633 if (ISD::isNON_EXTLoad(N
->getOperand(0).getNode()) &&
5634 N
->getOperand(0).hasOneUse() &&
5635 Subtarget
.hasVectorEnhancements2()) {
5636 ShuffleVectorSDNode
*SVN
= cast
<ShuffleVectorSDNode
>(N
);
5637 ArrayRef
<int> ShuffleMask
= SVN
->getMask();
5638 if (isVectorElementSwap(ShuffleMask
, N
->getValueType(0))) {
5639 SDValue Load
= N
->getOperand(0);
5640 LoadSDNode
*LD
= cast
<LoadSDNode
>(Load
);
5642 // Create the element-swapping load.
5644 LD
->getChain(), // Chain
5645 LD
->getBasePtr() // Ptr
5648 DAG
.getMemIntrinsicNode(SystemZISD::VLER
, SDLoc(N
),
5649 DAG
.getVTList(LD
->getValueType(0), MVT::Other
),
5650 Ops
, LD
->getMemoryVT(), LD
->getMemOperand());
5652 // First, combine the VECTOR_SHUFFLE away. This makes the value produced
5653 // by the load dead.
5654 DCI
.CombineTo(N
, ESLoad
);
5656 // Next, combine the load away, we give it a bogus result value but a real
5657 // chain result. The result value is dead because the shuffle is dead.
5658 DCI
.CombineTo(Load
.getNode(), ESLoad
, ESLoad
.getValue(1));
5660 // Return N so it doesn't get rechecked!
5661 return SDValue(N
, 0);
5668 SDValue
SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
5669 SDNode
*N
, DAGCombinerInfo
&DCI
) const {
5670 SelectionDAG
&DAG
= DCI
.DAG
;
5672 if (!Subtarget
.hasVector())
5675 // Look through bitcasts that retain the number of vector elements.
5676 SDValue Op
= N
->getOperand(0);
5677 if (Op
.getOpcode() == ISD::BITCAST
&&
5678 Op
.getValueType().isVector() &&
5679 Op
.getOperand(0).getValueType().isVector() &&
5680 Op
.getValueType().getVectorNumElements() ==
5681 Op
.getOperand(0).getValueType().getVectorNumElements())
5682 Op
= Op
.getOperand(0);
5684 // Pull BSWAP out of a vector extraction.
5685 if (Op
.getOpcode() == ISD::BSWAP
&& Op
.hasOneUse()) {
5686 EVT VecVT
= Op
.getValueType();
5687 EVT EltVT
= VecVT
.getVectorElementType();
5688 Op
= DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, SDLoc(N
), EltVT
,
5689 Op
.getOperand(0), N
->getOperand(1));
5690 DCI
.AddToWorklist(Op
.getNode());
5691 Op
= DAG
.getNode(ISD::BSWAP
, SDLoc(N
), EltVT
, Op
);
5692 if (EltVT
!= N
->getValueType(0)) {
5693 DCI
.AddToWorklist(Op
.getNode());
5694 Op
= DAG
.getNode(ISD::BITCAST
, SDLoc(N
), N
->getValueType(0), Op
);
5699 // Try to simplify a vector extraction.
5700 if (auto *IndexN
= dyn_cast
<ConstantSDNode
>(N
->getOperand(1))) {
5701 SDValue Op0
= N
->getOperand(0);
5702 EVT VecVT
= Op0
.getValueType();
5703 return combineExtract(SDLoc(N
), N
->getValueType(0), VecVT
, Op0
,
5704 IndexN
->getZExtValue(), DCI
, false);
SDValue SystemZTargetLowering::combineJOIN_DWORDS(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  // (join_dwords X, X) == (replicate X)
  if (N->getOperand(0) == N->getOperand(1))
    return DAG.getNode(SystemZISD::REPLICATE, SDLoc(N), N->getValueType(0),
                       N->getOperand(0));
  return SDValue();
}
5719 SDValue
SystemZTargetLowering::combineFP_ROUND(
5720 SDNode
*N
, DAGCombinerInfo
&DCI
) const {
5722 if (!Subtarget
.hasVector())
5725 // (fpround (extract_vector_elt X 0))
5726 // (fpround (extract_vector_elt X 1)) ->
5727 // (extract_vector_elt (VROUND X) 0)
5728 // (extract_vector_elt (VROUND X) 2)
5730 // This is a special case since the target doesn't really support v2f32s.
5731 SelectionDAG
&DAG
= DCI
.DAG
;
5732 SDValue Op0
= N
->getOperand(0);
5733 if (N
->getValueType(0) == MVT::f32
&&
5735 Op0
.getOpcode() == ISD::EXTRACT_VECTOR_ELT
&&
5736 Op0
.getOperand(0).getValueType() == MVT::v2f64
&&
5737 Op0
.getOperand(1).getOpcode() == ISD::Constant
&&
5738 cast
<ConstantSDNode
>(Op0
.getOperand(1))->getZExtValue() == 0) {
5739 SDValue Vec
= Op0
.getOperand(0);
5740 for (auto *U
: Vec
->uses()) {
5741 if (U
!= Op0
.getNode() &&
5743 U
->getOpcode() == ISD::EXTRACT_VECTOR_ELT
&&
5744 U
->getOperand(0) == Vec
&&
5745 U
->getOperand(1).getOpcode() == ISD::Constant
&&
5746 cast
<ConstantSDNode
>(U
->getOperand(1))->getZExtValue() == 1) {
5747 SDValue OtherRound
= SDValue(*U
->use_begin(), 0);
5748 if (OtherRound
.getOpcode() == ISD::FP_ROUND
&&
5749 OtherRound
.getOperand(0) == SDValue(U
, 0) &&
5750 OtherRound
.getValueType() == MVT::f32
) {
5751 SDValue VRound
= DAG
.getNode(SystemZISD::VROUND
, SDLoc(N
),
5753 DCI
.AddToWorklist(VRound
.getNode());
5755 DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, SDLoc(U
), MVT::f32
,
5756 VRound
, DAG
.getConstant(2, SDLoc(U
), MVT::i32
));
5757 DCI
.AddToWorklist(Extract1
.getNode());
5758 DAG
.ReplaceAllUsesOfValueWith(OtherRound
, Extract1
);
5760 DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, SDLoc(Op0
), MVT::f32
,
5761 VRound
, DAG
.getConstant(0, SDLoc(Op0
), MVT::i32
));
5770 SDValue
SystemZTargetLowering::combineFP_EXTEND(
5771 SDNode
*N
, DAGCombinerInfo
&DCI
) const {
5773 if (!Subtarget
.hasVector())
5776 // (fpextend (extract_vector_elt X 0))
5777 // (fpextend (extract_vector_elt X 2)) ->
5778 // (extract_vector_elt (VEXTEND X) 0)
5779 // (extract_vector_elt (VEXTEND X) 1)
5781 // This is a special case since the target doesn't really support v2f32s.
5782 SelectionDAG
&DAG
= DCI
.DAG
;
5783 SDValue Op0
= N
->getOperand(0);
5784 if (N
->getValueType(0) == MVT::f64
&&
5786 Op0
.getOpcode() == ISD::EXTRACT_VECTOR_ELT
&&
5787 Op0
.getOperand(0).getValueType() == MVT::v4f32
&&
5788 Op0
.getOperand(1).getOpcode() == ISD::Constant
&&
5789 cast
<ConstantSDNode
>(Op0
.getOperand(1))->getZExtValue() == 0) {
5790 SDValue Vec
= Op0
.getOperand(0);
5791 for (auto *U
: Vec
->uses()) {
5792 if (U
!= Op0
.getNode() &&
5794 U
->getOpcode() == ISD::EXTRACT_VECTOR_ELT
&&
5795 U
->getOperand(0) == Vec
&&
5796 U
->getOperand(1).getOpcode() == ISD::Constant
&&
5797 cast
<ConstantSDNode
>(U
->getOperand(1))->getZExtValue() == 2) {
5798 SDValue OtherExtend
= SDValue(*U
->use_begin(), 0);
5799 if (OtherExtend
.getOpcode() == ISD::FP_EXTEND
&&
5800 OtherExtend
.getOperand(0) == SDValue(U
, 0) &&
5801 OtherExtend
.getValueType() == MVT::f64
) {
5802 SDValue VExtend
= DAG
.getNode(SystemZISD::VEXTEND
, SDLoc(N
),
5804 DCI
.AddToWorklist(VExtend
.getNode());
5806 DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, SDLoc(U
), MVT::f64
,
5807 VExtend
, DAG
.getConstant(1, SDLoc(U
), MVT::i32
));
5808 DCI
.AddToWorklist(Extract1
.getNode());
5809 DAG
.ReplaceAllUsesOfValueWith(OtherExtend
, Extract1
);
5811 DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, SDLoc(Op0
), MVT::f64
,
5812 VExtend
, DAG
.getConstant(0, SDLoc(Op0
), MVT::i32
));
5821 SDValue
SystemZTargetLowering::combineBSWAP(
5822 SDNode
*N
, DAGCombinerInfo
&DCI
) const {
5823 SelectionDAG
&DAG
= DCI
.DAG
;
5824 // Combine BSWAP (LOAD) into LRVH/LRV/LRVG/VLBR
5825 if (ISD::isNON_EXTLoad(N
->getOperand(0).getNode()) &&
5826 N
->getOperand(0).hasOneUse() &&
5827 canLoadStoreByteSwapped(N
->getValueType(0))) {
5828 SDValue Load
= N
->getOperand(0);
5829 LoadSDNode
*LD
= cast
<LoadSDNode
>(Load
);
5831 // Create the byte-swapping load.
5833 LD
->getChain(), // Chain
5834 LD
->getBasePtr() // Ptr
5836 EVT LoadVT
= N
->getValueType(0);
5837 if (LoadVT
== MVT::i16
)
5840 DAG
.getMemIntrinsicNode(SystemZISD::LRV
, SDLoc(N
),
5841 DAG
.getVTList(LoadVT
, MVT::Other
),
5842 Ops
, LD
->getMemoryVT(), LD
->getMemOperand());
5844 // If this is an i16 load, insert the truncate.
5845 SDValue ResVal
= BSLoad
;
5846 if (N
->getValueType(0) == MVT::i16
)
5847 ResVal
= DAG
.getNode(ISD::TRUNCATE
, SDLoc(N
), MVT::i16
, BSLoad
);
5849 // First, combine the bswap away. This makes the value produced by the
5851 DCI
.CombineTo(N
, ResVal
);
5853 // Next, combine the load away, we give it a bogus result value but a real
5854 // chain result. The result value is dead because the bswap is dead.
5855 DCI
.CombineTo(Load
.getNode(), ResVal
, BSLoad
.getValue(1));
5857 // Return N so it doesn't get rechecked!
5858 return SDValue(N
, 0);
5861 // Look through bitcasts that retain the number of vector elements.
5862 SDValue Op
= N
->getOperand(0);
5863 if (Op
.getOpcode() == ISD::BITCAST
&&
5864 Op
.getValueType().isVector() &&
5865 Op
.getOperand(0).getValueType().isVector() &&
5866 Op
.getValueType().getVectorNumElements() ==
5867 Op
.getOperand(0).getValueType().getVectorNumElements())
5868 Op
= Op
.getOperand(0);
5870 // Push BSWAP into a vector insertion if at least one side then simplifies.
5871 if (Op
.getOpcode() == ISD::INSERT_VECTOR_ELT
&& Op
.hasOneUse()) {
5872 SDValue Vec
= Op
.getOperand(0);
5873 SDValue Elt
= Op
.getOperand(1);
5874 SDValue Idx
= Op
.getOperand(2);
5876 if (DAG
.isConstantIntBuildVectorOrConstantInt(Vec
) ||
5877 Vec
.getOpcode() == ISD::BSWAP
|| Vec
.isUndef() ||
5878 DAG
.isConstantIntBuildVectorOrConstantInt(Elt
) ||
5879 Elt
.getOpcode() == ISD::BSWAP
|| Elt
.isUndef() ||
5880 (canLoadStoreByteSwapped(N
->getValueType(0)) &&
5881 ISD::isNON_EXTLoad(Elt
.getNode()) && Elt
.hasOneUse())) {
5882 EVT VecVT
= N
->getValueType(0);
5883 EVT EltVT
= N
->getValueType(0).getVectorElementType();
5884 if (VecVT
!= Vec
.getValueType()) {
5885 Vec
= DAG
.getNode(ISD::BITCAST
, SDLoc(N
), VecVT
, Vec
);
5886 DCI
.AddToWorklist(Vec
.getNode());
5888 if (EltVT
!= Elt
.getValueType()) {
5889 Elt
= DAG
.getNode(ISD::BITCAST
, SDLoc(N
), EltVT
, Elt
);
5890 DCI
.AddToWorklist(Elt
.getNode());
5892 Vec
= DAG
.getNode(ISD::BSWAP
, SDLoc(N
), VecVT
, Vec
);
5893 DCI
.AddToWorklist(Vec
.getNode());
5894 Elt
= DAG
.getNode(ISD::BSWAP
, SDLoc(N
), EltVT
, Elt
);
5895 DCI
.AddToWorklist(Elt
.getNode());
5896 return DAG
.getNode(ISD::INSERT_VECTOR_ELT
, SDLoc(N
), VecVT
,
5901 // Push BSWAP into a vector shuffle if at least one side then simplifies.
5902 ShuffleVectorSDNode
*SV
= dyn_cast
<ShuffleVectorSDNode
>(Op
);
5903 if (SV
&& Op
.hasOneUse()) {
5904 SDValue Op0
= Op
.getOperand(0);
5905 SDValue Op1
= Op
.getOperand(1);
5907 if (DAG
.isConstantIntBuildVectorOrConstantInt(Op0
) ||
5908 Op0
.getOpcode() == ISD::BSWAP
|| Op0
.isUndef() ||
5909 DAG
.isConstantIntBuildVectorOrConstantInt(Op1
) ||
5910 Op1
.getOpcode() == ISD::BSWAP
|| Op1
.isUndef()) {
5911 EVT VecVT
= N
->getValueType(0);
5912 if (VecVT
!= Op0
.getValueType()) {
5913 Op0
= DAG
.getNode(ISD::BITCAST
, SDLoc(N
), VecVT
, Op0
);
5914 DCI
.AddToWorklist(Op0
.getNode());
5916 if (VecVT
!= Op1
.getValueType()) {
5917 Op1
= DAG
.getNode(ISD::BITCAST
, SDLoc(N
), VecVT
, Op1
);
5918 DCI
.AddToWorklist(Op1
.getNode());
5920 Op0
= DAG
.getNode(ISD::BSWAP
, SDLoc(N
), VecVT
, Op0
);
5921 DCI
.AddToWorklist(Op0
.getNode());
5922 Op1
= DAG
.getNode(ISD::BSWAP
, SDLoc(N
), VecVT
, Op1
);
5923 DCI
.AddToWorklist(Op1
.getNode());
5924 return DAG
.getVectorShuffle(VecVT
, SDLoc(N
), Op0
, Op1
, SV
->getMask());
5931 static bool combineCCMask(SDValue
&CCReg
, int &CCValid
, int &CCMask
) {
5932 // We have a SELECT_CCMASK or BR_CCMASK comparing the condition code
5933 // set by the CCReg instruction using the CCValid / CCMask masks,
5934 // If the CCReg instruction is itself a ICMP testing the condition
5935 // code set by some other instruction, see whether we can directly
5936 // use that condition code.
5938 // Verify that we have an ICMP against some constant.
5939 if (CCValid
!= SystemZ::CCMASK_ICMP
)
5941 auto *ICmp
= CCReg
.getNode();
5942 if (ICmp
->getOpcode() != SystemZISD::ICMP
)
5944 auto *CompareLHS
= ICmp
->getOperand(0).getNode();
5945 auto *CompareRHS
= dyn_cast
<ConstantSDNode
>(ICmp
->getOperand(1));
5949 // Optimize the case where CompareLHS is a SELECT_CCMASK.
5950 if (CompareLHS
->getOpcode() == SystemZISD::SELECT_CCMASK
) {
5951 // Verify that we have an appropriate mask for a EQ or NE comparison.
5952 bool Invert
= false;
5953 if (CCMask
== SystemZ::CCMASK_CMP_NE
)
5955 else if (CCMask
!= SystemZ::CCMASK_CMP_EQ
)
5958 // Verify that the ICMP compares against one of select values.
5959 auto *TrueVal
= dyn_cast
<ConstantSDNode
>(CompareLHS
->getOperand(0));
5962 auto *FalseVal
= dyn_cast
<ConstantSDNode
>(CompareLHS
->getOperand(1));
5965 if (CompareRHS
->getZExtValue() == FalseVal
->getZExtValue())
5967 else if (CompareRHS
->getZExtValue() != TrueVal
->getZExtValue())
5970 // Compute the effective CC mask for the new branch or select.
5971 auto *NewCCValid
= dyn_cast
<ConstantSDNode
>(CompareLHS
->getOperand(2));
5972 auto *NewCCMask
= dyn_cast
<ConstantSDNode
>(CompareLHS
->getOperand(3));
5973 if (!NewCCValid
|| !NewCCMask
)
5975 CCValid
= NewCCValid
->getZExtValue();
5976 CCMask
= NewCCMask
->getZExtValue();
5980 // Return the updated CCReg link.
5981 CCReg
= CompareLHS
->getOperand(4);
5985 // Optimize the case where CompareRHS is (SRA (SHL (IPM))).
5986 if (CompareLHS
->getOpcode() == ISD::SRA
) {
5987 auto *SRACount
= dyn_cast
<ConstantSDNode
>(CompareLHS
->getOperand(1));
5988 if (!SRACount
|| SRACount
->getZExtValue() != 30)
5990 auto *SHL
= CompareLHS
->getOperand(0).getNode();
5991 if (SHL
->getOpcode() != ISD::SHL
)
5993 auto *SHLCount
= dyn_cast
<ConstantSDNode
>(SHL
->getOperand(1));
5994 if (!SHLCount
|| SHLCount
->getZExtValue() != 30 - SystemZ::IPM_CC
)
5996 auto *IPM
= SHL
->getOperand(0).getNode();
5997 if (IPM
->getOpcode() != SystemZISD::IPM
)
6000 // Avoid introducing CC spills (because SRA would clobber CC).
6001 if (!CompareLHS
->hasOneUse())
6003 // Verify that the ICMP compares against zero.
6004 if (CompareRHS
->getZExtValue() != 0)
6007 // Compute the effective CC mask for the new branch or select.
6009 case SystemZ::CCMASK_CMP_EQ
: break;
6010 case SystemZ::CCMASK_CMP_NE
: break;
6011 case SystemZ::CCMASK_CMP_LT
: CCMask
= SystemZ::CCMASK_CMP_GT
; break;
6012 case SystemZ::CCMASK_CMP_GT
: CCMask
= SystemZ::CCMASK_CMP_LT
; break;
6013 case SystemZ::CCMASK_CMP_LE
: CCMask
= SystemZ::CCMASK_CMP_GE
; break;
6014 case SystemZ::CCMASK_CMP_GE
: CCMask
= SystemZ::CCMASK_CMP_LE
; break;
6015 default: return false;
6018 // Return the updated CCReg link.
6019 CCReg
= IPM
->getOperand(0);
SDValue SystemZTargetLowering::combineBR_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // Combine BR_CCMASK (ICMP (SELECT_CCMASK)) into a single BR_CCMASK.
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
    return SDValue();

  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  SDValue Chain = N->getOperand(0);
  SDValue CCReg = N->getOperand(4);

  if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
    return DAG.getNode(SystemZISD::BR_CCMASK, SDLoc(N), N->getValueType(0),
                       Chain,
                       DAG.getConstant(CCValidVal, SDLoc(N), MVT::i32),
                       DAG.getConstant(CCMaskVal, SDLoc(N), MVT::i32),
                       N->getOperand(3), CCReg);
  return SDValue();
}
SDValue SystemZTargetLowering::combineSELECT_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // Combine SELECT_CCMASK (ICMP (SELECT_CCMASK)) into a single SELECT_CCMASK.
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3));
  if (!CCValid || !CCMask)
    return SDValue();

  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  SDValue CCReg = N->getOperand(4);

  if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
    return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
                       N->getOperand(0), N->getOperand(1),
                       DAG.getConstant(CCValidVal, SDLoc(N), MVT::i32),
                       DAG.getConstant(CCMaskVal, SDLoc(N), MVT::i32),
                       CCReg);
  return SDValue();
}
6075 SDValue
SystemZTargetLowering::combineGET_CCMASK(
6076 SDNode
*N
, DAGCombinerInfo
&DCI
) const {
6078 // Optimize away GET_CCMASK (SELECT_CCMASK) if the CC masks are compatible
6079 auto *CCValid
= dyn_cast
<ConstantSDNode
>(N
->getOperand(1));
6080 auto *CCMask
= dyn_cast
<ConstantSDNode
>(N
->getOperand(2));
6081 if (!CCValid
|| !CCMask
)
6083 int CCValidVal
= CCValid
->getZExtValue();
6084 int CCMaskVal
= CCMask
->getZExtValue();
6086 SDValue Select
= N
->getOperand(0);
6087 if (Select
->getOpcode() != SystemZISD::SELECT_CCMASK
)
6090 auto *SelectCCValid
= dyn_cast
<ConstantSDNode
>(Select
->getOperand(2));
6091 auto *SelectCCMask
= dyn_cast
<ConstantSDNode
>(Select
->getOperand(3));
6092 if (!SelectCCValid
|| !SelectCCMask
)
6094 int SelectCCValidVal
= SelectCCValid
->getZExtValue();
6095 int SelectCCMaskVal
= SelectCCMask
->getZExtValue();
6097 auto *TrueVal
= dyn_cast
<ConstantSDNode
>(Select
->getOperand(0));
6098 auto *FalseVal
= dyn_cast
<ConstantSDNode
>(Select
->getOperand(1));
6099 if (!TrueVal
|| !FalseVal
)
6101 if (TrueVal
->getZExtValue() != 0 && FalseVal
->getZExtValue() == 0)
6103 else if (TrueVal
->getZExtValue() == 0 && FalseVal
->getZExtValue() != 0)
6104 SelectCCMaskVal
^= SelectCCValidVal
;
6108 if (SelectCCValidVal
& ~CCValidVal
)
6110 if (SelectCCMaskVal
!= (CCMaskVal
& SelectCCValidVal
))
6113 return Select
->getOperand(4);
SDValue SystemZTargetLowering::combineIntDIVREM(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  // In the case where the divisor is a vector of constants a cheaper
  // sequence of instructions can replace the divide. BuildSDIV is called to
  // do this during DAG combining, but it only succeeds when it can build a
  // multiplication node. The only option for SystemZ is ISD::SMUL_LOHI, and
  // since it is not Legal but Custom it can only happen before
  // legalization. Therefore we must scalarize this early before Combine
  // 1. For widened vectors, this is already the result of type legalization.
  if (DCI.Level == BeforeLegalizeTypes && VT.isVector() && isTypeLegal(VT) &&
      DAG.isConstantIntBuildVectorOrConstantInt(N->getOperand(1)))
    return DAG.UnrollVectorOp(N);
  return SDValue();
}
SDValue SystemZTargetLowering::unwrapAddress(SDValue N) const {
  if (N->getOpcode() == SystemZISD::PCREL_WRAPPER)
    return N->getOperand(0);
  return N;
}
SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  switch(N->getOpcode()) {
  default: break;
  case ISD::ZERO_EXTEND:        return combineZERO_EXTEND(N, DCI);
  case ISD::SIGN_EXTEND:        return combineSIGN_EXTEND(N, DCI);
  case ISD::SIGN_EXTEND_INREG:  return combineSIGN_EXTEND_INREG(N, DCI);
  case SystemZISD::MERGE_HIGH:
  case SystemZISD::MERGE_LOW:   return combineMERGE(N, DCI);
  case ISD::LOAD:               return combineLOAD(N, DCI);
  case ISD::STORE:              return combineSTORE(N, DCI);
  case ISD::VECTOR_SHUFFLE:     return combineVECTOR_SHUFFLE(N, DCI);
  case ISD::EXTRACT_VECTOR_ELT: return combineEXTRACT_VECTOR_ELT(N, DCI);
  case SystemZISD::JOIN_DWORDS: return combineJOIN_DWORDS(N, DCI);
  case ISD::FP_ROUND:           return combineFP_ROUND(N, DCI);
  case ISD::FP_EXTEND:          return combineFP_EXTEND(N, DCI);
  case ISD::BSWAP:              return combineBSWAP(N, DCI);
  case SystemZISD::BR_CCMASK:   return combineBR_CCMASK(N, DCI);
  case SystemZISD::SELECT_CCMASK: return combineSELECT_CCMASK(N, DCI);
  case SystemZISD::GET_CCMASK:  return combineGET_CCMASK(N, DCI);
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:               return combineIntDIVREM(N, DCI);
  }

  return SDValue();
}
6168 // Return the demanded elements for the OpNo source operand of Op. DemandedElts
6170 static APInt
getDemandedSrcElements(SDValue Op
, const APInt
&DemandedElts
,
6172 EVT VT
= Op
.getValueType();
6173 unsigned NumElts
= (VT
.isVector() ? VT
.getVectorNumElements() : 1);
6175 unsigned Opcode
= Op
.getOpcode();
6176 if (Opcode
== ISD::INTRINSIC_WO_CHAIN
) {
6177 unsigned Id
= cast
<ConstantSDNode
>(Op
.getOperand(0))->getZExtValue();
6179 case Intrinsic::s390_vpksh
: // PACKS
6180 case Intrinsic::s390_vpksf
:
6181 case Intrinsic::s390_vpksg
:
6182 case Intrinsic::s390_vpkshs
: // PACKS_CC
6183 case Intrinsic::s390_vpksfs
:
6184 case Intrinsic::s390_vpksgs
:
6185 case Intrinsic::s390_vpklsh
: // PACKLS
6186 case Intrinsic::s390_vpklsf
:
6187 case Intrinsic::s390_vpklsg
:
6188 case Intrinsic::s390_vpklshs
: // PACKLS_CC
6189 case Intrinsic::s390_vpklsfs
:
6190 case Intrinsic::s390_vpklsgs
:
6191 // VECTOR PACK truncates the elements of two source vectors into one.
6192 SrcDemE
= DemandedElts
;
6194 SrcDemE
.lshrInPlace(NumElts
/ 2);
6195 SrcDemE
= SrcDemE
.trunc(NumElts
/ 2);
6197 // VECTOR UNPACK extends half the elements of the source vector.
6198 case Intrinsic::s390_vuphb
: // VECTOR UNPACK HIGH
6199 case Intrinsic::s390_vuphh
:
6200 case Intrinsic::s390_vuphf
:
6201 case Intrinsic::s390_vuplhb
: // VECTOR UNPACK LOGICAL HIGH
6202 case Intrinsic::s390_vuplhh
:
6203 case Intrinsic::s390_vuplhf
:
6204 SrcDemE
= APInt(NumElts
* 2, 0);
6205 SrcDemE
.insertBits(DemandedElts
, 0);
6207 case Intrinsic::s390_vuplb
: // VECTOR UNPACK LOW
6208 case Intrinsic::s390_vuplhw
:
6209 case Intrinsic::s390_vuplf
:
6210 case Intrinsic::s390_vupllb
: // VECTOR UNPACK LOGICAL LOW
6211 case Intrinsic::s390_vupllh
:
6212 case Intrinsic::s390_vupllf
:
6213 SrcDemE
= APInt(NumElts
* 2, 0);
6214 SrcDemE
.insertBits(DemandedElts
, NumElts
);
6216 case Intrinsic::s390_vpdi
: {
6217 // VECTOR PERMUTE DWORD IMMEDIATE selects one element from each source.
6218 SrcDemE
= APInt(NumElts
, 0);
6219 if (!DemandedElts
[OpNo
- 1])
6221 unsigned Mask
= cast
<ConstantSDNode
>(Op
.getOperand(3))->getZExtValue();
6222 unsigned MaskBit
= ((OpNo
- 1) ? 1 : 4);
6223 // Demand input element 0 or 1, given by the mask bit value.
6224 SrcDemE
.setBit((Mask
& MaskBit
)? 1 : 0);
6227 case Intrinsic::s390_vsldb
: {
6228 // VECTOR SHIFT LEFT DOUBLE BY BYTE
6229 assert(VT
== MVT::v16i8
&& "Unexpected type.");
6230 unsigned FirstIdx
= cast
<ConstantSDNode
>(Op
.getOperand(3))->getZExtValue();
6231 assert (FirstIdx
> 0 && FirstIdx
< 16 && "Unused operand.");
6232 unsigned NumSrc0Els
= 16 - FirstIdx
;
6233 SrcDemE
= APInt(NumElts
, 0);
6235 APInt DemEls
= DemandedElts
.trunc(NumSrc0Els
);
6236 SrcDemE
.insertBits(DemEls
, FirstIdx
);
6238 APInt DemEls
= DemandedElts
.lshr(NumSrc0Els
);
6239 SrcDemE
.insertBits(DemEls
, 0);
6243 case Intrinsic::s390_vperm
:
6244 SrcDemE
= APInt(NumElts
, 1);
6247 llvm_unreachable("Unhandled intrinsic.");
6252 case SystemZISD::JOIN_DWORDS
:
6254 SrcDemE
= APInt(1, 1);
6256 case SystemZISD::SELECT_CCMASK
:
6257 SrcDemE
= DemandedElts
;
6260 llvm_unreachable("Unhandled opcode.");
static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known,
                                  const APInt &DemandedElts,
                                  const SelectionDAG &DAG, unsigned Depth,
                                  unsigned OpNo) {
  APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo);
  APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1);
  KnownBits LHSKnown =
      DAG.computeKnownBits(Op.getOperand(OpNo), Src0DemE, Depth + 1);
  KnownBits RHSKnown =
      DAG.computeKnownBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1);
  Known.Zero = LHSKnown.Zero & RHSKnown.Zero;
  Known.One = LHSKnown.One & RHSKnown.One;
}
6282 SystemZTargetLowering::computeKnownBitsForTargetNode(const SDValue Op
,
6284 const APInt
&DemandedElts
,
6285 const SelectionDAG
&DAG
,
6286 unsigned Depth
) const {
6289 // Intrinsic CC result is returned in the two low bits.
6290 unsigned tmp0
, tmp1
; // not used
6291 if (Op
.getResNo() == 1 && isIntrinsicWithCC(Op
, tmp0
, tmp1
)) {
6292 Known
.Zero
.setBitsFrom(2);
6295 EVT VT
= Op
.getValueType();
6296 if (Op
.getResNo() != 0 || VT
== MVT::Untyped
)
6298 assert (Known
.getBitWidth() == VT
.getScalarSizeInBits() &&
6299 "KnownBits does not match VT in bitwidth");
6300 assert ((!VT
.isVector() ||
6301 (DemandedElts
.getBitWidth() == VT
.getVectorNumElements())) &&
6302 "DemandedElts does not match VT number of elements");
6303 unsigned BitWidth
= Known
.getBitWidth();
6304 unsigned Opcode
= Op
.getOpcode();
6305 if (Opcode
== ISD::INTRINSIC_WO_CHAIN
) {
6306 bool IsLogical
= false;
6307 unsigned Id
= cast
<ConstantSDNode
>(Op
.getOperand(0))->getZExtValue();
6309 case Intrinsic::s390_vpksh
: // PACKS
6310 case Intrinsic::s390_vpksf
:
6311 case Intrinsic::s390_vpksg
:
6312 case Intrinsic::s390_vpkshs
: // PACKS_CC
6313 case Intrinsic::s390_vpksfs
:
6314 case Intrinsic::s390_vpksgs
:
6315 case Intrinsic::s390_vpklsh
: // PACKLS
6316 case Intrinsic::s390_vpklsf
:
6317 case Intrinsic::s390_vpklsg
:
6318 case Intrinsic::s390_vpklshs
: // PACKLS_CC
6319 case Intrinsic::s390_vpklsfs
:
6320 case Intrinsic::s390_vpklsgs
:
6321 case Intrinsic::s390_vpdi
:
6322 case Intrinsic::s390_vsldb
:
6323 case Intrinsic::s390_vperm
:
6324 computeKnownBitsBinOp(Op
, Known
, DemandedElts
, DAG
, Depth
, 1);
6326 case Intrinsic::s390_vuplhb
: // VECTOR UNPACK LOGICAL HIGH
6327 case Intrinsic::s390_vuplhh
:
6328 case Intrinsic::s390_vuplhf
:
6329 case Intrinsic::s390_vupllb
: // VECTOR UNPACK LOGICAL LOW
6330 case Intrinsic::s390_vupllh
:
6331 case Intrinsic::s390_vupllf
:
6334 case Intrinsic::s390_vuphb
: // VECTOR UNPACK HIGH
6335 case Intrinsic::s390_vuphh
:
6336 case Intrinsic::s390_vuphf
:
6337 case Intrinsic::s390_vuplb
: // VECTOR UNPACK LOW
6338 case Intrinsic::s390_vuplhw
:
6339 case Intrinsic::s390_vuplf
: {
6340 SDValue SrcOp
= Op
.getOperand(1);
6341 APInt SrcDemE
= getDemandedSrcElements(Op
, DemandedElts
, 0);
6342 Known
= DAG
.computeKnownBits(SrcOp
, SrcDemE
, Depth
+ 1);
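      // The source elements are half as wide as the result elements; widen
      // what was learned about them, zero-extending for the logical unpacks
      // and sign-extending for the arithmetic ones.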
      if (IsLogical) {
        Known = Known.zext(BitWidth, true);
      } else
        Known = Known.sext(BitWidth);
      break;
    }
    default:
      break;
    }
  } else {
    switch (Opcode) {
    case SystemZISD::JOIN_DWORDS:
    case SystemZISD::SELECT_CCMASK:
      computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 0);
      break;
    case SystemZISD::REPLICATE: {
      SDValue SrcOp = Op.getOperand(0);
      Known = DAG.computeKnownBits(SrcOp, Depth + 1);
      if (Known.getBitWidth() < BitWidth && isa<ConstantSDNode>(SrcOp))
        Known = Known.sext(BitWidth); // VREPI sign extends the immediate.
      break;
    }
    default:
      break;
    }
  }

  // Known has the width of the source operand(s). Adjust if needed to match
  // the passed bitwidth.
  if (Known.getBitWidth() != BitWidth)
    Known = Known.zextOrTrunc(BitWidth, false);
}
static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts,
                                        const SelectionDAG &DAG, unsigned Depth,
                                        unsigned OpNo) {
  APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo);
  unsigned LHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo), Src0DemE, Depth + 1);
  if (LHS == 1) return 1; // Early out.
  APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1);
  unsigned RHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo + 1), Src1DemE,
                                        Depth + 1);
  if (RHS == 1) return 1; // Early out.
  unsigned Common = std::min(LHS, RHS);
  unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits();
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getScalarSizeInBits();
  if (SrcBitWidth > VTBits) { // PACK
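    // Packing keeps only the low VTBits of each source element, so an element
    // with Common sign bits keeps Common - SrcExtraBits of them; e.g. a
    // 16-bit element with 12 sign bits packed to 8 bits still has 4.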
    unsigned SrcExtraBits = SrcBitWidth - VTBits;
    if (Common > SrcExtraBits)
      return (Common - SrcExtraBits);
    return 1;
  }
  assert (SrcBitWidth == VTBits && "Expected operands of same bitwidth.");
  return Common;
}
unsigned
SystemZTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  if (Op.getResNo() != 0)
    return 1;
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
    unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (Id) {
    case Intrinsic::s390_vpksh:   // PACKS
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:  // PACKS_CC
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:  // PACKLS
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs: // PACKLS_CC
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
    case Intrinsic::s390_vpdi:
    case Intrinsic::s390_vsldb:
    case Intrinsic::s390_vperm:
      return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 1);
    case Intrinsic::s390_vuphb:  // VECTOR UNPACK HIGH
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplb:  // VECTOR UNPACK LOW
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf: {
      SDValue PackedOp = Op.getOperand(1);
      APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 1);
      unsigned Tmp = DAG.ComputeNumSignBits(PackedOp, SrcDemE, Depth + 1);
      EVT VT = Op.getValueType();
      unsigned VTBits = VT.getScalarSizeInBits();
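      // These signed unpacks sign-extend each source element to the wider
      // result element, so the result gains the width difference in extra
      // sign bits on top of what the source already had.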
      Tmp += VTBits - PackedOp.getScalarValueSizeInBits();
      return Tmp;
    }
    default:
      break;
    }
  } else {
    switch (Opcode) {
    case SystemZISD::SELECT_CCMASK:
      return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 0);
    default:
      break;
    }
  }

  return 1;
}
//===----------------------------------------------------------------------===//
// Custom insertion
//===----------------------------------------------------------------------===//

// Create a new basic block after MBB.
static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) {
  MachineFunction &MF = *MBB->getParent();
  MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
  MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
  return NewMBB;
}
// Split MBB after MI and return the new block (the one that contains
// instructions after MI).
static MachineBasicBlock *splitBlockAfter(MachineBasicBlock::iterator MI,
                                          MachineBasicBlock *MBB) {
  MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
  NewMBB->splice(NewMBB->begin(), MBB,
                 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
  return NewMBB;
}
// Split MBB before MI and return the new block (the one that contains MI).
static MachineBasicBlock *splitBlockBefore(MachineBasicBlock::iterator MI,
                                           MachineBasicBlock *MBB) {
  MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
  NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
  NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
  return NewMBB;
}
// Force base value Base into a register before MI.  Return the register.
static Register forceReg(MachineInstr &MI, MachineOperand &Base,
                         const SystemZInstrInfo *TII) {
  if (Base.isReg())
    return Base.getReg();

  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg)
      .add(Base)
      .addImm(0)
      .addReg(0);
  return Reg;
}
// The CC operand of MI might be missing a kill marker because there
// were multiple uses of CC, and ISel didn't know which to mark.
// Figure out whether MI should have had a kill marker.
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB) {
  // Scan forward through BB for a use/def of CC.
  MachineBasicBlock::iterator miI(std::next(MachineBasicBlock::iterator(MI)));
  for (MachineBasicBlock::iterator miE = MBB->end(); miI != miE; ++miI) {
    const MachineInstr &mi = *miI;
    if (mi.readsRegister(SystemZ::CC))
      return false;
    if (mi.definesRegister(SystemZ::CC))
      break; // Should have kill-flag - update below.
  }

  // If we hit the end of the block, check whether CC is live into a
  // successor.
  if (miI == MBB->end()) {
    for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI)
      if ((*SI)->isLiveIn(SystemZ::CC))
        return false;
  }

  return true;
}
// Return true if it is OK for this Select pseudo-opcode to be cascaded
// together with other Select pseudo-opcodes into a single basic-block with
// a conditional jump around it.
static bool isSelectPseudo(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
    return true;

  default:
    return false;
  }
}
// Helper function, which inserts PHI functions into SinkMBB:
//   %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
// where %FalseValue(i) and %TrueValue(i) are taken from the consequent Selects
// in the [MIItBegin, MIItEnd) range.
static void createPHIsForSelects(MachineBasicBlock::iterator MIItBegin,
                                 MachineBasicBlock::iterator MIItEnd,
                                 MachineBasicBlock *TrueMBB,
                                 MachineBasicBlock *FalseMBB,
                                 MachineBasicBlock *SinkMBB) {
  MachineFunction *MF = TrueMBB->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  unsigned CCValid = MIItBegin->getOperand(3).getImm();
  unsigned CCMask = MIItBegin->getOperand(4).getImm();
  DebugLoc DL = MIItBegin->getDebugLoc();

  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();

  // As we are creating the PHIs, we have to be careful if there is more than
  // one.  Later Selects may reference the results of earlier Selects, but later
  // PHIs have to reference the individual true/false inputs from earlier PHIs.
  // That also means that PHI construction must work forward from earlier to
  // later, and that the code must maintain a mapping from earlier PHIs'
  // destination registers to the registers that went into the PHI.
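  // For instance, if a later Select's true input is an earlier Select's
  // result, the later PHI must instead take the earlier Select's true input
  // from TrueMBB and its false input from FalseMBB.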
  DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;

  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd;
       MIIt = skipDebugInstructionsForward(++MIIt, MIItEnd)) {
    Register DestReg = MIIt->getOperand(0).getReg();
    Register TrueReg = MIIt->getOperand(1).getReg();
    Register FalseReg = MIIt->getOperand(2).getReg();

    // If this Select we are generating is the opposite condition from
    // the jump we generated, then we have to swap the operands for the
    // PHI that is going to be generated.
    if (MIIt->getOperand(4).getImm() == (CCValid ^ CCMask))
      std::swap(TrueReg, FalseReg);

    if (RegRewriteTable.find(TrueReg) != RegRewriteTable.end())
      TrueReg = RegRewriteTable[TrueReg].first;

    if (RegRewriteTable.find(FalseReg) != RegRewriteTable.end())
      FalseReg = RegRewriteTable[FalseReg].second;

    BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg)
      .addReg(TrueReg).addMBB(TrueMBB)
      .addReg(FalseReg).addMBB(FalseMBB);

    // Add this PHI to the rewrite table.
    RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
  }

  MF->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
}
// Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitSelect(MachineInstr &MI,
                                  MachineBasicBlock *MBB) const {
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());

  unsigned CCValid = MI.getOperand(3).getImm();
  unsigned CCMask = MI.getOperand(4).getImm();
  DebugLoc DL = MI.getDebugLoc();

  // If we have a sequence of Select* pseudo instructions using the
  // same condition code value, we want to expand all of them into
  // a single pair of basic blocks using the same condition.
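  // (Selects whose condition mask is the inverse, CCValid ^ CCMask, may join
  //  the same group; createPHIsForSelects simply swaps their PHI operands.)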
  MachineInstr *LastMI = &MI;
  MachineBasicBlock::iterator NextMIIt = skipDebugInstructionsForward(
      std::next(MachineBasicBlock::iterator(MI)), MBB->end());

  if (isSelectPseudo(MI))
    while (NextMIIt != MBB->end() && isSelectPseudo(*NextMIIt) &&
           NextMIIt->getOperand(3).getImm() == CCValid &&
           (NextMIIt->getOperand(4).getImm() == CCMask ||
            NextMIIt->getOperand(4).getImm() == (CCValid ^ CCMask))) {
      LastMI = &*NextMIIt;
      NextMIIt = skipDebugInstructionsForward(++NextMIIt, MBB->end());
    }

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *JoinMBB  = splitBlockBefore(MI, MBB);
  MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);

  // Unless CC was killed in the last Select instruction, mark it as
  // live-in to both FalseMBB and JoinMBB.
  if (!LastMI->killsRegister(SystemZ::CC) && !checkCCKill(*LastMI, JoinMBB)) {
    FalseMBB->addLiveIn(SystemZ::CC);
    JoinMBB->addLiveIn(SystemZ::CC);
  }

  //  StartMBB:
  //   BRC CCMask, JoinMBB
  //   # fallthrough to FalseMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
  MBB->addSuccessor(JoinMBB);
  MBB->addSuccessor(FalseMBB);

  //  FalseMBB:
  //   # fallthrough to JoinMBB
  MBB = FalseMBB;
  MBB->addSuccessor(JoinMBB);

  //  JoinMBB:
  //   %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ]
  //   ...
  MBB = JoinMBB;
  MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
  MachineBasicBlock::iterator MIItEnd = skipDebugInstructionsForward(
      std::next(MachineBasicBlock::iterator(LastMI)), MBB->end());
  createPHIsForSelects(MIItBegin, MIItEnd, StartMBB, FalseMBB, MBB);

  StartMBB->erase(MIItBegin, MIItEnd);
  return JoinMBB;
}
// Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
// StoreOpcode is the store to use and Invert says whether the store should
// happen when the condition is false rather than true.  If a STORE ON
// CONDITION is available, STOCOpcode is its opcode, otherwise it is 0.
MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI,
                                                        MachineBasicBlock *MBB,
                                                        unsigned StoreOpcode,
                                                        unsigned STOCOpcode,
                                                        bool Invert) const {
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());

  Register SrcReg = MI.getOperand(0).getReg();
  MachineOperand Base = MI.getOperand(1);
  int64_t Disp = MI.getOperand(2).getImm();
  Register IndexReg = MI.getOperand(3).getReg();
  unsigned CCValid = MI.getOperand(4).getImm();
  unsigned CCMask = MI.getOperand(5).getImm();
  DebugLoc DL = MI.getDebugLoc();

  StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);

  // Use STOCOpcode if possible.  We could use different store patterns in
  // order to avoid matching the index register, but the performance trade-offs
  // might be more complicated in that case.
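  // (STOC-style instructions take no index register, so this fast path only
  //  applies to base + displacement addresses; Invert is folded into CCMask.)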
  if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
    if (Invert)
      CCMask ^= CCValid;

    // ISel pattern matching also adds a load memory operand of the same
    // address, so take special care to find the storing memory operand.
    MachineMemOperand *MMO = nullptr;
    for (auto *I : MI.memoperands())
      if (I->isStore()) {
        MMO = I;
        break;
      }

    BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
      .addReg(SrcReg)
      .add(Base)
      .addImm(Disp)
      .addImm(CCValid)
      .addImm(CCMask)
      .addMemOperand(MMO);

    MI.eraseFromParent();
    return MBB;
  }

  // Get the condition needed to branch around the store.
  if (!Invert)
    CCMask ^= CCValid;

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *JoinMBB  = splitBlockBefore(MI, MBB);
  MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);

  // Unless CC was killed in the CondStore instruction, mark it as
  // live-in to both FalseMBB and JoinMBB.
  if (!MI.killsRegister(SystemZ::CC) && !checkCCKill(MI, JoinMBB)) {
    FalseMBB->addLiveIn(SystemZ::CC);
    JoinMBB->addLiveIn(SystemZ::CC);
  }

  //  StartMBB:
  //   BRC CCMask, JoinMBB
  //   # fallthrough to FalseMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
  MBB->addSuccessor(JoinMBB);
  MBB->addSuccessor(FalseMBB);

  //  FalseMBB:
  //   store %SrcReg, %Disp(%Index,%Base)
  //   # fallthrough to JoinMBB
  MBB = FalseMBB;
  BuildMI(MBB, DL, TII->get(StoreOpcode))
    .addReg(SrcReg).add(Base).addImm(Disp).addReg(IndexReg);
  MBB->addSuccessor(JoinMBB);

  MI.eraseFromParent();
  return JoinMBB;
}
// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_*
// or ATOMIC_SWAP{,W} instruction MI.  BinOpcode is the instruction that
// performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}.
// BitSize is the width of the field in bits, or 0 if this is a partword
// ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize
// is one of the operands.  Invert says whether the field should be
// inverted after performing BinOpcode (e.g. for NAND).
MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned BinOpcode,
    unsigned BitSize, bool Invert) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool IsSubWord = (BitSize < 32);

  // Extract the operands.  Base can be a register or a frame index.
  // Src2 can be a register or immediate.
  Register Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  MachineOperand Src2 = earlyUseOperand(MI.getOperand(3));
  Register BitShift = IsSubWord ? MI.getOperand(4).getReg() : Register();
  Register NegBitShift = IsSubWord ? MI.getOperand(5).getReg() : Register();
  DebugLoc DL = MI.getDebugLoc();
  if (IsSubWord)
    BitSize = MI.getOperand(6).getImm();

  // Subword operations use 32-bit registers.
  const TargetRegisterClass *RC = (BitSize <= 32 ?
                                   &SystemZ::GR32BitRegClass :
                                   &SystemZ::GR64BitRegClass);
  unsigned LOpcode  = BitSize <= 32 ? SystemZ::L  : SystemZ::LG;
  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;

  // Get the right opcodes for the displacement.
  LOpcode  = TII->getOpcodeForOffset(LOpcode,  Disp);
  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  Register OrigVal       = MRI.createVirtualRegister(RC);
  Register OldVal        = MRI.createVirtualRegister(RC);
  Register NewVal        = (BinOpcode || IsSubWord ?
                            MRI.createVirtualRegister(RC) : Src2.getReg());
  Register RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
  Register RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);

  // Insert a basic block for the main loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB  = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB  = emitBlockAfter(StartMBB);

  //  StartMBB:
  //   ...
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   %RotatedNewVal = OP %RotatedOldVal, %Src2
  //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(LoopMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  if (Invert) {
    // Perform the operation normally and then invert every bit of the field.
    Register Tmp = MRI.createVirtualRegister(RC);
    BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2);
    if (BitSize <= 32)
      // XILF with the upper BitSize bits set.
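      // (For example, BitSize == 8 gives the mask 0xff000000; the rotated
      //  field occupies the top BitSize bits of the 32-bit register, so only
      //  those bits are flipped.)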
      BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
        .addReg(Tmp).addImm(-1U << (32 - BitSize));
    else {
      // Use LCGR and add -1 to the result, which is more compact than
      // an XILF, XILH pair.
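      // (LCGR computes -Tmp and the AGHI adds -1; in two's complement
      //  -Tmp - 1 == ~Tmp, so every bit of the 64-bit value is inverted.)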
      Register Tmp2 = MRI.createVirtualRegister(RC);
      BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
      BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
        .addReg(Tmp2).addImm(-1);
    }
  } else if (BinOpcode)
    // A simple binary operation.
    BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
      .addReg(RotatedOldVal)
      .add(Src2);
  else if (IsSubWord)
    // Use RISBG to rotate Src2 into position and use it to replace the
    // field in RotatedOldVal.
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
      .addReg(RotatedOldVal).addReg(Src2.getReg())
      .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
    .addReg(OldVal)
    .addReg(NewVal)
    .add(Base)
    .addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();
  return DoneMBB;
}
// Implement EmitInstrWithCustomInserter for pseudo
// ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI.  CompareOpcode is the
// instruction that should be used to compare the current field with the
// minimum or maximum value.  KeepOldMask is the BRC condition-code mask
// for when the current field should be kept.  BitSize is the width of
// the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned CompareOpcode,
    unsigned KeepOldMask, unsigned BitSize) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool IsSubWord = (BitSize < 32);

  // Extract the operands.  Base can be a register or a frame index.
  Register Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  Register Src2 = MI.getOperand(3).getReg();
  Register BitShift = (IsSubWord ? MI.getOperand(4).getReg() : Register());
  Register NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : Register());
  DebugLoc DL = MI.getDebugLoc();
  if (IsSubWord)
    BitSize = MI.getOperand(6).getImm();

  // Subword operations use 32-bit registers.
  const TargetRegisterClass *RC = (BitSize <= 32 ?
                                   &SystemZ::GR32BitRegClass :
                                   &SystemZ::GR64BitRegClass);
  unsigned LOpcode  = BitSize <= 32 ? SystemZ::L  : SystemZ::LG;
  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;

  // Get the right opcodes for the displacement.
  LOpcode  = TII->getOpcodeForOffset(LOpcode,  Disp);
  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  Register OrigVal       = MRI.createVirtualRegister(RC);
  Register OldVal        = MRI.createVirtualRegister(RC);
  Register NewVal        = MRI.createVirtualRegister(RC);
  Register RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
  Register RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
  Register RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);

  // Insert 3 basic blocks for the loop.
  MachineBasicBlock *StartMBB  = MBB;
  MachineBasicBlock *DoneMBB   = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB   = emitBlockAfter(StartMBB);
  MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
  MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);

  //  StartMBB:
  //   ...
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   CompareOpcode %RotatedOldVal, %Src2
  //   BRC KeepOldMask, UpdateMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(UpdateMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CompareOpcode))
    .addReg(RotatedOldVal).addReg(Src2);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
  MBB->addSuccessor(UpdateMBB);
  MBB->addSuccessor(UseAltMBB);

  //  UseAltMBB:
  //   %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
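  //   (In RISBG's 64-bit bit numbering, bits 32 .. 31 + BitSize are the top
  //    BitSize bits of the low 32-bit word, i.e. where the rotated field
  //    lives, so only that field is replaced by %Src2.)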
  //   # fall through to UpdateMBB
  MBB = UseAltMBB;
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
      .addReg(RotatedOldVal).addReg(Src2)
      .addImm(32).addImm(31 + BitSize).addImm(0);
  MBB->addSuccessor(UpdateMBB);

  //  UpdateMBB:
  //   %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
  //                        [ %RotatedAltVal, UseAltMBB ]
  //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = UpdateMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
    .addReg(RotatedOldVal).addMBB(LoopMBB)
    .addReg(RotatedAltVal).addMBB(UseAltMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
    .addReg(OldVal)
    .addReg(NewVal)
    .add(Base)
    .addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();
  return DoneMBB;
}
// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
// instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
                                          MachineBasicBlock *MBB) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Extract the operands.  Base can be a register or a frame index.
  Register Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  Register OrigCmpVal = MI.getOperand(3).getReg();
  Register OrigSwapVal = MI.getOperand(4).getReg();
  Register BitShift = MI.getOperand(5).getReg();
  Register NegBitShift = MI.getOperand(6).getReg();
  int64_t BitSize = MI.getOperand(7).getImm();
  DebugLoc DL = MI.getDebugLoc();

  const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;

  // Get the right opcodes for the displacement.
  unsigned LOpcode  = TII->getOpcodeForOffset(SystemZ::L,  Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  Register OrigOldVal = MRI.createVirtualRegister(RC);
  Register OldVal = MRI.createVirtualRegister(RC);
  Register CmpVal = MRI.createVirtualRegister(RC);
  Register SwapVal = MRI.createVirtualRegister(RC);
  Register StoreVal = MRI.createVirtualRegister(RC);
  Register RetryOldVal = MRI.createVirtualRegister(RC);
  Register RetryCmpVal = MRI.createVirtualRegister(RC);
  Register RetrySwapVal = MRI.createVirtualRegister(RC);

  // Insert 2 basic blocks for the loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB  = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB  = emitBlockAfter(StartMBB);
  MachineBasicBlock *SetMBB   = emitBlockAfter(LoopMBB);

  //  StartMBB:
  //   ...
  //   %OrigOldVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
    .add(Base)
    .addImm(Disp)
    .addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal  = phi [ %OrigOldVal, EntryBB ], [ %RetryOldVal, SetMBB ]
  //   %CmpVal  = phi [ %OrigCmpVal, EntryBB ], [ %RetryCmpVal, SetMBB ]
  //   %SwapVal = phi [ %OrigSwapVal, EntryBB ], [ %RetrySwapVal, SetMBB ]
  //   %Dest    = RLL %OldVal, BitSize(%BitShift)
  //                ^^ The low BitSize bits contain the field
  //                   of interest.
  //   %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
  //                ^^ Replace the upper 32-BitSize bits of the
  //                   comparison value with those that we loaded,
  //                   so that we can use a full word comparison.
  //   CR %Dest, %RetryCmpVal
  //   JNE DoneMBB
  //   # Fall through to SetMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigOldVal).addMBB(StartMBB)
    .addReg(RetryOldVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
    .addReg(OrigCmpVal).addMBB(StartMBB)
    .addReg(RetryCmpVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
    .addReg(OrigSwapVal).addMBB(StartMBB)
    .addReg(RetrySwapVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
    .addReg(OldVal).addReg(BitShift).addImm(BitSize);
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
    .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::CR))
    .addReg(Dest).addReg(RetryCmpVal);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP)
    .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
  MBB->addSuccessor(DoneMBB);
  MBB->addSuccessor(SetMBB);

  //  SetMBB:
  //   %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
  //                ^^ Replace the upper 32-BitSize bits of the new
  //                   value with those that we loaded.
  //   %StoreVal    = RLL %RetrySwapVal, -BitSize(%NegBitShift)
  //                ^^ Rotate the new field to its proper position.
  //   %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to ExitMBB
  MBB = SetMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
    .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
    .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
  BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
    .addReg(OldVal)
    .addReg(StoreVal)
    .add(Base)
    .addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  // If the CC def wasn't dead in the ATOMIC_CMP_SWAPW, mark CC as live-in
  // to the block after the loop.  At this point, CC may have been defined
  // either by the CR in LoopMBB or by the CS in SetMBB.
  if (!MI.registerDefIsDead(SystemZ::CC))
    DoneMBB->addLiveIn(SystemZ::CC);

  MI.eraseFromParent();
  return DoneMBB;
}
// Emit a move from two GR64s to a GR128.
MachineBasicBlock *
SystemZTargetLowering::emitPair128(MachineInstr &MI,
                                   MachineBasicBlock *MBB) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  Register Lo = MI.getOperand(2).getReg();
  Register Tmp1 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
  Register Tmp2 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Tmp1);
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Tmp2)
    .addReg(Tmp1).addReg(Hi).addImm(SystemZ::subreg_h64);
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(Tmp2).addReg(Lo).addImm(SystemZ::subreg_l64);

  MI.eraseFromParent();
  return MBB;
}
// Emit an extension from a GR64 to a GR128.  ClearEven is true
// if the high register of the GR128 value must be cleared or false if
// it's "don't care".
MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI,
                                                     MachineBasicBlock *MBB,
                                                     bool ClearEven) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  Register In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
  if (ClearEven) {
    Register NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
    Register Zero64   = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);

    BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
      .addImm(0);
    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
      .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
    In128 = NewIn128;
  }
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(In128).addReg(Src).addImm(SystemZ::subreg_l64);

  MI.eraseFromParent();
  return MBB;
}
MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  MachineOperand DestBase = earlyUseOperand(MI.getOperand(0));
  uint64_t DestDisp = MI.getOperand(1).getImm();
  MachineOperand SrcBase = earlyUseOperand(MI.getOperand(2));
  uint64_t SrcDisp = MI.getOperand(3).getImm();
  uint64_t Length = MI.getOperand(4).getImm();

  // When generating more than one CLC, all but the last will need to
  // branch to the end when a difference is found.
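  // (Each MVC/NC/OC/XC/CLC covers at most 256 bytes, so longer lengths are
  //  handled as a sequence of 256-byte pieces, or as a loop below.)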
  MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
                               splitBlockAfter(MI, MBB) : nullptr);

  // Check for the loop form, in which operand 5 is the trip count.
  if (MI.getNumExplicitOperands() > 5) {
    bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);

    Register StartCountReg = MI.getOperand(5).getReg();
    Register StartSrcReg   = forceReg(MI, SrcBase, TII);
    Register StartDestReg  = (HaveSingleBase ? StartSrcReg :
                              forceReg(MI, DestBase, TII));

    const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
    Register ThisSrcReg  = MRI.createVirtualRegister(RC);
    Register ThisDestReg = (HaveSingleBase ? ThisSrcReg :
                            MRI.createVirtualRegister(RC));
    Register NextSrcReg  = MRI.createVirtualRegister(RC);
    Register NextDestReg = (HaveSingleBase ? NextSrcReg :
                            MRI.createVirtualRegister(RC));

    RC = &SystemZ::GR64BitRegClass;
    Register ThisCountReg = MRI.createVirtualRegister(RC);
    Register NextCountReg = MRI.createVirtualRegister(RC);

    MachineBasicBlock *StartMBB = MBB;
    MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
    MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
    MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);

    //  StartMBB:
    //   # fall through to LoopMBB
    MBB->addSuccessor(LoopMBB);

    //  LoopMBB:
    //   %ThisDestReg = phi [ %StartDestReg, StartMBB ],
    //                      [ %NextDestReg, NextMBB ]
    //   %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
    //                     [ %NextSrcReg, NextMBB ]
    //   %ThisCountReg = phi [ %StartCountReg, StartMBB ],
    //                       [ %NextCountReg, NextMBB ]
    //   ( PFD 2, 768+DestDisp(%ThisDestReg) )
    //   Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
    //   ( JLH EndMBB )
    //
    // The prefetch is used only for MVC.  The JLH is used only for CLC.
    MBB = LoopMBB;

    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
      .addReg(StartDestReg).addMBB(StartMBB)
      .addReg(NextDestReg).addMBB(NextMBB);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
        .addReg(StartSrcReg).addMBB(StartMBB)
        .addReg(NextSrcReg).addMBB(NextMBB);
    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
      .addReg(StartCountReg).addMBB(StartMBB)
      .addReg(NextCountReg).addMBB(NextMBB);
    if (Opcode == SystemZ::MVC)
      BuildMI(MBB, DL, TII->get(SystemZ::PFD))
        .addImm(SystemZ::PFD_WRITE)
        .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
    BuildMI(MBB, DL, TII->get(Opcode))
      .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
      .addReg(ThisSrcReg).addImm(SrcDisp);
    if (EndMBB) {
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
    }

    //  NextMBB:
    //   %NextDestReg = LA 256(%ThisDestReg)
    //   %NextSrcReg = LA 256(%ThisSrcReg)
    //   %NextCountReg = AGHI %ThisCountReg, -1
    //   CGHI %NextCountReg, 0
    //   JLH LoopMBB
    //   # fall through to DoneMBB
    //
    // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
    MBB = NextMBB;

    BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
      .addReg(ThisDestReg).addImm(256).addReg(0);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
        .addReg(ThisSrcReg).addImm(256).addReg(0);
    BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
      .addReg(ThisCountReg).addImm(-1);
    BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
      .addReg(NextCountReg).addImm(0);
    BuildMI(MBB, DL, TII->get(SystemZ::BRC))
      .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
      .addMBB(LoopMBB);
    MBB->addSuccessor(LoopMBB);
    MBB->addSuccessor(DoneMBB);

    DestBase = MachineOperand::CreateReg(NextDestReg, false);
    SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
    Length &= 255;
    if (EndMBB && !Length)
      // If the loop handled the whole CLC range, DoneMBB will be empty with
      // CC live-through into EndMBB, so add it as live-in.
      DoneMBB->addLiveIn(SystemZ::CC);
    MBB = DoneMBB;
  }
  // Handle any remaining bytes with straight-line code.
  while (Length > 0) {
    uint64_t ThisLength = std::min(Length, uint64_t(256));
    // The previous iteration might have created out-of-range displacements.
    // Apply them using LAY if so.
    if (!isUInt<12>(DestDisp)) {
      Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
          .add(DestBase)
          .addImm(DestDisp)
          .addReg(0);
      DestBase = MachineOperand::CreateReg(Reg, false);
      DestDisp = 0;
    }
    if (!isUInt<12>(SrcDisp)) {
      Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
          .add(SrcBase)
          .addImm(SrcDisp)
          .addReg(0);
      SrcBase = MachineOperand::CreateReg(Reg, false);
      SrcDisp = 0;
    }
    BuildMI(*MBB, MI, DL, TII->get(Opcode))
        .add(DestBase)
        .addImm(DestDisp)
        .addImm(ThisLength)
        .add(SrcBase)
        .addImm(SrcDisp)
        .setMemRefs(MI.memoperands());
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    Length -= ThisLength;
    // If there's another CLC to go, branch to the end if a difference
    // was found.
    if (EndMBB && Length > 0) {
      MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB);
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
      MBB = NextMBB;
    }
  }
  if (EndMBB) {
    MBB->addSuccessor(EndMBB);
    MBB = EndMBB;
    MBB->addLiveIn(SystemZ::CC);
  }

  MI.eraseFromParent();
  return MBB;
}
// Decompose string pseudo-instruction MI into a loop that continually performs
// Opcode until CC != 3.
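// (CC 3 means the instruction stopped after processing a CPU-determined
//  number of bytes without finishing, so it simply has to be re-executed.)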
MachineBasicBlock *SystemZTargetLowering::emitStringWrapper(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  uint64_t End1Reg = MI.getOperand(0).getReg();
  uint64_t Start1Reg = MI.getOperand(1).getReg();
  uint64_t Start2Reg = MI.getOperand(2).getReg();
  uint64_t CharReg = MI.getOperand(3).getReg();

  const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
  uint64_t This1Reg = MRI.createVirtualRegister(RC);
  uint64_t This2Reg = MRI.createVirtualRegister(RC);
  uint64_t End2Reg  = MRI.createVirtualRegister(RC);

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);

  //  StartMBB:
  //   # fall through to LoopMBB
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
  //   %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
  //   R0L = %CharReg
  //   %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
  //   JO LoopMBB
  //   # fall through to DoneMBB
  //
  // The load of R0L can be hoisted by post-RA LICM.
  MBB = LoopMBB;

  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
    .addReg(Start1Reg).addMBB(StartMBB)
    .addReg(End1Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
    .addReg(Start2Reg).addMBB(StartMBB)
    .addReg(End2Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
  BuildMI(MBB, DL, TII->get(Opcode))
    .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
    .addReg(This1Reg).addReg(This2Reg);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  DoneMBB->addLiveIn(SystemZ::CC);

  MI.eraseFromParent();
  return DoneMBB;
}
// Update TBEGIN instruction with final opcode and register clobbers.
MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode,
    bool NoFloat) const {
  MachineFunction &MF = *MBB->getParent();
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();

  // Update opcode.
  MI.setDesc(TII->get(Opcode));

  // We cannot handle a TBEGIN that clobbers the stack or frame pointer.
  // Make sure to add the corresponding GRSM bits if they are missing.
  uint64_t Control = MI.getOperand(2).getImm();
  static const unsigned GPRControlBit[16] = {
    0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
    0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
  };
[15];
7440 Control
|= GPRControlBit
[11];
7441 MI
.getOperand(2).setImm(Control
);
7443 // Add GPR clobbers.
7444 for (int I
= 0; I
< 16; I
++) {
7445 if ((Control
& GPRControlBit
[I
]) == 0) {
7446 unsigned Reg
= SystemZMC::GR64Regs
[I
];
7447 MI
.addOperand(MachineOperand::CreateReg(Reg
, true, true));
7451 // Add FPR/VR clobbers.
7452 if (!NoFloat
&& (Control
& 4) != 0) {
7453 if (Subtarget
.hasVector()) {
7454 for (int I
= 0; I
< 32; I
++) {
7455 unsigned Reg
= SystemZMC::VR128Regs
[I
];
7456 MI
.addOperand(MachineOperand::CreateReg(Reg
, true, true));
7459 for (int I
= 0; I
< 16; I
++) {
7460 unsigned Reg
= SystemZMC::FP64Regs
[I
];
7461 MI
.addOperand(MachineOperand::CreateReg(Reg
, true, true));
7469 MachineBasicBlock
*SystemZTargetLowering::emitLoadAndTestCmp0(
7470 MachineInstr
&MI
, MachineBasicBlock
*MBB
, unsigned Opcode
) const {
7471 MachineFunction
&MF
= *MBB
->getParent();
7472 MachineRegisterInfo
*MRI
= &MF
.getRegInfo();
7473 const SystemZInstrInfo
*TII
=
7474 static_cast<const SystemZInstrInfo
*>(Subtarget
.getInstrInfo());
7475 DebugLoc DL
= MI
.getDebugLoc();
7477 Register SrcReg
= MI
.getOperand(0).getReg();
7479 // Create new virtual register of the same class as source.
7480 const TargetRegisterClass
*RC
= MRI
->getRegClass(SrcReg
);
7481 Register DstReg
= MRI
->createVirtualRegister(RC
);
7483 // Replace pseudo with a normal load-and-test that models the def as
7485 BuildMI(*MBB
, MI
, DL
, TII
->get(Opcode
), DstReg
)
7487 MI
.eraseFromParent();
MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *MBB) const {
  switch (MI.getOpcode()) {
  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
    return emitSelect(MI, MBB);

  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore32Mux:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
  case SystemZ::CondStore32MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, true);

  case SystemZ::PAIR128:
    return emitPair128(MI, MBB);
  case SystemZ::AEXT128:
    return emitExt128(MI, MBB, false);
  case SystemZ::ZEXT128:
    return emitExt128(MI, MBB, true);

  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0, 0);
  case SystemZ::ATOMIC_SWAP_32:
    return emitAtomicLoadBinary(MI, MBB, 0, 32);
  case SystemZ::ATOMIC_SWAP_64:
    return emitAtomicLoadBinary(MI, MBB, 0, 64);

  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
  case SystemZ::ATOMIC_LOAD_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
  case SystemZ::ATOMIC_LOAD_AHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
  case SystemZ::ATOMIC_LOAD_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
  case SystemZ::ATOMIC_LOAD_AGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
  case SystemZ::ATOMIC_LOAD_AGHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
  case SystemZ::ATOMIC_LOAD_AGFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);

  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
  case SystemZ::ATOMIC_LOAD_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
  case SystemZ::ATOMIC_LOAD_SGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);

  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
  case SystemZ::ATOMIC_LOAD_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
  case SystemZ::ATOMIC_LOAD_NILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
  case SystemZ::ATOMIC_LOAD_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
  case SystemZ::ATOMIC_LOAD_NILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
  case SystemZ::ATOMIC_LOAD_NGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
  case SystemZ::ATOMIC_LOAD_NILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
  case SystemZ::ATOMIC_LOAD_NILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
  case SystemZ::ATOMIC_LOAD_NIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
  case SystemZ::ATOMIC_LOAD_NIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
  case SystemZ::ATOMIC_LOAD_NILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
  case SystemZ::ATOMIC_LOAD_NIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);

  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
  case SystemZ::ATOMIC_LOAD_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
  case SystemZ::ATOMIC_LOAD_OILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
  case SystemZ::ATOMIC_LOAD_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
  case SystemZ::ATOMIC_LOAD_OILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
  case SystemZ::ATOMIC_LOAD_OGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
  case SystemZ::ATOMIC_LOAD_OILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
  case SystemZ::ATOMIC_LOAD_OILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
  case SystemZ::ATOMIC_LOAD_OIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
  case SystemZ::ATOMIC_LOAD_OIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
  case SystemZ::ATOMIC_LOAD_OILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
  case SystemZ::ATOMIC_LOAD_OIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);

  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
  case SystemZ::ATOMIC_LOAD_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
  case SystemZ::ATOMIC_LOAD_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
  case SystemZ::ATOMIC_LOAD_XGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
  case SystemZ::ATOMIC_LOAD_XILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
  case SystemZ::ATOMIC_LOAD_XIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);

  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
  case SystemZ::ATOMIC_LOAD_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
  case SystemZ::ATOMIC_LOAD_NILLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
  case SystemZ::ATOMIC_LOAD_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
  case SystemZ::ATOMIC_LOAD_NILFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
  case SystemZ::ATOMIC_LOAD_NGRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
  case SystemZ::ATOMIC_LOAD_NILL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);

  case SystemZ::ATOMIC_LOADW_MIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_MIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_MIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_MAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_MAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_MAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_LOADW_UMIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_UMIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_UMIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_UMAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_UMAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_UMAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::MVCSequence:
  case SystemZ::MVCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCSequence:
  case SystemZ::NCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCSequence:
  case SystemZ::OCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCSequence:
  case SystemZ::XCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCSequence:
  case SystemZ::CLCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  case SystemZ::TBEGIN:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
  case SystemZ::TBEGIN_nofloat:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
  case SystemZ::TBEGINC:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
  case SystemZ::LTEBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
  case SystemZ::LTDBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
  case SystemZ::LTXBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);

  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
    return emitPatchPoint(MI, MBB);

  default:
    llvm_unreachable("Unexpected instr type to insert");
  }
}
// This is only used by the isel schedulers, and is needed only to prevent
// the compiler from crashing when list-ilp is used.
const TargetRegisterClass *
SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
  if (VT == MVT::Untyped)
    return &SystemZ::ADDR128BitRegClass;
  return TargetLowering::getRepRegClassFor(VT);