//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsS390.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include <cctype>

using namespace llvm;

#define DEBUG_TYPE "systemz-lower"

namespace {
// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In, SDValue ChainIn)
      : Op0(Op0In), Op1(Op1In), Chain(ChainIn),
        Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // Chain if this is a strict floating-point comparison.
  SDValue Chain;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value.  Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
  unsigned CCMask;
};
} // end anonymous namespace

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
                                             const SystemZSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize(0));

  // Set up the register classes.
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  if (!useSoftFloat()) {
    if (Subtarget.hasVector()) {
      addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass);
      addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass);
    } else {
      addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
      addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
    }
    if (Subtarget.hasVectorEnhancements1())
      addRegisterClass(MVT::f128, &SystemZ::VR128BitRegClass);
    else
      addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

    if (Subtarget.hasVector()) {
      addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v8i16, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v4i32, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v2i64, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v4f32, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v2f64, &SystemZ::VR128BitRegClass);
    }
  }

  // Compute derived properties from the register classes
  computeRegisterProperties(Subtarget.getRegisterInfo());

  // Set up special registers.
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(Align(2));
  // For performance reasons we prefer 16-byte alignment.
  setPrefFunctionAlignment(Align(16));

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      setOperationAction(ISD::ABS, VT, Legal);

      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Support addition/subtraction with overflow.
      setOperationAction(ISD::SADDO, VT, Custom);
      setOperationAction(ISD::SSUBO, VT, Custom);

      // Support addition/subtraction with carry.
      setOperationAction(ISD::UADDO, VT, Custom);
      setOperationAction(ISD::USUBO, VT, Custom);

      // Support carry in as value rather than glue.
      setOperationAction(ISD::ADDCARRY, VT, Custom);
      setOperationAction(ISD::SUBCARRY, VT, Custom);

      // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
      // stores, putting a serialization instruction after the stores.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);

      // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
      // available, or if the operand is constant.
      setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);

      // Use POPCNT on z196 and above.
      if (Subtarget.hasPopulationCount())
        setOperationAction(ISD::CTPOP, VT, Custom);
      else
        setOperationAction(ISD::CTPOP, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // Only z196 and above have native support for conversions to unsigned.
      // On z10, promoting to i64 doesn't generate an inexact condition for
      // values that are outside the i32 range but in the i64 range, so use
      // the default expansion.
      if (!Subtarget.hasFPExtension())
        setOperationAction(ISD::FP_TO_UINT, VT, Expand);

      // Mirror those settings for STRICT_FP_TO_[SU]INT.  Note that these all
      // default to Expand, so need to be modified to Legal where appropriate.
      setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Legal);
      if (Subtarget.hasFPExtension())
        setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Legal);

      // And similarly for STRICT_[SU]INT_TO_FP.
      setOperationAction(ISD::STRICT_SINT_TO_FP, VT, Legal);
      if (Subtarget.hasFPExtension())
        setOperationAction(ISD::STRICT_UINT_TO_FP, VT, Legal);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);

  // Even though i128 is not a legal type, we still need to custom lower
  // the atomic operations in order to exploit SystemZ instructions.
  setOperationAction(ISD::ATOMIC_LOAD,  MVT::i128, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);

  // We can use the CC result of compare-and-swap to implement
  // the "success" result of ATOMIC_CMP_SWAP_WITH_SUCCESS.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Traps are legal, as we will convert them to "j .+2".
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // z10 has instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  if (!Subtarget.hasFPExtension()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
  }

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // On z15 we have native support for a 64-bit CTPOP.
  if (Subtarget.hasMiscellaneousExtensions3()) {
    setOperationAction(ISD::CTPOP, MVT::i32, Promote);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  }

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // Expand 128 bit shifts without using a libcall.
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
  setLibcallName(RTLIB::SRL_I128, nullptr);
  setLibcallName(RTLIB::SHL_I128, nullptr);
  setLibcallName(RTLIB::SRA_I128, nullptr);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD,  VT, MVT::i1, Promote);
  }

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE,    MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    // Assume by default that all vector operations need to be expanded.
    for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
      if (getOperationAction(Opcode, VT) == Legal)
        setOperationAction(Opcode, VT, Expand);

    // Likewise all truncating stores and extending loads.
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
    }

    if (isTypeLegal(VT)) {
      // These operations are legal for anything that can be stored in a
      // vector register, even if there is no native support for the format
      // as such. In particular, we can do these for v4f32 even though there
      // are no specific instructions for that format.
      setOperationAction(ISD::LOAD, VT, Legal);
      setOperationAction(ISD::STORE, VT, Legal);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::BITCAST, VT, Legal);
      setOperationAction(ISD::UNDEF, VT, Legal);

      // Likewise, except that we need to replace the nodes with something
      // more specific.
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    }
  }

  // Handle integer vector types.
  for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
    if (isTypeLegal(VT)) {
      // These operations have direct equivalents.
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);
      if (VT != MVT::v2i64)
        setOperationAction(ISD::MUL, VT, Legal);
      setOperationAction(ISD::ABS, VT, Legal);
      setOperationAction(ISD::AND, VT, Legal);
      setOperationAction(ISD::OR, VT, Legal);
      setOperationAction(ISD::XOR, VT, Legal);
      if (Subtarget.hasVectorEnhancements1())
        setOperationAction(ISD::CTPOP, VT, Legal);
      else
        setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTTZ, VT, Legal);
      setOperationAction(ISD::CTLZ, VT, Legal);

      // Convert a GPR scalar to a vector by inserting it into element 0.
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);

      // Use a series of unpacks for extensions.
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);

      // Detect shifts by a scalar amount and convert them into
      // V*_BY_SCALAR.
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
      setOperationAction(ISD::SRL, VT, Custom);

      // At present ROTL isn't matched by DAGCombiner.  ROTR should be
      // converted into ROTL.
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands
      // and inverting the result as necessary.
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
      if (Subtarget.hasVectorEnhancements1())
        setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
    }
  }

  if (Subtarget.hasVector()) {
    // There should be no need to check for float types other than v2f64
    // since <2 x f32> isn't a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2f64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal);

    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f64, Legal);
  }

  if (Subtarget.hasVectorEnhancements2()) {
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4f32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4f32, Legal);

    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4f32, Legal);
  }

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);

      // Handle constrained floating-point operations.
      setOperationAction(ISD::STRICT_FADD, VT, Legal);
      setOperationAction(ISD::STRICT_FSUB, VT, Legal);
      setOperationAction(ISD::STRICT_FMUL, VT, Legal);
      setOperationAction(ISD::STRICT_FDIV, VT, Legal);
      setOperationAction(ISD::STRICT_FMA, VT, Legal);
      setOperationAction(ISD::STRICT_FSQRT, VT, Legal);
      setOperationAction(ISD::STRICT_FRINT, VT, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, VT, Legal);
      setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
        setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
        setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
        setOperationAction(ISD::STRICT_FROUND, VT, Legal);
        setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
      }
    }
  }

  // Handle floating-point vector types.
  if (Subtarget.hasVector()) {
    // Scalar-to-vector conversion is just a subreg.
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);

    // Some insertions and extractions can be done directly but others
    // need to go via integers.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    // These operations have direct equivalents.
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FABS, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

    // Handle constrained floating-point operations.
    setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);
  }

  // The vector enhancements facility 1 has instructions for these.
  if (Subtarget.hasVectorEnhancements1()) {
    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::v2f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f128, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f128, Legal);

    // Handle constrained floating-point operations.
    setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
    for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
                     MVT::v4f32, MVT::v2f64 }) {
      setOperationAction(ISD::STRICT_FMAXNUM, VT, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, VT, Legal);
      setOperationAction(ISD::STRICT_FMAXIMUM, VT, Legal);
      setOperationAction(ISD::STRICT_FMINIMUM, VT, Legal);
    }
  }

  // We only have fused f128 multiply-addition on vector registers.
  if (!Subtarget.hasVectorEnhancements1()) {
    setOperationAction(ISD::FMA, MVT::f128, Expand);
    setOperationAction(ISD::STRICT_FMA, MVT::f128, Expand);
  }

  // We don't have a copysign instruction on vector registers.
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of a f80 constant (in cases where the constant
  // would fit in an f80).
  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);

  // We don't have extending load instruction on vector registers.
  if (Subtarget.hasVectorEnhancements1()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
  }

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64,  MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  if (!Subtarget.hasVector()) {
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::f32, Custom);
  }

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY,  MVT::Other, Custom);
  setOperationAction(ISD::VAEND,   MVT::Other, Expand);

  // Codes for which we want to perform some z-specific combinations.
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::FP_ROUND);
  setTargetDAGCombine(ISD::STRICT_FP_ROUND);
  setTargetDAGCombine(ISD::FP_EXTEND);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::STRICT_FP_EXTEND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::SDIV);
  setTargetDAGCombine(ISD::UDIV);
  setTargetDAGCombine(ISD::SREM);
  setTargetDAGCombine(ISD::UREM);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);

  // Handle intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable.  E.g.  "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC".  Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;

  // Default to having -disable-strictnode-mutation on
  IsStrictFPEnabled = true;
}

bool SystemZTargetLowering::useSoftFloat() const {
  return Subtarget.hasSoftFloat();
}

EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL,
                                              LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(
    const MachineFunction &MF, EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return Subtarget.hasVectorEnhancements1();
  default:
    break;
  }

  return false;
}

// Return true if the constant can be generated with a vector instruction,
// such as VGM, VGMB or VREPI.
bool SystemZVectorConstantInfo::isVectorConstantLegal(
    const SystemZSubtarget &Subtarget) {
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  if (!Subtarget.hasVector() ||
      (isFP128 && !Subtarget.hasVectorEnhancements1()))
    return false;

  // Try using VECTOR GENERATE BYTE MASK.  This is the architecturally-
  // preferred way of creating all-zero and all-one vectors so give it
  // priority over other methods below.
  unsigned Mask = 0;
  unsigned I = 0;
  for (; I < SystemZ::VectorBytes; ++I) {
    uint64_t Byte = IntBits.lshr(I * 8).trunc(8).getZExtValue();
    if (Byte == 0xff)
      Mask |= 1ULL << I;
    else if (Byte != 0)
      break;
  }
  if (I == SystemZ::VectorBytes) {
    Opcode = SystemZISD::BYTE_MASK;
    OpVals.push_back(Mask);
    VecVT = MVT::getVectorVT(MVT::getIntegerVT(8), 16);
    return true;
  }

  if (SplatBitSize > 64)
    return false;

  auto tryValue = [&](uint64_t Value) -> bool {
    // Try VECTOR REPLICATE IMMEDIATE
    int64_t SignedValue = SignExtend64(Value, SplatBitSize);
    if (isInt<16>(SignedValue)) {
      OpVals.push_back(((unsigned) SignedValue));
      Opcode = SystemZISD::REPLICATE;
      VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
                               SystemZ::VectorBits / SplatBitSize);
      return true;
    }
    // Try VECTOR GENERATE MASK
    unsigned Start, End;
    if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {
      // isRxSBGMask returns the bit numbers for a full 64-bit value, with 0
      // denoting 1 << 63 and 63 denoting 1.  Convert them to bit numbers for
      // an SplatBitSize value, so that 0 denotes 1 << (SplatBitSize-1).
      OpVals.push_back(Start - (64 - SplatBitSize));
      OpVals.push_back(End - (64 - SplatBitSize));
      Opcode = SystemZISD::ROTATE_MASK;
      VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
                               SystemZ::VectorBits / SplatBitSize);
      return true;
    }
    return false;
  };

  // First try assuming that any undefined bits above the highest set bit
  // and below the lowest set bit are 1s.  This increases the likelihood of
  // being able to use a sign-extended element value in VECTOR REPLICATE
  // IMMEDIATE or a wraparound mask in VECTOR GENERATE MASK.
  uint64_t SplatBitsZ = SplatBits.getZExtValue();
  uint64_t SplatUndefZ = SplatUndef.getZExtValue();
  uint64_t Lower =
      (SplatUndefZ & ((uint64_t(1) << findFirstSet(SplatBitsZ)) - 1));
  uint64_t Upper =
      (SplatUndefZ & ~((uint64_t(1) << findLastSet(SplatBitsZ)) - 1));
  if (tryValue(SplatBitsZ | Upper | Lower))
    return true;

  // Now try assuming that any undefined bits between the first and
  // last defined set bits are set.  This increases the chances of
  // using a non-wraparound mask.
  uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
  return tryValue(SplatBitsZ | Middle);
}

SystemZVectorConstantInfo::SystemZVectorConstantInfo(APFloat FPImm) {
  IntBits = FPImm.bitcastToAPInt().zextOrSelf(128);
  isFP128 = (&FPImm.getSemantics() == &APFloat::IEEEquad());
  SplatBits = FPImm.bitcastToAPInt();
  unsigned Width = SplatBits.getBitWidth();
  IntBits <<= (SystemZ::VectorBits - Width);

  // Find the smallest splat.
  while (Width > 8) {
    unsigned HalfSize = Width / 2;
    APInt HighValue = SplatBits.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatBits.trunc(HalfSize);

    // If the two halves do not match, stop here.
    if (HighValue != LowValue || 8 > HalfSize)
      break;

    SplatBits = HighValue;
    Width = HalfSize;
  }
  SplatUndef = 0;
  SplatBitSize = Width;
}

SystemZVectorConstantInfo::SystemZVectorConstantInfo(BuildVectorSDNode *BVN) {
  assert(BVN->isConstant() && "Expected a constant BUILD_VECTOR");
  bool HasAnyUndefs;

  // Get IntBits by finding the 128 bit splat.
  BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
                       true);

  // Get SplatBits by finding the 8 bit or greater splat.
  BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
                       true);
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                         bool ForCodeSize) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  if (Imm.isZero() || Imm.isNegZero())
    return true;

  return SystemZVectorConstantInfo(Imm).isVectorConstantLegal(Subtarget);
}

/// Returns true if stack probing through inline assembly is requested.
bool SystemZTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
  // If the function specifically requests inline stack probes, emit them.
  if (MF.getFunction().hasFnAttribute("probe-stack"))
    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
           "inline-asm";
  return false;
}

) const {
846 // We can use CGFI or CLGFI.
847 return isInt
<32>(Imm
) || isUInt
<32>(Imm
);
850 bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm
) const {
851 // We can use ALGFI or SLGFI.
852 return isUInt
<32>(Imm
) || isUInt
<32>(-Imm
);
bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned, Align, MachineMemOperand::Flags, bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

// Information about the addressing mode for a memory access.
struct AddressingMode {
  // True if a long displacement is supported.
  bool LongDisplacement;

  // True if use of index register is supported.
  bool IndexReg;

  AddressingMode(bool LongDispl, bool IdxReg) :
    LongDisplacement(LongDispl), IndexReg(IdxReg) {}
};

// Return the desired addressing mode for a Load which has only one use (in
// the same block) which is a Store.
static AddressingMode getLoadStoreAddrMode(bool HasVector,
                                           Type *Ty) {
  // With vector support a Load->Store combination may be combined to either
  // an MVC or vector operations and it seems to work best to allow the
  // vector addressing mode.
  if (HasVector)
    return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);

  // Otherwise only the MVC case is special.
  bool MVC = Ty->isIntegerTy(8);
  return AddressingMode(!MVC/*LongDispl*/, !MVC/*IdxReg*/);
}

// Return the addressing mode which seems most desirable given an LLVM
// Instruction pointer.
static AddressingMode
supportedAddressingMode(Instruction *I, bool HasVector) {
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
    }
  }

  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&
              (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
            // Comparison of memory with 16 bit signed / unsigned immediate
            return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
      } else if (isa<StoreInst>(SingleUser))
        // Load->Store
        return getLoadStoreAddrMode(HasVector, I->getType());
    }
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
        // Load->Store
        return getLoadStoreAddrMode(HasVector, LoadI->getType());
  }

  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {

    // * Use LDE instead of LE/LEY for z13 to avoid partial register
    //   dependencies (LDE only supports small offsets).
    // * Utilize the vector registers to hold floating point
    //   values (vector load / store instructions only support small
    //   offsets).

    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                         I->getOperand(0)->getType());
    bool IsFPAccess = MemAccessTy->isFloatingPointTy();
    bool IsVectorAccess = MemAccessTy->isVectorTy();

    // A store of an extracted vector element will be combined into a VSTE type
    // instruction.
    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;
    }

    // A load which gets inserted into a vector element will be combined into a
    // VLE type instruction.
    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;
    }

    if (IsFPAccess || IsVectorAccess)
      return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);
  }

  return AddressingMode(true/*LongDispl*/, true/*IdxReg*/);
}

bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
       const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  AddressingMode SupportedAM(true, true);
  if (I != nullptr)
    SupportedAM = supportedAddressingMode(I, Subtarget.hasVector());

  if (!SupportedAM.LongDisplacement && !isUInt<12>(AM.BaseOffs))
    return false;

  if (!SupportedAM.IndexReg)
    // No indexing allowed.
    return AM.Scale == 0;
  else
    // Indexing is OK but no scale factor can be applied.
    return AM.Scale == 0 || AM.Scale == 1;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedSize();
  unsigned ToBits = ToType->getPrimitiveSizeInBits().getFixedSize();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getFixedSizeInBits();
  unsigned ToBits = ToVT.getFixedSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
    case 'v': // Vector register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Immediate;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'v': // Vector register
    if ((type->isVectorTy() || type->isFloatingPointTy()) &&
        Subtarget.hasVector())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified.  MC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC,
                    const unsigned *Map, unsigned Size) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    unsigned Index;
    bool Failed =
        Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index);
    if (!Failed && Index < Size && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0U, nullptr);
}

std::pair<unsigned, const TargetRegisterClass *>
SystemZTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (!useSoftFloat()) {
        if (VT == MVT::f64)
          return std::make_pair(0U, &SystemZ::FP64BitRegClass);
        else if (VT == MVT::f128)
          return std::make_pair(0U, &SystemZ::FP128BitRegClass);
        return std::make_pair(0U, &SystemZ::FP32BitRegClass);
      }
      break;

    case 'v': // Vector register
      if (Subtarget.hasVector()) {
        if (VT == MVT::f32)
          return std::make_pair(0U, &SystemZ::VR32BitRegClass);
        if (VT == MVT::f64)
          return std::make_pair(0U, &SystemZ::VR64BitRegClass);
        return std::make_pair(0U, &SystemZ::VR128BitRegClass);
      }
      break;
    }
  }
  if (Constraint.size() > 0 && Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT.  The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs, 16);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs, 16);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs, 16);
    }
    if (Constraint[1] == 'f') {
      if (useSoftFloat())
        return std::make_pair(
            0u, static_cast<const TargetRegisterClass *>(nullptr));
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs, 16);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs, 16);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs, 16);
    }
    if (Constraint[1] == 'v') {
      if (!Subtarget.hasVector())
        return std::make_pair(
            0u, static_cast<const TargetRegisterClass *>(nullptr));
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::VR32BitRegClass,
                                   SystemZMC::VR32Regs, 32);
      if (VT == MVT::f64)
        return parseRegisterNumber(Constraint, &SystemZ::VR64BitRegClass,
                                   SystemZMC::VR64Regs, 32);
      return parseRegisterNumber(Constraint, &SystemZ::VR128BitRegClass,
                                 SystemZMC::VR128Regs, 32);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register SystemZTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                                  const MachineFunction &MF) const {
  Register Reg = StringSwitch<Register>(RegName)
                   .Case("r15", SystemZ::R15D)
                   .Default(0);
  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name global variable");
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

const MCPhysReg *SystemZTargetLowering::getScratchRegisters(
  CallingConv::ID) const {
  static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
                                           SystemZ::R14D, 0 };
  return ScratchRegs;
}

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  return CI->isTailCall();
}

// We do not yet support 128-bit single-element vector types.  If the user
// attempts to use such types as function argument or return type, prefer
// to error out instead of emitting code violating the ABI.
static void VerifyVectorType(MVT VT, EVT ArgVT) {
  if (ArgVT.isVector() && !VT.isVector())
    report_fatal_error("Unsupported vector argument or return type");
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::InputArg> &Ins) {
  for (unsigned i = 0; i < Ins.size(); ++i)
    VerifyVectorType(Ins[i].VT, Ins[i].ArgVT);
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::OutputArg> &Outs) {
  for (unsigned i = 0; i < Outs.size(); ++i)
    VerifyVectorType(Outs[i].VT, Outs[i].ArgVT);
}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()).  Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::BCvt) {
    // If this is a short vector argument loaded from the stack,
    // extend from i64 to full vector size and then bitcast.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)});
    Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value);
  } else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA.  Return a copy of Value converted to
// VA.getLocVT().  The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::BCvt:
    // If this is a short vector argument to be stored to the stack,
    // bitcast to v2i64 and then extract first element.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Value);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VA.getLocVT(), Value,
                       DAG.getConstant(0, DL, MVT::i32));
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

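// For inline asm, an i128 operand lives in an untyped GR128 register pair.
// The two helpers below convert between that representation and a plain
// i128 value built from its two i64 halves.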
static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In) {
  SDLoc DL(In);
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
                           DAG.getIntPtrConstant(0, DL));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
                           DAG.getIntPtrConstant(1, DL));
  SDNode *Pair = DAG.getMachineNode(SystemZ::PAIR128, DL,
                                    MVT::Untyped, Hi, Lo);
  return SDValue(Pair, 0);
}

lowerGR128ToI128(SelectionDAG
&DAG
, SDValue In
) {
1384 SDValue Hi
= DAG
.getTargetExtractSubreg(SystemZ::subreg_h64
,
1386 SDValue Lo
= DAG
.getTargetExtractSubreg(SystemZ::subreg_l64
,
1388 return DAG
.getNode(ISD::BUILD_PAIR
, DL
, MVT::i128
, Lo
, Hi
);
bool SystemZTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
  EVT ValueVT = Val.getValueType();
  assert((ValueVT != MVT::i128 ||
          ((NumParts == 1 && PartVT == MVT::Untyped) ||
           (NumParts == 2 && PartVT == MVT::i64))) &&
         "Unknown handling of i128 value.");
  if (ValueVT == MVT::i128 && NumParts == 1) {
    // Inline assembly operand.
    Parts[0] = lowerI128ToGR128(DAG, Val);
    return true;
  }
  return false;
}

SystemZTargetLowering::joinRegisterPartsIntoValue(
1408 SelectionDAG
&DAG
, const SDLoc
&DL
, const SDValue
*Parts
, unsigned NumParts
,
1409 MVT PartVT
, EVT ValueVT
, Optional
<CallingConv::ID
> CC
) const {
1410 assert((ValueVT
!= MVT::i128
||
1411 ((NumParts
== 1 && PartVT
== MVT::Untyped
) ||
1412 (NumParts
== 2 && PartVT
== MVT::i64
))) &&
1413 "Unknown handling of i128 value.");
1414 if (ValueVT
== MVT::i128
&& NumParts
== 1)
1415 // Inline assembly operand.
1416 return lowerGR128ToI128(DAG
, Parts
[0]);
SDValue SystemZTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
      MF.getInfo<SystemZMachineFunctionInfo>();
  auto *TFL =
      static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Detect unsupported vector argument types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Ins);

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        RC = &SystemZ::VR128BitRegClass;
        break;
      }

      Register VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.  Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
                          DAG.getIntPtrConstant(4, DL));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(MF, FI));
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    if (VA.getLocInfo() == CCValAssign::Indirect) {
      InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
                                   MachinePointerInfo()));
      // If the original argument was split (e.g. i128), we need
      // to load all parts of it here (using the same address).
      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert (Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        CCValAssign &PartVA = ArgLocs[I + 1];
        unsigned PartOffset = Ins[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                     MachinePointerInfo()));
        ++I;
      }
    } else
      InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be.  The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset =
        -SystemZMC::ELFCallFrameSize +
        TFL->getRegSpillOffset(MF, SystemZ::R2D) - 16;
    unsigned RegSaveIndex = MFI.CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots.  (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::ELFNumArgFPRs && !useSoftFloat()) {
      SDValue MemOps[SystemZ::ELFNumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::ELFNumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(MF, SystemZ::ELFArgFPRs[I]);
        int FI =
            MFI.CreateFixedObject(8, -SystemZMC::ELFCallFrameSize + Offset,
                                  true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
        unsigned VReg = MF.addLiveIn(SystemZ::ELFArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(MF, FI));
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          makeArrayRef(&MemOps[NumFixedFPRs],
                                       SystemZ::ELFNumArgFPRs - NumFixedFPRs));
    }
  }

  return Chain;
}
static bool canUseSiblingCall(const CCState &ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs,
                              SmallVectorImpl<ISD::OutputArg> &Outs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the callee-saved argument register R6, or if the call uses
  // the callee-saved register arguments SwiftSelf and SwiftError.
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    Register Reg = VA.getLocReg();
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
      return false;
    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
      return false;
  }
  return true;
}
SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy(MF.getDataLayout());
  LLVMContext &Ctx = *DAG.getContext();

  // Detect unsupported vector argument and return types.
  if (Subtarget.hasVector()) {
    VerifyVectorTypes(Outs);
    VerifyVectorTypes(Ins);
  }

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, Ctx);
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      unsigned ArgIndex = Outs[I].OrigArgIndex;
      EVT SlotVT;
      if (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        // Allocate the full stack space for a promoted (and split) argument.
        Type *OrigArgType = CLI.Args[Outs[I].OrigArgIndex].Ty;
        EVT OrigArgVT = getValueType(MF.getDataLayout(), OrigArgType);
        MVT PartVT = getRegisterTypeForCallingConv(Ctx, CLI.CallConv, OrigArgVT);
        unsigned N = getNumRegistersForCallingConv(Ctx, CLI.CallConv, OrigArgVT);
        SlotVT = EVT::getIntegerVT(Ctx, PartVT.getSizeInBits() * N);
      } else {
        SlotVT = Outs[I].ArgVT;
      }
      SDValue SpillSlot = DAG.CreateStackTemporary(SlotVT);
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                       MachinePointerInfo::getFixedStack(MF, FI)));
      // If the original argument was split (e.g. i128), we need
      // to store all parts of it here (and pass just one address).
      assert (Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI)));
        assert((PartOffset + PartValue.getValueType().getStoreSize() <=
                SlotVT.getStoreSize()) && "Not enough space for argument part!");
        ++I;
      }
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.  Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::ELFCallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset, DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.  Force %r1 to be used for indirect
  // tail calls.
  SDValue Glue;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
  DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx);
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}
bool SystemZTargetLowering::
CanLowerReturn(CallingConv::ID CallConv,
               MachineFunction &MF, bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Special case that we cannot easily detect in RetCC_SystemZ since
  // i128 is not a legal type.
  for (auto &Out : Outs)
    if (Out.ArgVT == MVT::i128)
      return false;

  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
}
SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  if (CallConv == CallingConv::GHC)
    report_fatal_error("GHC functions return void only");

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    Register Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
}
// Return true if Op is an intrinsic node with chain that returns the CC value
// as its only (other) argument.  Provide the associated SystemZISD opcode and
// the mask of valid CC values if so.
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode,
                                      unsigned &CCValid) {
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  switch (Id) {
  case Intrinsic::s390_tbegin:
    Opcode = SystemZISD::TBEGIN;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tbegin_nofloat:
    Opcode = SystemZISD::TBEGIN_NOFLOAT;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tend:
    Opcode = SystemZISD::TEND;
    CCValid = SystemZ::CCMASK_TEND;
    return true;

  default:
    return false;
  }
}
// Return true if Op is an intrinsic node without chain that returns the
// CC value as its final argument.  Provide the associated SystemZISD
// opcode and the mask of valid CC values if so.
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) {
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (Id) {
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
    Opcode = SystemZISD::PACKS_CC;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
    Opcode = SystemZISD::PACKLS_CC;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vceqbs:
  case Intrinsic::s390_vceqhs:
  case Intrinsic::s390_vceqfs:
  case Intrinsic::s390_vceqgs:
    Opcode = SystemZISD::VICMPES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vchbs:
  case Intrinsic::s390_vchhs:
  case Intrinsic::s390_vchfs:
  case Intrinsic::s390_vchgs:
    Opcode = SystemZISD::VICMPHS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vchlbs:
  case Intrinsic::s390_vchlhs:
  case Intrinsic::s390_vchlfs:
  case Intrinsic::s390_vchlgs:
    Opcode = SystemZISD::VICMPHLS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vtm:
    Opcode = SystemZISD::VTM;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfaebs:
  case Intrinsic::s390_vfaehs:
  case Intrinsic::s390_vfaefs:
    Opcode = SystemZISD::VFAE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfaezbs:
  case Intrinsic::s390_vfaezhs:
  case Intrinsic::s390_vfaezfs:
    Opcode = SystemZISD::VFAEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfeebs:
  case Intrinsic::s390_vfeehs:
  case Intrinsic::s390_vfeefs:
    Opcode = SystemZISD::VFEE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfeezbs:
  case Intrinsic::s390_vfeezhs:
  case Intrinsic::s390_vfeezfs:
    Opcode = SystemZISD::VFEEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfenebs:
  case Intrinsic::s390_vfenehs:
  case Intrinsic::s390_vfenefs:
    Opcode = SystemZISD::VFENE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfenezbs:
  case Intrinsic::s390_vfenezhs:
  case Intrinsic::s390_vfenezfs:
    Opcode = SystemZISD::VFENEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vistrbs:
  case Intrinsic::s390_vistrhs:
  case Intrinsic::s390_vistrfs:
    Opcode = SystemZISD::VISTR_CC;
    CCValid = SystemZ::CCMASK_0 | SystemZ::CCMASK_3;
    return true;

  case Intrinsic::s390_vstrcbs:
  case Intrinsic::s390_vstrchs:
  case Intrinsic::s390_vstrcfs:
    Opcode = SystemZISD::VSTRC_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vstrczbs:
  case Intrinsic::s390_vstrczhs:
  case Intrinsic::s390_vstrczfs:
    Opcode = SystemZISD::VSTRCZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vstrsb:
  case Intrinsic::s390_vstrsh:
  case Intrinsic::s390_vstrsf:
    Opcode = SystemZISD::VSTRS_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vstrszb:
  case Intrinsic::s390_vstrszh:
  case Intrinsic::s390_vstrszf:
    Opcode = SystemZISD::VSTRSZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfcedbs:
  case Intrinsic::s390_vfcesbs:
    Opcode = SystemZISD::VFCMPES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfchdbs:
  case Intrinsic::s390_vfchsbs:
    Opcode = SystemZISD::VFCMPHS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfchedbs:
  case Intrinsic::s390_vfchesbs:
    Opcode = SystemZISD::VFCMPHES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vftcidb:
  case Intrinsic::s390_vftcisb:
    Opcode = SystemZISD::VFTCI;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_tdc:
    Opcode = SystemZISD::TDC;
    CCValid = SystemZ::CCMASK_TDC;
    return true;

  default:
    return false;
  }
}
// Emit an intrinsic with chain and an explicit CC register result.
static SDNode *emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op,
                                           unsigned Opcode) {
  // Copy all operands except the intrinsic ID.
  unsigned NumOps = Op.getNumOperands();
  SmallVector<SDValue, 6> Ops;
  Ops.reserve(NumOps - 1);
  Ops.push_back(Op.getOperand(0));
  for (unsigned I = 2; I < NumOps; ++I)
    Ops.push_back(Op.getOperand(I));

  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
  SDVTList RawVTs = DAG.getVTList(MVT::i32, MVT::Other);
  SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops);
  SDValue OldChain = SDValue(Op.getNode(), 1);
  SDValue NewChain = SDValue(Intr.getNode(), 1);
  DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
  return Intr.getNode();
}
// Emit an intrinsic with an explicit CC register result.
static SDNode *emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op,
                                   unsigned Opcode) {
  // Copy all operands except the intrinsic ID.
  unsigned NumOps = Op.getNumOperands();
  SmallVector<SDValue, 6> Ops;
  Ops.reserve(NumOps - 1);
  for (unsigned I = 1; I < NumOps; ++I)
    Ops.push_back(Op.getOperand(I));

  SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), Op->getVTList(), Ops);
  return Intr.getNode();
}
// CC is a comparison that will be implemented using an integer or
// floating-point comparison.  Return the condition code mask for
// a branch on true.  In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones.  In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}
// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
  if (C.ICmpType == SystemZICMP::UnsignedOnly)
    return;

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1)
    return;

  int64_t Value = ConstOp1->getSExtValue();
  if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
      (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
    C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType());
  }
}
// If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
// adjust the operands as necessary.
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL,
                             Comparison &C) {
  // For us to make any changes, it must be a comparison between a single-use
  // load and a constant.
  if (!C.Op0.hasOneUse() ||
      C.Op0.getOpcode() != ISD::LOAD ||
      C.Op1.getOpcode() != ISD::Constant)
    return;

  // We must have an 8- or 16-bit load.
  auto *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getSizeInBits();
  if ((NumBits != 8 && NumBits != 16) ||
      NumBits != Load->getMemoryVT().getStoreSizeInBits())
    return;

  // The load must be an extending one and the constant must be within the
  // range of the unextended value.
  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
  uint64_t Value = ConstOp1->getZExtValue();
  uint64_t Mask = (1 << NumBits) - 1;
  if (Load->getExtensionType() == ISD::SEXTLOAD) {
    // Make sure that ConstOp1 is in range of C.Op0.
    int64_t SignedValue = ConstOp1->getSExtValue();
    if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
      return;
    if (C.ICmpType != SystemZICMP::SignedOnly) {
      // Unsigned comparison between two sign-extended values is equivalent
      // to unsigned comparison between two zero-extended values.
      Value &= Mask;
    } else if (NumBits == 8) {
      // Try to treat the comparison as unsigned, so that we can use CLI.
      // Adjust CCMask and Value as necessary.
      if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
        // Test whether the high bit of the byte is set.
        Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
      else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
        // Test whether the high bit of the byte is clear.
        Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
      else
        // No instruction exists for this combination.
        return;
      C.ICmpType = SystemZICMP::UnsignedOnly;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    // If the constant is in range, we can use any comparison.
    C.ICmpType = SystemZICMP::Any;
  } else
    return;

  // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
                              ISD::SEXTLOAD :
                              ISD::ZEXTLOAD);
  if (C.Op0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType) {
    C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(),
                           Load->getBasePtr(), Load->getPointerInfo(),
                           Load->getMemoryVT(), Load->getAlignment(),
                           Load->getMemOperand()->getFlags());
    // Update the chain uses.
    DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), C.Op0.getValue(1));
  }

  // Make sure that the second operand is an i32 with the right value.
  if (C.Op1.getValueType() != MVT::i32 ||
      Value != ConstOp1->getZExtValue())
    C.Op1 = DAG.getConstant(Value, DL, MVT::i32);
}
// Return true if Op is either an unextended load, or a load suitable
// for integer register-memory comparisons of type ICmpType.
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load) {
    // There are no instructions to compare a register with a memory byte.
    if (Load->getMemoryVT() == MVT::i8)
      return false;
    // Otherwise decide on extension type.
    switch (Load->getExtensionType()) {
    case ISD::NON_EXTLOAD:
      return true;
    case ISD::SEXTLOAD:
      return ICmpType != SystemZICMP::UnsignedOnly;
    case ISD::ZEXTLOAD:
      return ICmpType != SystemZICMP::SignedOnly;
    default:
      break;
    }
  }
  return false;
}
// Return true if it is better to swap the operands of C.
static bool shouldSwapCmpOperands(const Comparison &C) {
  // Leave f128 comparisons alone, since they have no memory forms.
  if (C.Op0.getValueType() == MVT::f128)
    return false;

  // Always keep a floating-point constant second, since comparisons with
  // zero can use LOAD TEST and comparisons with other constants make a
  // natural memory operand.
  if (isa<ConstantFPSDNode>(C.Op1))
    return false;

  // Never swap comparisons with zero since there are many ways to optimize
  // those later.
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)
    return false;

  // Also keep natural memory operands second if the loaded value is
  // only used here.  Several comparisons have memory forms.
  if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse())
    return false;

  // Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
  // In that case we generally prefer the memory to be second.
  if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) {
    // The only exceptions are when the second operand is a constant and
    // we can use things like CHHSI.
    if (!ConstOp1)
      return true;
    // The unsigned memory-immediate instructions can handle 16-bit
    // unsigned integers.
    if (C.ICmpType != SystemZICMP::SignedOnly &&
        isUInt<16>(ConstOp1->getZExtValue()))
      return false;
    // The signed memory-immediate instructions can handle 16-bit
    // signed integers.
    if (C.ICmpType != SystemZICMP::UnsignedOnly &&
        isInt<16>(ConstOp1->getSExtValue()))
      return false;
    return true;
  }

  // Try to promote the use of CGFR and CLGFR.
  unsigned Opcode0 = C.Op0.getOpcode();
  if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
    return true;
  if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
    return true;
  if (C.ICmpType != SystemZICMP::SignedOnly &&
      Opcode0 == ISD::AND &&
      C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
    return true;

  return false;
}
// Check whether C tests for equality between X and Y and whether X - Y
// or Y - X is also computed.  In that case it's better to compare the
// result of the subtraction against zero.
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL,
                                 Comparison &C) {
  if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
      C.CCMask == SystemZ::CCMASK_CMP_NE) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::SUB &&
          ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
           (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
        C.Op0 = SDValue(N, 0);
        C.Op1 = DAG.getConstant(0, DL, N->getValueType(0));
        return;
      }
    }
  }
}
// Check whether C compares a floating-point value with zero and if that
// floating-point value is also negated.  In this case we can use the
// negation to set CC, so avoiding separate LOAD AND TEST and
// LOAD (NEGATIVE/COMPLEMENT) instructions.
static void adjustForFNeg(Comparison &C) {
  // This optimization is invalid for strict comparisons, since FNEG
  // does not raise any exceptions.
  if (C.Chain)
    return;
  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::FNEG) {
        C.Op0 = SDValue(N, 0);
        C.CCMask = SystemZ::reverseCCMask(C.CCMask);
        return;
      }
    }
  }
}
// Check whether C compares (shl X, 32) with 0 and whether X is
// also sign-extended.  In that case it is better to test the result
// of the sign extension using LTGFR.
//
// This case is important because InstCombine transforms a comparison
// with (sext (trunc X)) into a comparison with (shl X, 32).
static void adjustForLTGFR(Comparison &C) {
  // Check for a comparison between (shl X, 32) and 0.
  if (C.Op0.getOpcode() == ISD::SHL &&
      C.Op0.getValueType() == MVT::i64 &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
    if (C1 && C1->getZExtValue() == 32) {
      SDValue ShlOp0 = C.Op0.getOperand(0);
      // See whether X has any SIGN_EXTEND_INREG uses.
      for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
        SDNode *N = *I;
        if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
            cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
          C.Op0 = SDValue(N, 0);
          return;
        }
      }
    }
  }
}
// If C compares the truncation of an extending load, try to compare
// the untruncated value instead.  This exposes more opportunities to
// reuse CC.
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
                               Comparison &C) {
  if (C.Op0.getOpcode() == ISD::TRUNCATE &&
      C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
    if (L->getMemoryVT().getStoreSizeInBits().getFixedSize() <=
        C.Op0.getValueSizeInBits().getFixedSize()) {
      unsigned Type = L->getExtensionType();
      if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
          (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
        C.Op0 = C.Op0.getOperand(0);
        C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType());
      }
    }
  }
}
// Return true if shift operation N has an in-range constant shift value.
// Store it in ShiftVal if so.
static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!Shift)
    return false;

  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueSizeInBits())
    return false;

  ShiftVal = Amount;
  return true;
}
// Check whether an AND with Mask is suitable for a TEST UNDER MASK
// instruction and whether the CC value is descriptive enough to handle
// a comparison of type Opcode between the AND result and CmpVal.
// CCMask says which comparison result is being tested and BitSize is
// the number of bits in the operands.  If TEST UNDER MASK can be used,
// return the corresponding CC mask, otherwise return 0.
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
                                     uint64_t Mask, uint64_t CmpVal,
                                     unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
  if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
      !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
    return 0;

  // Work out the masks for the lowest and highest bits.
  unsigned HighShift = 63 - countLeadingZeros(Mask);
  uint64_t High = uint64_t(1) << HighShift;
  uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);

  // Signed ordered comparisons are effectively unsigned if the sign
  // bit is tested.
  bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);

  // Check for equality comparisons with 0, or the equivalent.
  if (CmpVal == 0) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal < Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_SOME_1;
  }

  // Check for equality comparisons with the mask, or the equivalent.
  if (CmpVal == Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_SOME_0;
  }

  // Check for ordered comparisons with the top bit.
  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_MSB_1;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_MSB_1;
  }

  // If there are just two bits, we can do equality checks for Low and High
  // as well.
  if (Mask == Low + High) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
  }

  // Looks like we've exhausted our options.
  return 0;
}
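// Worked example: for Mask == 0x8000 and CmpVal == 0, an equality test maps
// to CCMASK_TM_ALL_0 and an inequality test to CCMASK_TM_SOME_1, via the
// "CmpVal == 0" case above.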
// See whether C can be implemented as a TEST UNDER MASK instruction.
// Update the arguments with the TM version if so.
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL,
                                   Comparison &C) {
  // Check that we have a comparison with a constant.
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (!ConstOp1)
    return;
  uint64_t CmpVal = ConstOp1->getZExtValue();

  // Check whether the nonconstant input is an AND with a constant mask.
  Comparison NewC(C);
  uint64_t MaskVal;
  ConstantSDNode *Mask = nullptr;
  if (C.Op0.getOpcode() == ISD::AND) {
    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
    if (!Mask)
      return;
    MaskVal = Mask->getZExtValue();
  } else {
    // There is no instruction to compare with a 64-bit immediate
    // so use TMHH instead if possible.  We need an unsigned ordered
    // comparison with an i64 immediate.
    if (NewC.Op0.getValueType() != MVT::i64 ||
        NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
        NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
        NewC.ICmpType == SystemZICMP::SignedOnly)
      return;
    // Convert LE and GT comparisons into LT and GE.
    if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
        NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
      if (CmpVal == uint64_t(-1))
        return;
      CmpVal += 1;
      NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    }
    // If the low N bits of Op1 are zero, then the low N bits of Op0 can
    // be masked off without changing the result.
    MaskVal = -(CmpVal & -CmpVal);
    NewC.ICmpType = SystemZICMP::UnsignedOnly;
  }
  if (!MaskVal)
    return;

  // Check whether the combination of mask, comparison value and comparison
  // type are suitable.
  unsigned BitSize = NewC.Op0.getValueSizeInBits();
  unsigned NewCCMask, ShiftVal;
  if (NewC.ICmpType != SystemZICMP::SignedOnly &&
      NewC.Op0.getOpcode() == ISD::SHL &&
      isSimpleShift(NewC.Op0, ShiftVal) &&
      (MaskVal >> ShiftVal != 0) &&
      ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
      (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
                                        MaskVal >> ShiftVal,
                                        CmpVal >> ShiftVal,
                                        SystemZICMP::Any))) {
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal >>= ShiftVal;
  } else if (NewC.ICmpType != SystemZICMP::SignedOnly &&
             NewC.Op0.getOpcode() == ISD::SRL &&
             isSimpleShift(NewC.Op0, ShiftVal) &&
             (MaskVal << ShiftVal != 0) &&
             ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
             (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
                                               MaskVal << ShiftVal,
                                               CmpVal << ShiftVal,
                                               SystemZICMP::UnsignedOnly))) {
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal <<= ShiftVal;
  } else {
    NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal,
                                     NewC.ICmpType);
    if (!NewCCMask)
      return;
  }

  // Go ahead and make the change.
  C.Opcode = SystemZISD::TM;
  C.Op0 = NewC.Op0;
  if (Mask && Mask->getZExtValue() == MaskVal)
    C.Op1 = SDValue(Mask, 0);
  else
    C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType());
  C.CCValid = SystemZ::CCMASK_TM;
  C.CCMask = NewCCMask;
}
// See whether the comparison argument contains a redundant AND
// and remove it if so.  This sometimes happens due to the generic
// BRCOND expansion.
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL,
                                  Comparison &C) {
  if (C.Op0.getOpcode() != ISD::AND)
    return;
  auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
  if (!Mask)
    return;
  KnownBits Known = DAG.computeKnownBits(C.Op0.getOperand(0));
  if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
    return;

  C.Op0 = C.Op0.getOperand(0);
}
// Return a Comparison that tests the condition-code result of intrinsic
// node Call against constant integer CC using comparison code Cond.
// Opcode is the opcode of the SystemZISD operation for the intrinsic
// and CCValid is the set of possible condition-code results.
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode,
                                  SDValue Call, unsigned CCValid, uint64_t CC,
                                  ISD::CondCode Cond) {
  Comparison C(Call, SDValue(), SDValue());
  C.Opcode = Opcode;
  C.CCValid = CCValid;
  if (Cond == ISD::SETEQ)
    // bit 3 for CC==0, bit 0 for CC==3, always false for CC>3.
    C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
  else if (Cond == ISD::SETNE)
    // ...and the inverse of that.
    C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
  else if (Cond == ISD::SETLT || Cond == ISD::SETULT)
    // bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3,
    // always true for CC>3.
    C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
  else if (Cond == ISD::SETGE || Cond == ISD::SETUGE)
    // ...and the inverse of that.
    C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
  else if (Cond == ISD::SETLE || Cond == ISD::SETULE)
    // bit 3 and above for CC==0, bit 0 and above for CC==3 (always true),
    // always true for CC>3.
    C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
  else if (Cond == ISD::SETGT || Cond == ISD::SETUGT)
    // ...and the inverse of that.
    C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
  else
    llvm_unreachable("Unexpected integer comparison type");
  C.CCMask &= CCValid;
  return C;
}
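// Worked example: Cond == SETEQ against CC == 1 yields
// CCMask == 1 << (3 - 1) == 0b0100, i.e. only the bit for CC value 1
// (bit 3 corresponds to CC 0 and bit 0 to CC 3).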
// Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
                         ISD::CondCode Cond, const SDLoc &DL,
                         SDValue Chain = SDValue(),
                         bool IsSignaling = false) {
  if (CmpOp1.getOpcode() == ISD::Constant) {
    assert(!Chain);
    uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue();
    unsigned Opcode, CCValid;
    if (CmpOp0.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
        isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid))
      return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
    if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 &&
        isIntrinsicWithCC(CmpOp0, Opcode, CCValid))
      return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
  }
  Comparison C(CmpOp0, CmpOp1, Chain);
  C.CCMask = CCMaskForCondCode(Cond);
  if (C.Op0.getValueType().isFloatingPoint()) {
    C.CCValid = SystemZ::CCMASK_FCMP;
    if (!C.Chain)
      C.Opcode = SystemZISD::FCMP;
    else if (!IsSignaling)
      C.Opcode = SystemZISD::STRICT_FCMP;
    else
      C.Opcode = SystemZISD::STRICT_FCMPS;
    adjustForFNeg(C);
  } else {
    assert(!C.Chain);
    C.CCValid = SystemZ::CCMASK_ICMP;
    C.Opcode = SystemZISD::ICMP;
    // Choose the type of comparison.  Equality and inequality tests can
    // use either signed or unsigned comparisons.  The choice also doesn't
    // matter if both sign bits are known to be clear.  In those cases we
    // want to give the main isel code the freedom to choose whichever
    // form fits best.
    if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
        C.CCMask == SystemZ::CCMASK_CMP_NE ||
        (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1)))
      C.ICmpType = SystemZICMP::Any;
    else if (C.CCMask & SystemZ::CCMASK_CMP_UO)
      C.ICmpType = SystemZICMP::UnsignedOnly;
    else
      C.ICmpType = SystemZICMP::SignedOnly;
    C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
    adjustForRedundantAnd(DAG, DL, C);
    adjustZeroCmp(DAG, DL, C);
    adjustSubwordCmp(DAG, DL, C);
    adjustForSubtraction(DAG, DL, C);
    adjustForLTGFR(C);
    adjustICmpTruncate(DAG, DL, C);
  }

  if (shouldSwapCmpOperands(C)) {
    std::swap(C.Op0, C.Op1);
    C.CCMask = SystemZ::reverseCCMask(C.CCMask);
  }

  adjustForTestUnderMask(DAG, DL, C);
  return C;
}
// Emit the comparison instruction described by C.
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
  if (!C.Op1.getNode()) {
    SDNode *Node;
    switch (C.Op0.getOpcode()) {
    case ISD::INTRINSIC_W_CHAIN:
      Node = emitIntrinsicWithCCAndChain(DAG, C.Op0, C.Opcode);
      return SDValue(Node, 0);
    case ISD::INTRINSIC_WO_CHAIN:
      Node = emitIntrinsicWithCC(DAG, C.Op0, C.Opcode);
      return SDValue(Node, Node->getNumValues() - 1);
    default:
      llvm_unreachable("Invalid comparison operands");
    }
  }
  if (C.Opcode == SystemZISD::ICMP)
    return DAG.getNode(SystemZISD::ICMP, DL, MVT::i32, C.Op0, C.Op1,
                       DAG.getTargetConstant(C.ICmpType, DL, MVT::i32));
  if (C.Opcode == SystemZISD::TM) {
    bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
                         bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
    return DAG.getNode(SystemZISD::TM, DL, MVT::i32, C.Op0, C.Op1,
                       DAG.getTargetConstant(RegisterOnly, DL, MVT::i32));
  }
  if (C.Chain) {
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
    return DAG.getNode(C.Opcode, DL, VTs, C.Chain, C.Op0, C.Op1);
  }
  return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);
}
// Implement a 32-bit *MUL_LOHI operation by extending both operands to
// 64 bits.  Extend is the extension type to use.  Store the high part
// in Hi and the low part in Lo.
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend,
                            SDValue Op0, SDValue Op1, SDValue &Hi,
                            SDValue &Lo) {
  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
  SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
  Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul,
                   DAG.getConstant(32, DL, MVT::i64));
  Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
  Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
}
// Lower a binary operation that produces two VT results, one in each
// half of a GR128 pair.  Op0 and Op1 are the VT operands to the operation,
// and Opcode performs the GR128 operation.  Store the even register result
// in Even and the odd register result in Odd.
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                             unsigned Opcode, SDValue Op0, SDValue Op1,
                             SDValue &Even, SDValue &Odd) {
  SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1);
  bool Is32Bit = is32Bit(VT);
  Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
  Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
}
// Return an i32 value that is 1 if the CC value produced by CCReg is
// in the mask CCMask and 0 otherwise.  CC is known to have a value
// in CCValid, so other values can be ignored.
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg,
                         unsigned CCValid, unsigned CCMask) {
  SDValue Ops[] = {DAG.getConstant(1, DL, MVT::i32),
                   DAG.getConstant(0, DL, MVT::i32),
                   DAG.getTargetConstant(CCValid, DL, MVT::i32),
                   DAG.getTargetConstant(CCMask, DL, MVT::i32), CCReg};
  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, MVT::i32, Ops);
}
// Return the SystemZISD vector comparison operation for CC, or 0 if it cannot
// be done directly.  Mode is CmpMode::Int for integer comparisons, CmpMode::FP
// for regular floating-point comparisons, CmpMode::StrictFP for strict (quiet)
// floating-point comparisons, and CmpMode::SignalingFP for strict signaling
// floating-point comparisons.
enum class CmpMode { Int, FP, StrictFP, SignalingFP };
static unsigned getVectorComparison(ISD::CondCode CC, CmpMode Mode) {
  switch (CC) {
  case ISD::SETOEQ:
  case ISD::SETEQ:
    switch (Mode) {
    case CmpMode::Int:         return SystemZISD::VICMPE;
    case CmpMode::FP:          return SystemZISD::VFCMPE;
    case CmpMode::StrictFP:    return SystemZISD::STRICT_VFCMPE;
    case CmpMode::SignalingFP: return SystemZISD::STRICT_VFCMPES;
    }
    llvm_unreachable("Bad mode");

  case ISD::SETOGE:
  case ISD::SETGE:
    switch (Mode) {
    case CmpMode::Int:         return 0;
    case CmpMode::FP:          return SystemZISD::VFCMPHE;
    case CmpMode::StrictFP:    return SystemZISD::STRICT_VFCMPHE;
    case CmpMode::SignalingFP: return SystemZISD::STRICT_VFCMPHES;
    }
    llvm_unreachable("Bad mode");

  case ISD::SETOGT:
  case ISD::SETGT:
    switch (Mode) {
    case CmpMode::Int:         return SystemZISD::VICMPH;
    case CmpMode::FP:          return SystemZISD::VFCMPH;
    case CmpMode::StrictFP:    return SystemZISD::STRICT_VFCMPH;
    case CmpMode::SignalingFP: return SystemZISD::STRICT_VFCMPHS;
    }
    llvm_unreachable("Bad mode");

  case ISD::SETUGT:
    switch (Mode) {
    case CmpMode::Int:         return SystemZISD::VICMPHL;
    case CmpMode::FP:          return 0;
    case CmpMode::StrictFP:    return 0;
    case CmpMode::SignalingFP: return 0;
    }
    llvm_unreachable("Bad mode");

  default:
    return 0;
  }
}
// Return the SystemZISD vector comparison operation for CC or its inverse,
// or 0 if neither can be done directly.  Indicate in Invert whether the
// result is for the inverse of CC.  Mode is as above.
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, CmpMode Mode,
                                            bool &Invert) {
  if (unsigned Opcode = getVectorComparison(CC, Mode)) {
    Invert = false;
    return Opcode;
  }

  CC = ISD::getSetCCInverse(CC, Mode == CmpMode::Int ? MVT::i32 : MVT::f32);
  if (unsigned Opcode = getVectorComparison(CC, Mode)) {
    Invert = true;
    return Opcode;
  }

  return 0;
}
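// For example, integer SETNE has no direct VICMP form, but its inverse SETEQ
// maps to VICMPE, so the function returns VICMPE with Invert set to true.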
// Return a v2f64 that contains the extended form of elements Start and Start+1
// of v4f32 value Op.  If Chain is nonnull, return the strict form.
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL,
                                  SDValue Op, SDValue Chain) {
  int Mask[] = { Start, -1, Start + 1, -1 };
  Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask);
  if (Chain) {
    SDVTList VTs = DAG.getVTList(MVT::v2f64, MVT::Other);
    return DAG.getNode(SystemZISD::STRICT_VEXTEND, DL, VTs, Chain, Op);
  }
  return DAG.getNode(SystemZISD::VEXTEND, DL, MVT::v2f64, Op);
}
// Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode,
// producing a result of type VT.  If Chain is nonnull, return the strict form.
SDValue SystemZTargetLowering::getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
                                            const SDLoc &DL, EVT VT,
                                            SDValue CmpOp0, SDValue CmpOp1,
                                            SDValue Chain) const {
  // There is no hardware support for v4f32 (unless we have the vector
  // enhancements facility 1), so extend the vector into two v2f64s
  // and compare those.
  if (CmpOp0.getValueType() == MVT::v4f32 &&
      !Subtarget.hasVectorEnhancements1()) {
    SDValue H0 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp0, Chain);
    SDValue L0 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp0, Chain);
    SDValue H1 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp1, Chain);
    SDValue L1 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp1, Chain);
    if (Chain) {
      SDVTList VTs = DAG.getVTList(MVT::v2i64, MVT::Other);
      SDValue HRes = DAG.getNode(Opcode, DL, VTs, Chain, H0, H1);
      SDValue LRes = DAG.getNode(Opcode, DL, VTs, Chain, L0, L1);
      SDValue Res = DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes);
      SDValue Chains[6] = { H0.getValue(1), L0.getValue(1),
                            H1.getValue(1), L1.getValue(1),
                            HRes.getValue(1), LRes.getValue(1) };
      SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
      SDValue Ops[2] = { Res, NewChain };
      return DAG.getMergeValues(Ops, DL);
    }
    SDValue HRes = DAG.getNode(Opcode, DL, MVT::v2i64, H0, H1);
    SDValue LRes = DAG.getNode(Opcode, DL, MVT::v2i64, L0, L1);
    return DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes);
  }
  if (Chain) {
    SDVTList VTs = DAG.getVTList(VT, MVT::Other);
    return DAG.getNode(Opcode, DL, VTs, Chain, CmpOp0, CmpOp1);
  }
  return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
}
// Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing
// an integer mask of type VT.  If Chain is nonnull, we have a strict
// floating-point comparison.  If in addition IsSignaling is true, we have
// a strict signaling floating-point comparison.
SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG,
                                                const SDLoc &DL, EVT VT,
                                                ISD::CondCode CC,
                                                SDValue CmpOp0, SDValue CmpOp1,
                                                SDValue Chain,
                                                bool IsSignaling) const {
  bool IsFP = CmpOp0.getValueType().isFloatingPoint();
  assert (!Chain || IsFP);
  assert (!IsSignaling || Chain);
  CmpMode Mode = IsSignaling ? CmpMode::SignalingFP :
                 Chain ? CmpMode::StrictFP : IsFP ? CmpMode::FP : CmpMode::Int;
  bool Invert = false;
  SDValue Cmp;
  switch (CC) {
    // Handle tests for order using (or (ogt y x) (oge x y)).
  case ISD::SETUO:
    Invert = true;
    LLVM_FALLTHROUGH;
  case ISD::SETO: {
    assert(IsFP && "Unexpected integer comparison");
    SDValue LT = getVectorCmp(DAG, getVectorComparison(ISD::SETOGT, Mode),
                              DL, VT, CmpOp1, CmpOp0, Chain);
    SDValue GE = getVectorCmp(DAG, getVectorComparison(ISD::SETOGE, Mode),
                              DL, VT, CmpOp0, CmpOp1, Chain);
    Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GE);
    if (Chain)
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          LT.getValue(1), GE.getValue(1));
    break;
  }

    // Handle <> tests using (or (ogt y x) (ogt x y)).
  case ISD::SETUEQ:
    Invert = true;
    LLVM_FALLTHROUGH;
  case ISD::SETONE: {
    assert(IsFP && "Unexpected integer comparison");
    SDValue LT = getVectorCmp(DAG, getVectorComparison(ISD::SETOGT, Mode),
                              DL, VT, CmpOp1, CmpOp0, Chain);
    SDValue GT = getVectorCmp(DAG, getVectorComparison(ISD::SETOGT, Mode),
                              DL, VT, CmpOp0, CmpOp1, Chain);
    Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GT);
    if (Chain)
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          LT.getValue(1), GT.getValue(1));
    break;
  }

    // Otherwise a single comparison is enough.  It doesn't really
    // matter whether we try the inversion or the swap first, since
    // there are no cases where both work.
  default:
    if (unsigned Opcode = getVectorComparisonOrInvert(CC, Mode, Invert))
      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1, Chain);
    else {
      CC = ISD::getSetCCSwappedOperands(CC);
      if (unsigned Opcode = getVectorComparisonOrInvert(CC, Mode, Invert))
        Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0, Chain);
      else
        llvm_unreachable("Unhandled comparison");
    }
    if (Chain)
      Chain = Cmp.getValue(1);
    break;
  }
  if (Invert) {
    SDValue Mask =
      DAG.getSplatBuildVector(VT, DL, DAG.getConstant(-1, DL, MVT::i64));
    Cmp = DAG.getNode(ISD::XOR, DL, VT, Cmp, Mask);
  }
  if (Chain && Chain.getNode() != Cmp.getNode()) {
    SDValue Ops[2] = { Cmp, Chain };
    Cmp = DAG.getMergeValues(Ops, DL);
  }
  return Cmp;
}
SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDValue CmpOp0 = Op.getOperand(0);
  SDValue CmpOp1 = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  if (VT.isVector())
    return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
  SDValue CCReg = emitCmp(DAG, DL, C);
  return emitSETCC(DAG, DL, CCReg, C.CCValid, C.CCMask);
}
SDValue SystemZTargetLowering::lowerSTRICT_FSETCC(SDValue Op,
                                                  SelectionDAG &DAG,
                                                  bool IsSignaling) const {
  SDValue Chain = Op.getOperand(0);
  SDValue CmpOp0 = Op.getOperand(1);
  SDValue CmpOp1 = Op.getOperand(2);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(3))->get();
  SDLoc DL(Op);
  EVT VT = Op.getNode()->getValueType(0);
  if (VT.isVector()) {
    SDValue Res = lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1,
                                   Chain, IsSignaling);
    return Res.getValue(Op.getResNo());
  }

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL, Chain, IsSignaling));
  SDValue CCReg = emitCmp(DAG, DL, C);
  CCReg->setFlags(Op->getFlags());
  SDValue Result = emitSETCC(DAG, DL, CCReg, C.CCValid, C.CCMask);
  SDValue Ops[2] = { Result, CCReg.getValue(1) };
  return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue CmpOp0 = Op.getOperand(2);
  SDValue CmpOp1 = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc DL(Op);

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
  SDValue CCReg = emitCmp(DAG, DL, C);
  return DAG.getNode(
      SystemZISD::BR_CCMASK, DL, Op.getValueType(), Op.getOperand(0),
      DAG.getTargetConstant(C.CCValid, DL, MVT::i32),
      DAG.getTargetConstant(C.CCMask, DL, MVT::i32), Dest, CCReg);
}
// Return true if Pos is CmpOp and Neg is the negative of CmpOp,
// allowing Pos and Neg to be wider than CmpOp.
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {
  return (Neg.getOpcode() == ISD::SUB &&
          Neg.getOperand(0).getOpcode() == ISD::Constant &&
          cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
          Neg.getOperand(1) == Pos &&
          (Pos == CmpOp ||
           (Pos.getOpcode() == ISD::SIGN_EXTEND &&
            Pos.getOperand(0) == CmpOp)));
}
// Return the absolute or negative absolute of Op; IsNegative decides which.
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op,
                           bool IsNegative) {
  Op = DAG.getNode(ISD::ABS, DL, Op.getValueType(), Op);
  if (IsNegative)
    Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(),
                     DAG.getConstant(0, DL, Op.getValueType()), Op);
  return Op;
}
SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue CmpOp0 = Op.getOperand(0);
  SDValue CmpOp1 = Op.getOperand(1);
  SDValue TrueOp = Op.getOperand(2);
  SDValue FalseOp = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));

  // Check for absolute and negative-absolute selections, including those
  // where the comparison value is sign-extended (for LPGFR and LNGFR).
  // This check supplements the one in DAGCombiner.
  if (C.Opcode == SystemZISD::ICMP &&
      C.CCMask != SystemZ::CCMASK_CMP_EQ &&
      C.CCMask != SystemZ::CCMASK_CMP_NE &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    if (isAbsolute(C.Op0, TrueOp, FalseOp))
      return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT);
    if (isAbsolute(C.Op0, FalseOp, TrueOp))
      return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT);
  }

  SDValue CCReg = emitCmp(DAG, DL, C);
  SDValue Ops[] = {TrueOp, FalseOp,
                   DAG.getTargetConstant(C.CCValid, DL, MVT::i32),
                   DAG.getTargetConstant(C.CCMask, DL, MVT::i32), CCReg};

  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, Op.getValueType(), Ops);
}

SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  CodeModel::Model CM = DAG.getTarget().getCodeModel();

  SDValue Result;
  if (Subtarget.isPC32DBLSymbol(GV, CM)) {
    if (isInt<32>(Offset)) {
      // Assign anchors at 1<<12 byte boundaries.
      uint64_t Anchor = Offset & ~uint64_t(0xfff);
      Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor);
      Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);

      // The offset can be folded into the address if it is aligned to a
      // halfword.
      Offset -= Anchor;
      if (Offset != 0 && (Offset & 1) == 0) {
        SDValue Full =
          DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset);
        Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result);
        Offset = 0;
      }
    } else {
      // Conservatively load a constant offset greater than 32 bits into a
      // register below.
      Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT);
      Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
    }
  } else {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  }

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
                         DAG.getConstant(Offset, DL, PtrVT));

  return Result;
}
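
// Worked example for the anchor logic above: with Offset = 0x12345 the anchor
// is 0x12345 & ~0xfff = 0x12000, leaving a residual offset of 0x345.  0x345 is
// odd (not halfword aligned), so it cannot be folded into the PC-relative
// address and is instead added with the explicit ADD at the end.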

SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node,
                                                 SelectionDAG &DAG,
                                                 unsigned Opcode,
                                                 SDValue GOTOffset) const {
  SDLoc DL(Node);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Chain = DAG.getEntryNode();
  SDValue Glue;

  if (DAG.getMachineFunction().getFunction().getCallingConv() ==
      CallingConv::GHC)
    report_fatal_error("In GHC calling convention TLS is not supported");

  // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12.
  SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue);
  Glue = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
  Glue = Chain.getValue(1);

  // The first call operand is the chain and the second is the TLS symbol.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL,
                                           Node->getValueType(0),
                                           0, 0));

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT));
  Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies.
  Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Copy the return value from %r2.
  return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue);
}

SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
                                                  SelectionDAG &DAG) const {
  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // The high part of the thread pointer is in access register 0.
  SDValue TPHi = DAG.getCopyFromReg(Chain, DL, SystemZ::A0, MVT::i32);
  TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);

  // The low part of the thread pointer is in access register 1.
  SDValue TPLo = DAG.getCopyFromReg(Chain, DL, SystemZ::A1, MVT::i32);
  TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);

  // Merge them into a single 64-bit address.
  SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
                                    DAG.getConstant(32, DL, PtrVT));
  return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
}

SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                                     SelectionDAG &DAG) const {
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(Node, DAG);
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  TLSModel::Model model = DAG.getTarget().getTLSModel(GV);

  if (DAG.getMachineFunction().getFunction().getCallingConv() ==
      CallingConv::GHC)
    report_fatal_error("In GHC calling convention TLS is not supported");

  SDValue TP = lowerThreadPointer(DL, DAG);

  // Get the offset of GA from the thread pointer, based on the TLS model.
  SDValue Offset;
  switch (model) {
    case TLSModel::GeneralDynamic: {
      // Load the GOT offset of the tls_index (module ID / per-symbol offset).
      SystemZConstantPoolValue *CPV =
        SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD);

      Offset = DAG.getConstantPool(CPV, PtrVT, Align(8));
      Offset = DAG.getLoad(
          PtrVT, DL, DAG.getEntryNode(), Offset,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

      // Call __tls_get_offset to retrieve the offset.
      Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset);
      break;
    }

    case TLSModel::LocalDynamic: {
      // Load the GOT offset of the module ID.
      SystemZConstantPoolValue *CPV =
        SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM);

      Offset = DAG.getConstantPool(CPV, PtrVT, Align(8));
      Offset = DAG.getLoad(
          PtrVT, DL, DAG.getEntryNode(), Offset,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

      // Call __tls_get_offset to retrieve the module base offset.
      Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset);

      // Note: The SystemZLDCleanupPass will remove redundant computations
      // of the module base offset.  Count total number of local-dynamic
      // accesses to trigger execution of that pass.
      SystemZMachineFunctionInfo* MFI =
        DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>();
      MFI->incNumLocalDynamicTLSAccesses();

      // Add the per-symbol offset.
      CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF);

      SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, Align(8));
      DTPOffset = DAG.getLoad(
          PtrVT, DL, DAG.getEntryNode(), DTPOffset,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

      Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset);
      break;
    }

    case TLSModel::InitialExec: {
      // Load the offset from the GOT.
      Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                          SystemZII::MO_INDNTPOFF);
      Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset);
      Offset =
          DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset,
                      MachinePointerInfo::getGOT(DAG.getMachineFunction()));
      break;
    }

    case TLSModel::LocalExec: {
      // Force the offset into the constant pool and load it from there.
      SystemZConstantPoolValue *CPV =
        SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);

      Offset = DAG.getConstantPool(CPV, PtrVT, Align(8));
      Offset = DAG.getLoad(
          PtrVT, DL, DAG.getEntryNode(), Offset,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
      break;
    }
  }

  // Add the base and offset together.
  return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
}

SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const BlockAddress *BA = Node->getBlockAddress();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
  Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
  return Result;
}

SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
                                              SelectionDAG &DAG) const {
  SDLoc DL(JT);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);

  // Use LARL to load the address of the table.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}

SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(CP);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Result;
  if (CP->isMachineConstantPoolEntry())
    Result =
        DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, CP->getAlign());
  else
    Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(),
                                       CP->getOffset());

  // Use LARL to load the address of the constant pool entry.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}

SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op,
                                              SelectionDAG &DAG) const {
  auto *TFL =
      static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // By definition, the frame address is the address of the back chain.  (In
  // the case of packed stack without backchain, return the address where the
  // backchain would have been stored. This will either be an unused space or
  // contain a saved register).
  int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);
  SDValue BackChain = DAG.getFrameIndex(BackChainIdx, PtrVT);

  // FIXME The frontend should detect this case.
  if (Depth > 0)
    report_fatal_error("Unsupported stack frame traversal count");

  return BackChain;
}

SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op,
                                               SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // FIXME The frontend should detect this case.
  if (Depth > 0)
    report_fatal_error("Unsupported stack frame traversal count");

  // Return R14D, which has the return address. Mark it an implicit live-in.
  unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT);
}

SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue In = Op.getOperand(0);
  EVT InVT = In.getValueType();
  EVT ResVT = Op.getValueType();

  // Convert loads directly.  This is normally done by DAGCombiner,
  // but we need this case for bitcasts that are created during lowering
  // and which are then lowered themselves.
  if (auto *LoadN = dyn_cast<LoadSDNode>(In))
    if (ISD::isNormalLoad(LoadN)) {
      SDValue NewLoad = DAG.getLoad(ResVT, DL, LoadN->getChain(),
                                    LoadN->getBasePtr(), LoadN->getMemOperand());
      // Update the chain uses.
      DAG.ReplaceAllUsesOfValueWith(SDValue(LoadN, 1), NewLoad.getValue(1));
      return NewLoad;
    }

  if (InVT == MVT::i32 && ResVT == MVT::f32) {
    SDValue In64;
    if (Subtarget.hasHighWord()) {
      SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL,
                                       MVT::i64);
      In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
                                       MVT::i64, SDValue(U64, 0), In);
    } else {
      In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
      In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64,
                         DAG.getConstant(32, DL, MVT::i64));
    }
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64);
    return DAG.getTargetExtractSubreg(SystemZ::subreg_h32,
                                      DL, MVT::f32, Out64);
  }
  if (InVT == MVT::f32 && ResVT == MVT::i32) {
    SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
    SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
                                             MVT::f64, SDValue(U64, 0), In);
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64);
    if (Subtarget.hasHighWord())
      return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL,
                                        MVT::i32, Out64);
    SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64,
                                DAG.getConstant(32, DL, MVT::i64));
    return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
  }
  llvm_unreachable("Unexpected bitcast combination");
}
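
// Note on the bitcast paths above: both directions go through the high 32-bit
// subregister of a 64-bit value, e.g. i32 -> f32 places the integer in bits
// 63..32 of an i64 (via INSERT_SUBREG of subreg_h32, or an explicit shift by
// 32), bitcasts that to f64, and then extracts the f32 from subreg_h32.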

SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
                                            SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SystemZMachineFunctionInfo *FuncInfo =
      MF.getInfo<SystemZMachineFunctionInfo>();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Chain   = Op.getOperand(0);
  SDValue Addr    = Op.getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDLoc DL(Op);

  // The initial values of each field.
  const unsigned NumFields = 4;
  SDValue Fields[NumFields] = {
    DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT),
    DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT),
    DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
    DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
  };

  // Store each field into its respective slot.
  SDValue MemOps[NumFields];
  unsigned Offset = 0;
  for (unsigned I = 0; I < NumFields; ++I) {
    SDValue FieldAddr = Addr;
    if (Offset != 0)
      FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
                              DAG.getIntPtrConstant(Offset, DL));
    MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
                             MachinePointerInfo(SV, Offset));
    Offset += 8;
  }

  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}
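
// Illustration: the four stores above correspond to the s390x ELF ABI va_list
// layout (shown here as a hedged sketch; the field names are illustrative),
// written at 8-byte offsets from Addr:
//
//   struct __va_list_tag {
//     long  __gpr;                // index of the next GPR argument
//     long  __fpr;                // index of the next FPR argument
//     void *__overflow_arg_area;  // next argument passed on the stack
//     void *__reg_save_area;      // start of the register save area
//   };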

SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue Chain      = Op.getOperand(0);
  SDValue DstPtr     = Op.getOperand(1);
  SDValue SrcPtr     = Op.getOperand(2);
  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);

  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL),
                       Align(8), /*isVolatile*/ false, /*AlwaysInline*/ false,
                       /*isTailCall*/ false, MachinePointerInfo(DstSV),
                       MachinePointerInfo(SrcSV));
}

SDValue SystemZTargetLowering::
lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  MachineFunction &MF = DAG.getMachineFunction();
  bool RealignOpt = !MF.getFunction().hasFnAttribute("no-realign-stack");
  bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");

  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);
  SDValue Align = Op.getOperand(2);
  SDLoc DL(Op);

  // If user has set the no alignment function attribute, ignore
  // alloca alignments.
  uint64_t AlignVal =
      (RealignOpt ? cast<ConstantSDNode>(Align)->getZExtValue() : 0);

  uint64_t StackAlign = TFI->getStackAlignment();
  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
  uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;

  Register SPReg = getStackPointerRegisterToSaveRestore();
  SDValue NeededSpace = Size;

  // Get a reference to the stack pointer.
  SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);

  // If we need a backchain, save it now.
  SDValue Backchain;
  if (StoreBackchain)
    Backchain = DAG.getLoad(MVT::i64, DL, Chain, getBackchainAddress(OldSP, DAG),
                            MachinePointerInfo());

  // Add extra space for alignment if needed.
  if (ExtraAlignSpace)
    NeededSpace = DAG.getNode(ISD::ADD, DL, MVT::i64, NeededSpace,
                              DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));

  // Get the new stack pointer value.
  SDValue NewSP;
  if (hasInlineStackProbe(MF)) {
    NewSP = DAG.getNode(SystemZISD::PROBED_ALLOCA, DL,
                DAG.getVTList(MVT::i64, MVT::Other), Chain, OldSP, NeededSpace);
    Chain = NewSP.getValue(1);
  } else {
    NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, NeededSpace);
    // Copy the new stack pointer back.
    Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);
  }

  // The allocated data lives above the 160 bytes allocated for the standard
  // frame, plus any outgoing stack arguments.  We don't know how much that
  // amounts to yet, so emit a special ADJDYNALLOC placeholder.
  SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
  SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);

  // Dynamically realign if needed.
  if (RequiredAlign > StackAlign) {
    Result =
      DAG.getNode(ISD::ADD, DL, MVT::i64, Result,
                  DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
    Result =
      DAG.getNode(ISD::AND, DL, MVT::i64, Result,
                  DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64));
  }

  if (StoreBackchain)
    Chain = DAG.getStore(Chain, DL, Backchain, getBackchainAddress(NewSP, DAG),
                         MachinePointerInfo());

  SDValue Ops[2] = { Result, Chain };
  return DAG.getMergeValues(Ops, DL);
}
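
// Worked example for the realignment logic above: with the 8-byte SystemZ
// stack alignment and an alloca requesting 64-byte alignment, RequiredAlign
// is 64 and ExtraAlignSpace is 56.  Those 56 extra bytes are allocated, added
// to the ADJDYNALLOC-adjusted result, and the final AND with ~63 rounds the
// returned pointer down to a 64-byte boundary.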

SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);

  return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
}

SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue Ops[2];
  if (is32Bit(VT))
    // Just do a normal 64-bit multiplication and extract the results.
    // We define this so that it can be used for constant division.
    lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0),
                    Op.getOperand(1), Ops[1], Ops[0]);
  else if (Subtarget.hasMiscellaneousExtensions2())
    // SystemZISD::SMUL_LOHI returns the low result in the odd register and
    // the high result in the even register.  ISD::SMUL_LOHI is defined to
    // return the low half first, so the results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZISD::SMUL_LOHI,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  else {
    // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI:
    //
    //   (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
    //
    // but using the fact that the upper halves are either all zeros
    // or all ones:
    //
    //   (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
    //
    // and grouping the right terms together since they are quicker than the
    // multiplication:
    //
    //   (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
    SDValue C63 = DAG.getConstant(63, DL, MVT::i64);
    SDValue LL = Op.getOperand(0);
    SDValue RL = Op.getOperand(1);
    SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
    SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
    // SystemZISD::UMUL_LOHI returns the low result in the odd register and
    // the high result in the even register.  ISD::SMUL_LOHI is defined to
    // return the low half first, so the results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
                     LL, RL, Ops[1], Ops[0]);
    SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
    SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
    SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
    Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
  }
  return DAG.getMergeValues(Ops, DL);
}
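
// Worked example for the signed-high correction above: lh = ll >> 63 is zero
// when ll >= 0 and all ones when ll < 0, so (lh & rl) equals rl exactly when
// ll is negative.  Taking ll = -1 and rl = 2: the unsigned high half from
// UMUL_LOHI is 1, and subtracting (lh & rl) + (ll & rh) = 2 + 0 gives -1,
// which is the correct signed high half of (-1) * 2 = -2.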

SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue Ops[2];
  if (is32Bit(VT))
    // Just do a normal 64-bit multiplication and extract the results.
    // We define this so that it can be used for constant division.
    lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
                    Op.getOperand(1), Ops[1], Ops[0]);
  else
    // SystemZISD::UMUL_LOHI returns the low result in the odd register and
    // the high result in the even register.  ISD::UMUL_LOHI is defined to
    // return the low half first, so the results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  // We use DSGF for 32-bit division.  This means the first operand must
  // always be 64-bit, and the second operand should be 32-bit whenever
  // that is possible, to improve performance.
  if (is32Bit(VT))
    Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
  else if (DAG.ComputeNumSignBits(Op1) > 32)
    Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);

  // DSG(F) returns the remainder in the even register and the
  // quotient in the odd register.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  // DL(G) returns the remainder in the even register and the
  // quotient in the odd register.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM,
                   Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");

  // Get the known-zero masks for each operand.
  SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)};
  KnownBits Known[2] = {DAG.computeKnownBits(Ops[0]),
                        DAG.computeKnownBits(Ops[1])};

  // See if the upper 32 bits of one operand and the lower 32 bits of the
  // other are known zero.  They are the low and high operands respectively.
  uint64_t Masks[] = { Known[0].Zero.getZExtValue(),
                       Known[1].Zero.getZExtValue() };
  unsigned High, Low;
  if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
    High = 1, Low = 0;
  else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
    High = 0, Low = 1;
  else
    return Op;

  SDValue LowOp = Ops[Low];
  SDValue HighOp = Ops[High];

  // If the high part is a constant, we're better off using IILH.
  if (HighOp.getOpcode() == ISD::Constant)
    return Op;

  // If the low part is a constant that is outside the range of LHI,
  // then we're better off using IILF.
  if (LowOp.getOpcode() == ISD::Constant) {
    int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
    if (!isInt<16>(Value))
      return Op;
  }

  // Check whether the high part is an AND that doesn't change the
  // high 32 bits and just masks out low bits.  We can skip it if so.
  if (HighOp.getOpcode() == ISD::AND &&
      HighOp.getOperand(1).getOpcode() == ISD::Constant) {
    SDValue HighOp0 = HighOp.getOperand(0);
    uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
    if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff))))
      HighOp = HighOp0;
  }

  // Take advantage of the fact that all GR32 operations only change the
  // low 32 bits by truncating Low to an i32 and inserting it directly
  // using a subreg.  The interesting cases are those where the truncation
  // can be folded.
  SDLoc DL(Op);
  SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
  return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
                                   MVT::i64, HighOp, Low32);
}
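
// Worked example for the mask check above: Masks[i] holds the known-zero bits
// of operand i.  If operand 0's upper 32 bits are known zero and operand 1's
// lower 32 bits are known zero, then (Masks[0] >> 32) and uint32_t(Masks[1])
// are both 0xffffffff, so Low = 0 and High = 1, and the OR becomes "insert the
// truncated low operand into the low 32-bit subregister of the high operand".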

// Lower SADDO/SSUBO/UADDO/USUBO nodes.
SDValue SystemZTargetLowering::lowerXALUO(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDNode *N = Op.getNode();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDLoc DL(N);
  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown instruction!");
  case ISD::SADDO:
    BaseOp = SystemZISD::SADDO;
    CCValid = SystemZ::CCMASK_ARITH;
    CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
    break;
  case ISD::SSUBO:
    BaseOp = SystemZISD::SSUBO;
    CCValid = SystemZ::CCMASK_ARITH;
    CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
    break;
  case ISD::UADDO:
    BaseOp = SystemZISD::UADDO;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
    break;
  case ISD::USUBO:
    BaseOp = SystemZISD::USUBO;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
    break;
  }

  SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
  SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);

  SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
  if (N->getValueType(1) == MVT::i1)
    SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);

  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
}

static bool isAddCarryChain(SDValue Carry) {
  while (Carry.getOpcode() == ISD::ADDCARRY)
    Carry = Carry.getOperand(2);
  return Carry.getOpcode() == ISD::UADDO;
}

static bool isSubBorrowChain(SDValue Carry) {
  while (Carry.getOpcode() == ISD::SUBCARRY)
    Carry = Carry.getOperand(2);
  return Carry.getOpcode() == ISD::USUBO;
}

// Lower ADDCARRY/SUBCARRY nodes.
SDValue SystemZTargetLowering::lowerADDSUBCARRY(SDValue Op,
                                                SelectionDAG &DAG) const {

  SDNode *N = Op.getNode();
  MVT VT = N->getSimpleValueType(0);

  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue Carry = Op.getOperand(2);
  SDLoc DL(N);
  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown instruction!");
  case ISD::ADDCARRY:
    if (!isAddCarryChain(Carry))
      return SDValue();

    BaseOp = SystemZISD::ADDCARRY;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
    break;
  case ISD::SUBCARRY:
    if (!isSubBorrowChain(Carry))
      return SDValue();

    BaseOp = SystemZISD::SUBCARRY;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
    break;
  }

  // Set the condition code from the carry flag.
  Carry = DAG.getNode(SystemZISD::GET_CCMASK, DL, MVT::i32, Carry,
                      DAG.getConstant(CCValid, DL, MVT::i32),
                      DAG.getConstant(CCMask, DL, MVT::i32));

  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
  SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS, Carry);

  SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
  if (N->getValueType(1) == MVT::i1)
    SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);

  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
}
3822 SDValue
SystemZTargetLowering::lowerCTPOP(SDValue Op
,
3823 SelectionDAG
&DAG
) const {
3824 EVT VT
= Op
.getValueType();
3826 Op
= Op
.getOperand(0);
3828 // Handle vector types via VPOPCT.
3829 if (VT
.isVector()) {
3830 Op
= DAG
.getNode(ISD::BITCAST
, DL
, MVT::v16i8
, Op
);
3831 Op
= DAG
.getNode(SystemZISD::POPCNT
, DL
, MVT::v16i8
, Op
);
3832 switch (VT
.getScalarSizeInBits()) {
3836 Op
= DAG
.getNode(ISD::BITCAST
, DL
, VT
, Op
);
3837 SDValue Shift
= DAG
.getConstant(8, DL
, MVT::i32
);
3838 SDValue Tmp
= DAG
.getNode(SystemZISD::VSHL_BY_SCALAR
, DL
, VT
, Op
, Shift
);
3839 Op
= DAG
.getNode(ISD::ADD
, DL
, VT
, Op
, Tmp
);
3840 Op
= DAG
.getNode(SystemZISD::VSRL_BY_SCALAR
, DL
, VT
, Op
, Shift
);
3844 SDValue Tmp
= DAG
.getSplatBuildVector(MVT::v16i8
, DL
,
3845 DAG
.getConstant(0, DL
, MVT::i32
));
3846 Op
= DAG
.getNode(SystemZISD::VSUM
, DL
, VT
, Op
, Tmp
);
3850 SDValue Tmp
= DAG
.getSplatBuildVector(MVT::v16i8
, DL
,
3851 DAG
.getConstant(0, DL
, MVT::i32
));
3852 Op
= DAG
.getNode(SystemZISD::VSUM
, DL
, MVT::v4i32
, Op
, Tmp
);
3853 Op
= DAG
.getNode(SystemZISD::VSUM
, DL
, VT
, Op
, Tmp
);
3857 llvm_unreachable("Unexpected type");
3862 // Get the known-zero mask for the operand.
3863 KnownBits Known
= DAG
.computeKnownBits(Op
);
3864 unsigned NumSignificantBits
= Known
.getMaxValue().getActiveBits();
3865 if (NumSignificantBits
== 0)
3866 return DAG
.getConstant(0, DL
, VT
);
3868 // Skip known-zero high parts of the operand.
3869 int64_t OrigBitSize
= VT
.getSizeInBits();
3870 int64_t BitSize
= (int64_t)1 << Log2_32_Ceil(NumSignificantBits
);
3871 BitSize
= std::min(BitSize
, OrigBitSize
);
3873 // The POPCNT instruction counts the number of bits in each byte.
3874 Op
= DAG
.getNode(ISD::ANY_EXTEND
, DL
, MVT::i64
, Op
);
3875 Op
= DAG
.getNode(SystemZISD::POPCNT
, DL
, MVT::i64
, Op
);
3876 Op
= DAG
.getNode(ISD::TRUNCATE
, DL
, VT
, Op
);
3878 // Add up per-byte counts in a binary tree. All bits of Op at
3879 // position larger than BitSize remain zero throughout.
3880 for (int64_t I
= BitSize
/ 2; I
>= 8; I
= I
/ 2) {
3881 SDValue Tmp
= DAG
.getNode(ISD::SHL
, DL
, VT
, Op
, DAG
.getConstant(I
, DL
, VT
));
3882 if (BitSize
!= OrigBitSize
)
3883 Tmp
= DAG
.getNode(ISD::AND
, DL
, VT
, Tmp
,
3884 DAG
.getConstant(((uint64_t)1 << BitSize
) - 1, DL
, VT
));
3885 Op
= DAG
.getNode(ISD::ADD
, DL
, VT
, Op
, Tmp
);
3888 // Extract overall result from high byte.
3890 Op
= DAG
.getNode(ISD::SRL
, DL
, VT
, Op
,
3891 DAG
.getConstant(BitSize
- 8, DL
, VT
));
3896 SDValue
SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op
,
3897 SelectionDAG
&DAG
) const {
3899 AtomicOrdering FenceOrdering
= static_cast<AtomicOrdering
>(
3900 cast
<ConstantSDNode
>(Op
.getOperand(1))->getZExtValue());
3901 SyncScope::ID FenceSSID
= static_cast<SyncScope::ID
>(
3902 cast
<ConstantSDNode
>(Op
.getOperand(2))->getZExtValue());
3904 // The only fence that needs an instruction is a sequentially-consistent
3905 // cross-thread fence.
3906 if (FenceOrdering
== AtomicOrdering::SequentiallyConsistent
&&
3907 FenceSSID
== SyncScope::System
) {
3908 return SDValue(DAG
.getMachineNode(SystemZ::Serialize
, DL
, MVT::Other
,
3913 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
3914 return DAG
.getNode(SystemZISD::MEMBARRIER
, DL
, MVT::Other
, Op
.getOperand(0));
3917 // Op is an atomic load. Lower it into a normal volatile load.
3918 SDValue
SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op
,
3919 SelectionDAG
&DAG
) const {
3920 auto *Node
= cast
<AtomicSDNode
>(Op
.getNode());
3921 return DAG
.getExtLoad(ISD::EXTLOAD
, SDLoc(Op
), Op
.getValueType(),
3922 Node
->getChain(), Node
->getBasePtr(),
3923 Node
->getMemoryVT(), Node
->getMemOperand());
3926 // Op is an atomic store. Lower it into a normal volatile store.
3927 SDValue
SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op
,
3928 SelectionDAG
&DAG
) const {
3929 auto *Node
= cast
<AtomicSDNode
>(Op
.getNode());
3930 SDValue Chain
= DAG
.getTruncStore(Node
->getChain(), SDLoc(Op
), Node
->getVal(),
3931 Node
->getBasePtr(), Node
->getMemoryVT(),
3932 Node
->getMemOperand());
3933 // We have to enforce sequential consistency by performing a
3934 // serialization operation after the store.
3935 if (Node
->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent
)
3936 Chain
= SDValue(DAG
.getMachineNode(SystemZ::Serialize
, SDLoc(Op
),
3937 MVT::Other
, Chain
), 0);
3941 // Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first
3942 // two into the fullword ATOMIC_LOADW_* operation given by Opcode.
3943 SDValue
SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op
,
3945 unsigned Opcode
) const {
3946 auto *Node
= cast
<AtomicSDNode
>(Op
.getNode());
3948 // 32-bit operations need no code outside the main loop.
3949 EVT NarrowVT
= Node
->getMemoryVT();
3950 EVT WideVT
= MVT::i32
;
3951 if (NarrowVT
== WideVT
)
3954 int64_t BitSize
= NarrowVT
.getSizeInBits();
3955 SDValue ChainIn
= Node
->getChain();
3956 SDValue Addr
= Node
->getBasePtr();
3957 SDValue Src2
= Node
->getVal();
3958 MachineMemOperand
*MMO
= Node
->getMemOperand();
3960 EVT PtrVT
= Addr
.getValueType();
3962 // Convert atomic subtracts of constants into additions.
3963 if (Opcode
== SystemZISD::ATOMIC_LOADW_SUB
)
3964 if (auto *Const
= dyn_cast
<ConstantSDNode
>(Src2
)) {
3965 Opcode
= SystemZISD::ATOMIC_LOADW_ADD
;
3966 Src2
= DAG
.getConstant(-Const
->getSExtValue(), DL
, Src2
.getValueType());
3969 // Get the address of the containing word.
3970 SDValue AlignedAddr
= DAG
.getNode(ISD::AND
, DL
, PtrVT
, Addr
,
3971 DAG
.getConstant(-4, DL
, PtrVT
));
3973 // Get the number of bits that the word must be rotated left in order
3974 // to bring the field to the top bits of a GR32.
3975 SDValue BitShift
= DAG
.getNode(ISD::SHL
, DL
, PtrVT
, Addr
,
3976 DAG
.getConstant(3, DL
, PtrVT
));
3977 BitShift
= DAG
.getNode(ISD::TRUNCATE
, DL
, WideVT
, BitShift
);
3979 // Get the complementing shift amount, for rotating a field in the top
3980 // bits back to its proper position.
3981 SDValue NegBitShift
= DAG
.getNode(ISD::SUB
, DL
, WideVT
,
3982 DAG
.getConstant(0, DL
, WideVT
), BitShift
);
3984 // Extend the source operand to 32 bits and prepare it for the inner loop.
3985 // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
3986 // operations require the source to be shifted in advance. (This shift
3987 // can be folded if the source is constant.) For AND and NAND, the lower
3988 // bits must be set, while for other opcodes they should be left clear.
3989 if (Opcode
!= SystemZISD::ATOMIC_SWAPW
)
3990 Src2
= DAG
.getNode(ISD::SHL
, DL
, WideVT
, Src2
,
3991 DAG
.getConstant(32 - BitSize
, DL
, WideVT
));
3992 if (Opcode
== SystemZISD::ATOMIC_LOADW_AND
||
3993 Opcode
== SystemZISD::ATOMIC_LOADW_NAND
)
3994 Src2
= DAG
.getNode(ISD::OR
, DL
, WideVT
, Src2
,
3995 DAG
.getConstant(uint32_t(-1) >> BitSize
, DL
, WideVT
));
3997 // Construct the ATOMIC_LOADW_* node.
3998 SDVTList VTList
= DAG
.getVTList(WideVT
, MVT::Other
);
3999 SDValue Ops
[] = { ChainIn
, AlignedAddr
, Src2
, BitShift
, NegBitShift
,
4000 DAG
.getConstant(BitSize
, DL
, WideVT
) };
4001 SDValue AtomicOp
= DAG
.getMemIntrinsicNode(Opcode
, DL
, VTList
, Ops
,
4004 // Rotate the result of the final CS so that the field is in the lower
4005 // bits of a GR32, then truncate it.
4006 SDValue ResultShift
= DAG
.getNode(ISD::ADD
, DL
, WideVT
, BitShift
,
4007 DAG
.getConstant(BitSize
, DL
, WideVT
));
4008 SDValue Result
= DAG
.getNode(ISD::ROTL
, DL
, WideVT
, AtomicOp
, ResultShift
);
4010 SDValue RetOps
[2] = { Result
, AtomicOp
.getValue(1) };
4011 return DAG
.getMergeValues(RetOps
, DL
);
4014 // Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations
4015 // into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit
4016 // operations into additions.
4017 SDValue
SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op
,
4018 SelectionDAG
&DAG
) const {
4019 auto *Node
= cast
<AtomicSDNode
>(Op
.getNode());
4020 EVT MemVT
= Node
->getMemoryVT();
4021 if (MemVT
== MVT::i32
|| MemVT
== MVT::i64
) {
4022 // A full-width operation.
4023 assert(Op
.getValueType() == MemVT
&& "Mismatched VTs");
4024 SDValue Src2
= Node
->getVal();
4028 if (auto *Op2
= dyn_cast
<ConstantSDNode
>(Src2
)) {
4029 // Use an addition if the operand is constant and either LAA(G) is
4030 // available or the negative value is in the range of A(G)FHI.
4031 int64_t Value
= (-Op2
->getAPIntValue()).getSExtValue();
4032 if (isInt
<32>(Value
) || Subtarget
.hasInterlockedAccess1())
4033 NegSrc2
= DAG
.getConstant(Value
, DL
, MemVT
);
4034 } else if (Subtarget
.hasInterlockedAccess1())
4035 // Use LAA(G) if available.
4036 NegSrc2
= DAG
.getNode(ISD::SUB
, DL
, MemVT
, DAG
.getConstant(0, DL
, MemVT
),
4039 if (NegSrc2
.getNode())
4040 return DAG
.getAtomic(ISD::ATOMIC_LOAD_ADD
, DL
, MemVT
,
4041 Node
->getChain(), Node
->getBasePtr(), NegSrc2
,
4042 Node
->getMemOperand());
4044 // Use the node as-is.
4048 return lowerATOMIC_LOAD_OP(Op
, DAG
, SystemZISD::ATOMIC_LOADW_SUB
);
4051 // Lower 8/16/32/64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS node.
4052 SDValue
SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op
,
4053 SelectionDAG
&DAG
) const {
4054 auto *Node
= cast
<AtomicSDNode
>(Op
.getNode());
4055 SDValue ChainIn
= Node
->getOperand(0);
4056 SDValue Addr
= Node
->getOperand(1);
4057 SDValue CmpVal
= Node
->getOperand(2);
4058 SDValue SwapVal
= Node
->getOperand(3);
4059 MachineMemOperand
*MMO
= Node
->getMemOperand();
4062 // We have native support for 32-bit and 64-bit compare and swap, but we
4063 // still need to expand extracting the "success" result from the CC.
4064 EVT NarrowVT
= Node
->getMemoryVT();
4065 EVT WideVT
= NarrowVT
== MVT::i64
? MVT::i64
: MVT::i32
;
4066 if (NarrowVT
== WideVT
) {
4067 SDVTList Tys
= DAG
.getVTList(WideVT
, MVT::i32
, MVT::Other
);
4068 SDValue Ops
[] = { ChainIn
, Addr
, CmpVal
, SwapVal
};
4069 SDValue AtomicOp
= DAG
.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP
,
4070 DL
, Tys
, Ops
, NarrowVT
, MMO
);
4071 SDValue Success
= emitSETCC(DAG
, DL
, AtomicOp
.getValue(1),
4072 SystemZ::CCMASK_CS
, SystemZ::CCMASK_CS_EQ
);
4074 DAG
.ReplaceAllUsesOfValueWith(Op
.getValue(0), AtomicOp
.getValue(0));
4075 DAG
.ReplaceAllUsesOfValueWith(Op
.getValue(1), Success
);
4076 DAG
.ReplaceAllUsesOfValueWith(Op
.getValue(2), AtomicOp
.getValue(2));
4080 // Convert 8-bit and 16-bit compare and swap to a loop, implemented
4081 // via a fullword ATOMIC_CMP_SWAPW operation.
4082 int64_t BitSize
= NarrowVT
.getSizeInBits();
4083 EVT PtrVT
= Addr
.getValueType();
4085 // Get the address of the containing word.
4086 SDValue AlignedAddr
= DAG
.getNode(ISD::AND
, DL
, PtrVT
, Addr
,
4087 DAG
.getConstant(-4, DL
, PtrVT
));
4089 // Get the number of bits that the word must be rotated left in order
4090 // to bring the field to the top bits of a GR32.
4091 SDValue BitShift
= DAG
.getNode(ISD::SHL
, DL
, PtrVT
, Addr
,
4092 DAG
.getConstant(3, DL
, PtrVT
));
4093 BitShift
= DAG
.getNode(ISD::TRUNCATE
, DL
, WideVT
, BitShift
);
4095 // Get the complementing shift amount, for rotating a field in the top
4096 // bits back to its proper position.
4097 SDValue NegBitShift
= DAG
.getNode(ISD::SUB
, DL
, WideVT
,
4098 DAG
.getConstant(0, DL
, WideVT
), BitShift
);
4100 // Construct the ATOMIC_CMP_SWAPW node.
4101 SDVTList VTList
= DAG
.getVTList(WideVT
, MVT::i32
, MVT::Other
);
4102 SDValue Ops
[] = { ChainIn
, AlignedAddr
, CmpVal
, SwapVal
, BitShift
,
4103 NegBitShift
, DAG
.getConstant(BitSize
, DL
, WideVT
) };
4104 SDValue AtomicOp
= DAG
.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW
, DL
,
4105 VTList
, Ops
, NarrowVT
, MMO
);
4106 SDValue Success
= emitSETCC(DAG
, DL
, AtomicOp
.getValue(1),
4107 SystemZ::CCMASK_ICMP
, SystemZ::CCMASK_CMP_EQ
);
4109 // emitAtomicCmpSwapW() will zero extend the result (original value).
4110 SDValue OrigVal
= DAG
.getNode(ISD::AssertZext
, DL
, WideVT
, AtomicOp
.getValue(0),
4111 DAG
.getValueType(NarrowVT
));
4112 DAG
.ReplaceAllUsesOfValueWith(Op
.getValue(0), OrigVal
);
4113 DAG
.ReplaceAllUsesOfValueWith(Op
.getValue(1), Success
);
4114 DAG
.ReplaceAllUsesOfValueWith(Op
.getValue(2), AtomicOp
.getValue(2));
4118 MachineMemOperand::Flags
4119 SystemZTargetLowering::getTargetMMOFlags(const Instruction
&I
) const {
4120 // Because of how we convert atomic_load and atomic_store to normal loads and
4121 // stores in the DAG, we need to ensure that the MMOs are marked volatile
4122 // since DAGCombine hasn't been updated to account for atomic, but non
4123 // volatile loads. (See D57601)
4124 if (auto *SI
= dyn_cast
<StoreInst
>(&I
))
4126 return MachineMemOperand::MOVolatile
;
4127 if (auto *LI
= dyn_cast
<LoadInst
>(&I
))
4129 return MachineMemOperand::MOVolatile
;
4130 if (auto *AI
= dyn_cast
<AtomicRMWInst
>(&I
))
4132 return MachineMemOperand::MOVolatile
;
4133 if (auto *AI
= dyn_cast
<AtomicCmpXchgInst
>(&I
))
4135 return MachineMemOperand::MOVolatile
;
4136 return MachineMemOperand::MONone
;
4139 SDValue
SystemZTargetLowering::lowerSTACKSAVE(SDValue Op
,
4140 SelectionDAG
&DAG
) const {
4141 MachineFunction
&MF
= DAG
.getMachineFunction();
4142 MF
.getInfo
<SystemZMachineFunctionInfo
>()->setManipulatesSP(true);
4143 if (MF
.getFunction().getCallingConv() == CallingConv::GHC
)
4144 report_fatal_error("Variable-sized stack allocations are not supported "
4145 "in GHC calling convention");
4146 return DAG
.getCopyFromReg(Op
.getOperand(0), SDLoc(Op
),
4147 SystemZ::R15D
, Op
.getValueType());
4150 SDValue
SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op
,
4151 SelectionDAG
&DAG
) const {
4152 MachineFunction
&MF
= DAG
.getMachineFunction();
4153 MF
.getInfo
<SystemZMachineFunctionInfo
>()->setManipulatesSP(true);
4154 bool StoreBackchain
= MF
.getFunction().hasFnAttribute("backchain");
4156 if (MF
.getFunction().getCallingConv() == CallingConv::GHC
)
4157 report_fatal_error("Variable-sized stack allocations are not supported "
4158 "in GHC calling convention");
4160 SDValue Chain
= Op
.getOperand(0);
4161 SDValue NewSP
= Op
.getOperand(1);
4165 if (StoreBackchain
) {
4166 SDValue OldSP
= DAG
.getCopyFromReg(Chain
, DL
, SystemZ::R15D
, MVT::i64
);
4167 Backchain
= DAG
.getLoad(MVT::i64
, DL
, Chain
, getBackchainAddress(OldSP
, DAG
),
4168 MachinePointerInfo());
4171 Chain
= DAG
.getCopyToReg(Chain
, DL
, SystemZ::R15D
, NewSP
);
4174 Chain
= DAG
.getStore(Chain
, DL
, Backchain
, getBackchainAddress(NewSP
, DAG
),
4175 MachinePointerInfo());
4180 SDValue
SystemZTargetLowering::lowerPREFETCH(SDValue Op
,
4181 SelectionDAG
&DAG
) const {
4182 bool IsData
= cast
<ConstantSDNode
>(Op
.getOperand(4))->getZExtValue();
4184 // Just preserve the chain.
4185 return Op
.getOperand(0);
4188 bool IsWrite
= cast
<ConstantSDNode
>(Op
.getOperand(2))->getZExtValue();
4189 unsigned Code
= IsWrite
? SystemZ::PFD_WRITE
: SystemZ::PFD_READ
;
4190 auto *Node
= cast
<MemIntrinsicSDNode
>(Op
.getNode());
4191 SDValue Ops
[] = {Op
.getOperand(0), DAG
.getTargetConstant(Code
, DL
, MVT::i32
),
4193 return DAG
.getMemIntrinsicNode(SystemZISD::PREFETCH
, DL
,
4194 Node
->getVTList(), Ops
,
4195 Node
->getMemoryVT(), Node
->getMemOperand());
4198 // Convert condition code in CCReg to an i32 value.
4199 static SDValue
getCCResult(SelectionDAG
&DAG
, SDValue CCReg
) {
4201 SDValue IPM
= DAG
.getNode(SystemZISD::IPM
, DL
, MVT::i32
, CCReg
);
4202 return DAG
.getNode(ISD::SRL
, DL
, MVT::i32
, IPM
,
4203 DAG
.getConstant(SystemZ::IPM_CC
, DL
, MVT::i32
));
4207 SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op
,
4208 SelectionDAG
&DAG
) const {
4209 unsigned Opcode
, CCValid
;
4210 if (isIntrinsicWithCCAndChain(Op
, Opcode
, CCValid
)) {
4211 assert(Op
->getNumValues() == 2 && "Expected only CC result and chain");
4212 SDNode
*Node
= emitIntrinsicWithCCAndChain(DAG
, Op
, Opcode
);
4213 SDValue CC
= getCCResult(DAG
, SDValue(Node
, 0));
4214 DAG
.ReplaceAllUsesOfValueWith(SDValue(Op
.getNode(), 0), CC
);
4222 SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op
,
4223 SelectionDAG
&DAG
) const {
4224 unsigned Opcode
, CCValid
;
4225 if (isIntrinsicWithCC(Op
, Opcode
, CCValid
)) {
4226 SDNode
*Node
= emitIntrinsicWithCC(DAG
, Op
, Opcode
);
4227 if (Op
->getNumValues() == 1)
4228 return getCCResult(DAG
, SDValue(Node
, 0));
4229 assert(Op
->getNumValues() == 2 && "Expected a CC and non-CC result");
4230 return DAG
.getNode(ISD::MERGE_VALUES
, SDLoc(Op
), Op
->getVTList(),
4231 SDValue(Node
, 0), getCCResult(DAG
, SDValue(Node
, 1)));
4234 unsigned Id
= cast
<ConstantSDNode
>(Op
.getOperand(0))->getZExtValue();
4236 case Intrinsic::thread_pointer
:
4237 return lowerThreadPointer(SDLoc(Op
), DAG
);
4239 case Intrinsic::s390_vpdi
:
4240 return DAG
.getNode(SystemZISD::PERMUTE_DWORDS
, SDLoc(Op
), Op
.getValueType(),
4241 Op
.getOperand(1), Op
.getOperand(2), Op
.getOperand(3));
4243 case Intrinsic::s390_vperm
:
4244 return DAG
.getNode(SystemZISD::PERMUTE
, SDLoc(Op
), Op
.getValueType(),
4245 Op
.getOperand(1), Op
.getOperand(2), Op
.getOperand(3));
4247 case Intrinsic::s390_vuphb
:
4248 case Intrinsic::s390_vuphh
:
4249 case Intrinsic::s390_vuphf
:
4250 return DAG
.getNode(SystemZISD::UNPACK_HIGH
, SDLoc(Op
), Op
.getValueType(),
4253 case Intrinsic::s390_vuplhb
:
4254 case Intrinsic::s390_vuplhh
:
4255 case Intrinsic::s390_vuplhf
:
4256 return DAG
.getNode(SystemZISD::UNPACKL_HIGH
, SDLoc(Op
), Op
.getValueType(),
4259 case Intrinsic::s390_vuplb
:
4260 case Intrinsic::s390_vuplhw
:
4261 case Intrinsic::s390_vuplf
:
4262 return DAG
.getNode(SystemZISD::UNPACK_LOW
, SDLoc(Op
), Op
.getValueType(),
4265 case Intrinsic::s390_vupllb
:
4266 case Intrinsic::s390_vupllh
:
4267 case Intrinsic::s390_vupllf
:
4268 return DAG
.getNode(SystemZISD::UNPACKL_LOW
, SDLoc(Op
), Op
.getValueType(),
4271 case Intrinsic::s390_vsumb
:
4272 case Intrinsic::s390_vsumh
:
4273 case Intrinsic::s390_vsumgh
:
4274 case Intrinsic::s390_vsumgf
:
4275 case Intrinsic::s390_vsumqf
:
4276 case Intrinsic::s390_vsumqg
:
4277 return DAG
.getNode(SystemZISD::VSUM
, SDLoc(Op
), Op
.getValueType(),
4278 Op
.getOperand(1), Op
.getOperand(2));
4285 // Says that SystemZISD operation Opcode can be used to perform the equivalent
4286 // of a VPERM with permute vector Bytes. If Opcode takes three operands,
4287 // Operand is the constant third operand, otherwise it is the number of
4288 // bytes in each element of the result.
4292 unsigned char Bytes
[SystemZ::VectorBytes
];
4296 static const Permute PermuteForms
[] = {
4298 { SystemZISD::MERGE_HIGH
, 8,
4299 { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
4301 { SystemZISD::MERGE_HIGH
, 4,
4302 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
4304 { SystemZISD::MERGE_HIGH
, 2,
4305 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
4307 { SystemZISD::MERGE_HIGH
, 1,
4308 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
4310 { SystemZISD::MERGE_LOW
, 8,
4311 { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
4313 { SystemZISD::MERGE_LOW
, 4,
4314 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
4316 { SystemZISD::MERGE_LOW
, 2,
4317 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
4319 { SystemZISD::MERGE_LOW
, 1,
4320 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
4322 { SystemZISD::PACK
, 4,
4323 { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
4325 { SystemZISD::PACK
, 2,
4326 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
4328 { SystemZISD::PACK
, 1,
4329 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
4330 // VPDI V1, V2, 4 (low half of V1, high half of V2)
4331 { SystemZISD::PERMUTE_DWORDS
, 4,
4332 { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
4333 // VPDI V1, V2, 1 (high half of V1, low half of V2)
4334 { SystemZISD::PERMUTE_DWORDS
, 1,
4335 { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
4338 // Called after matching a vector shuffle against a particular pattern.
4339 // Both the original shuffle and the pattern have two vector operands.
4340 // OpNos[0] is the operand of the original shuffle that should be used for
4341 // operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything.
4342 // OpNos[1] is the same for operand 1 of the pattern. Resolve these -1s and
4343 // set OpNo0 and OpNo1 to the shuffle operands that should actually be used
4344 // for operands 0 and 1 of the pattern.
4345 static bool chooseShuffleOpNos(int *OpNos
, unsigned &OpNo0
, unsigned &OpNo1
) {
4349 OpNo0
= OpNo1
= OpNos
[1];
4350 } else if (OpNos
[1] < 0) {
4351 OpNo0
= OpNo1
= OpNos
[0];
4359 // Bytes is a VPERM-like permute vector, except that -1 is used for
4360 // undefined bytes. Return true if the VPERM can be implemented using P.
4361 // When returning true set OpNo0 to the VPERM operand that should be
4362 // used for operand 0 of P and likewise OpNo1 for operand 1 of P.
4364 // For example, if swapping the VPERM operands allows P to match, OpNo0
4365 // will be 1 and OpNo1 will be 0. If instead Bytes only refers to one
4366 // operand, but rewriting it to use two duplicated operands allows it to
4367 // match P, then OpNo0 and OpNo1 will be the same.
4368 static bool matchPermute(const SmallVectorImpl
<int> &Bytes
, const Permute
&P
,
4369 unsigned &OpNo0
, unsigned &OpNo1
) {
4370 int OpNos
[] = { -1, -1 };
4371 for (unsigned I
= 0; I
< SystemZ::VectorBytes
; ++I
) {
4374 // Make sure that the two permute vectors use the same suboperand
4375 // byte number. Only the operand numbers (the high bits) are
4376 // allowed to differ.
4377 if ((Elt
^ P
.Bytes
[I
]) & (SystemZ::VectorBytes
- 1))
4379 int ModelOpNo
= P
.Bytes
[I
] / SystemZ::VectorBytes
;
4380 int RealOpNo
= unsigned(Elt
) / SystemZ::VectorBytes
;
4381 // Make sure that the operand mappings are consistent with previous
4383 if (OpNos
[ModelOpNo
] == 1 - RealOpNo
)
4385 OpNos
[ModelOpNo
] = RealOpNo
;
4388 return chooseShuffleOpNos(OpNos
, OpNo0
, OpNo1
);
4391 // As above, but search for a matching permute.
4392 static const Permute
*matchPermute(const SmallVectorImpl
<int> &Bytes
,
4393 unsigned &OpNo0
, unsigned &OpNo1
) {
4394 for (auto &P
: PermuteForms
)
4395 if (matchPermute(Bytes
, P
, OpNo0
, OpNo1
))
4400 // Bytes is a VPERM-like permute vector, except that -1 is used for
4401 // undefined bytes. This permute is an operand of an outer permute.
4402 // See whether redistributing the -1 bytes gives a shuffle that can be
4403 // implemented using P. If so, set Transform to a VPERM-like permute vector
4404 // that, when applied to the result of P, gives the original permute in Bytes.
4405 static bool matchDoublePermute(const SmallVectorImpl
<int> &Bytes
,
4407 SmallVectorImpl
<int> &Transform
) {
4409 for (unsigned From
= 0; From
< SystemZ::VectorBytes
; ++From
) {
4410 int Elt
= Bytes
[From
];
4412 // Byte number From of the result is undefined.
4413 Transform
[From
] = -1;
4415 while (P
.Bytes
[To
] != Elt
) {
4417 if (To
== SystemZ::VectorBytes
)
4420 Transform
[From
] = To
;
4426 // As above, but search for a matching permute.
4427 static const Permute
*matchDoublePermute(const SmallVectorImpl
<int> &Bytes
,
4428 SmallVectorImpl
<int> &Transform
) {
4429 for (auto &P
: PermuteForms
)
4430 if (matchDoublePermute(Bytes
, P
, Transform
))
4435 // Convert the mask of the given shuffle op into a byte-level mask,
4436 // as if it had type vNi8.
4437 static bool getVPermMask(SDValue ShuffleOp
,
4438 SmallVectorImpl
<int> &Bytes
) {
4439 EVT VT
= ShuffleOp
.getValueType();
4440 unsigned NumElements
= VT
.getVectorNumElements();
4441 unsigned BytesPerElement
= VT
.getVectorElementType().getStoreSize();
4443 if (auto *VSN
= dyn_cast
<ShuffleVectorSDNode
>(ShuffleOp
)) {
4444 Bytes
.resize(NumElements
* BytesPerElement
, -1);
4445 for (unsigned I
= 0; I
< NumElements
; ++I
) {
4446 int Index
= VSN
->getMaskElt(I
);
4448 for (unsigned J
= 0; J
< BytesPerElement
; ++J
)
4449 Bytes
[I
* BytesPerElement
+ J
] = Index
* BytesPerElement
+ J
;
4453 if (SystemZISD::SPLAT
== ShuffleOp
.getOpcode() &&
4454 isa
<ConstantSDNode
>(ShuffleOp
.getOperand(1))) {
4455 unsigned Index
= ShuffleOp
.getConstantOperandVal(1);
4456 Bytes
.resize(NumElements
* BytesPerElement
, -1);
4457 for (unsigned I
= 0; I
< NumElements
; ++I
)
4458 for (unsigned J
= 0; J
< BytesPerElement
; ++J
)
4459 Bytes
[I
* BytesPerElement
+ J
] = Index
* BytesPerElement
+ J
;
4465 // Bytes is a VPERM-like permute vector, except that -1 is used for
4466 // undefined bytes. See whether bytes [Start, Start + BytesPerElement) of
4467 // the result come from a contiguous sequence of bytes from one input.
4468 // Set Base to the selector for the first byte if so.
4469 static bool getShuffleInput(const SmallVectorImpl
<int> &Bytes
, unsigned Start
,
4470 unsigned BytesPerElement
, int &Base
) {
4472 for (unsigned I
= 0; I
< BytesPerElement
; ++I
) {
4473 if (Bytes
[Start
+ I
] >= 0) {
4474 unsigned Elem
= Bytes
[Start
+ I
];
4477 // Make sure the bytes would come from one input operand.
4478 if (unsigned(Base
) % Bytes
.size() + BytesPerElement
> Bytes
.size())
4480 } else if (unsigned(Base
) != Elem
- I
)
4487 // Bytes is a VPERM-like permute vector, except that -1 is used for
4488 // undefined bytes. Return true if it can be performed using VSLDB.
4489 // When returning true, set StartIndex to the shift amount and OpNo0
4490 // and OpNo1 to the VPERM operands that should be used as the first
4491 // and second shift operand respectively.
4492 static bool isShlDoublePermute(const SmallVectorImpl
<int> &Bytes
,
4493 unsigned &StartIndex
, unsigned &OpNo0
,
4495 int OpNos
[] = { -1, -1 };
4497 for (unsigned I
= 0; I
< 16; ++I
) {
4498 int Index
= Bytes
[I
];
4500 int ExpectedShift
= (Index
- I
) % SystemZ::VectorBytes
;
4501 int ModelOpNo
= unsigned(ExpectedShift
+ I
) / SystemZ::VectorBytes
;
4502 int RealOpNo
= unsigned(Index
) / SystemZ::VectorBytes
;
4504 Shift
= ExpectedShift
;
4505 else if (Shift
!= ExpectedShift
)
4507 // Make sure that the operand mappings are consistent with previous
4509 if (OpNos
[ModelOpNo
] == 1 - RealOpNo
)
4511 OpNos
[ModelOpNo
] = RealOpNo
;
4515 return chooseShuffleOpNos(OpNos
, OpNo0
, OpNo1
);
4518 // Create a node that performs P on operands Op0 and Op1, casting the
4519 // operands to the appropriate type. The type of the result is determined by P.
4520 static SDValue
getPermuteNode(SelectionDAG
&DAG
, const SDLoc
&DL
,
4521 const Permute
&P
, SDValue Op0
, SDValue Op1
) {
4522 // VPDI (PERMUTE_DWORDS) always operates on v2i64s. The input
4523 // elements of a PACK are twice as wide as the outputs.
4524 unsigned InBytes
= (P
.Opcode
== SystemZISD::PERMUTE_DWORDS
? 8 :
4525 P
.Opcode
== SystemZISD::PACK
? P
.Operand
* 2 :
4527 // Cast both operands to the appropriate type.
4528 MVT InVT
= MVT::getVectorVT(MVT::getIntegerVT(InBytes
* 8),
4529 SystemZ::VectorBytes
/ InBytes
);
4530 Op0
= DAG
.getNode(ISD::BITCAST
, DL
, InVT
, Op0
);
4531 Op1
= DAG
.getNode(ISD::BITCAST
, DL
, InVT
, Op1
);
4533 if (P
.Opcode
== SystemZISD::PERMUTE_DWORDS
) {
4534 SDValue Op2
= DAG
.getTargetConstant(P
.Operand
, DL
, MVT::i32
);
4535 Op
= DAG
.getNode(SystemZISD::PERMUTE_DWORDS
, DL
, InVT
, Op0
, Op1
, Op2
);
4536 } else if (P
.Opcode
== SystemZISD::PACK
) {
4537 MVT OutVT
= MVT::getVectorVT(MVT::getIntegerVT(P
.Operand
* 8),
4538 SystemZ::VectorBytes
/ P
.Operand
);
4539 Op
= DAG
.getNode(SystemZISD::PACK
, DL
, OutVT
, Op0
, Op1
);
4541 Op
= DAG
.getNode(P
.Opcode
, DL
, InVT
, Op0
, Op1
);
4546 static bool isZeroVector(SDValue N
) {
4547 if (N
->getOpcode() == ISD::BITCAST
)
4548 N
= N
->getOperand(0);
4549 if (N
->getOpcode() == ISD::SPLAT_VECTOR
)
4550 if (auto *Op
= dyn_cast
<ConstantSDNode
>(N
->getOperand(0)))
4551 return Op
->getZExtValue() == 0;
4552 return ISD::isBuildVectorAllZeros(N
.getNode());
4555 // Return the index of the zero/undef vector, or UINT32_MAX if not found.
4556 static uint32_t findZeroVectorIdx(SDValue
*Ops
, unsigned Num
) {
4557 for (unsigned I
= 0; I
< Num
; I
++)
4558 if (isZeroVector(Ops
[I
]))
// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  Implement it on operands Ops[0] and Ops[1] using
// VSLDB or VPERM.
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
                                     SDValue *Ops,
                                     const SmallVectorImpl<int> &Bytes) {
  for (unsigned I = 0; I < 2; ++I)
    Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]);

  // First see whether VSLDB can be used.
  unsigned StartIndex, OpNo0, OpNo1;
  if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1))
    return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0],
                       Ops[OpNo1],
                       DAG.getTargetConstant(StartIndex, DL, MVT::i32));

  // Fall back on VPERM.  Construct an SDNode for the permute vector.  Try to
  // eliminate a zero vector by reusing any zero index in the permute vector.
  unsigned ZeroVecIdx = findZeroVectorIdx(&Ops[0], 2);
  if (ZeroVecIdx != UINT32_MAX) {
    bool MaskFirst = true;
    int ZeroIdx = -1;
    for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) {
      unsigned OpNo = unsigned(Bytes[I]) / SystemZ::VectorBytes;
      unsigned Byte = unsigned(Bytes[I]) % SystemZ::VectorBytes;
      if (OpNo == ZeroVecIdx && I == 0) {
        // If the first byte is zero, use mask as first operand.
        ZeroIdx = 0;
        break;
      }
      if (OpNo != ZeroVecIdx && Byte == 0) {
        // If mask contains a zero, use it by placing that vector first.
        ZeroIdx = I + SystemZ::VectorBytes;
        MaskFirst = false;
        break;
      }
    }
    if (ZeroIdx != -1) {
      SDValue IndexNodes[SystemZ::VectorBytes];
      for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) {
        if (Bytes[I] >= 0) {
          unsigned OpNo = unsigned(Bytes[I]) / SystemZ::VectorBytes;
          unsigned Byte = unsigned(Bytes[I]) % SystemZ::VectorBytes;
          if (OpNo == ZeroVecIdx)
            IndexNodes[I] = DAG.getConstant(ZeroIdx, DL, MVT::i32);
          else {
            unsigned BIdx = MaskFirst ? Byte + SystemZ::VectorBytes : Byte;
            IndexNodes[I] = DAG.getConstant(BIdx, DL, MVT::i32);
          }
        } else
          IndexNodes[I] = DAG.getUNDEF(MVT::i32);
      }
      SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes);
      SDValue Src = ZeroVecIdx == 0 ? Ops[1] : Ops[0];
      if (MaskFirst)
        return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Mask, Src,
                           Mask);
      else
        return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Src, Mask,
                           Mask);
    }
  }

  SDValue IndexNodes[SystemZ::VectorBytes];
  for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
    if (Bytes[I] >= 0)
      IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32);
    else
      IndexNodes[I] = DAG.getUNDEF(MVT::i32);
  SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes);
  return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0],
                     (!Ops[1].isUndef() ? Ops[1] : Ops[0]), Op2);
}
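// Illustrative example: if Ops[1] is a zero vector and Bytes starts with a
// byte taken from that zero vector, ZeroIdx becomes 0 and the mask itself
// supplies the zero (its own byte 0 is the constant 0), so the zero operand
// can be dropped and a single VPERM of (Mask, Ops[0], Mask) is emitted.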
// Describes a general N-operand vector shuffle.
struct GeneralShuffle {
  GeneralShuffle(EVT vt) : VT(vt), UnpackFromEltSize(UINT_MAX) {}
  void addUndef();
  bool add(SDValue, unsigned);
  SDValue getNode(SelectionDAG &, const SDLoc &);
  void tryPrepareForUnpack();
  bool unpackWasPrepared() { return UnpackFromEltSize <= 4; }
  SDValue insertUnpackIfPrepared(SelectionDAG &DAG, const SDLoc &DL, SDValue Op);

  // The operands of the shuffle.
  SmallVector<SDValue, SystemZ::VectorBytes> Ops;

  // Index I is -1 if byte I of the result is undefined.  Otherwise the
  // result comes from byte Bytes[I] % SystemZ::VectorBytes of operand
  // Bytes[I] / SystemZ::VectorBytes.
  SmallVector<int, SystemZ::VectorBytes> Bytes;

  // The type of the shuffle result.
  EVT VT;

  // Holds a value of 1, 2 or 4 if a final unpack has been prepared for.
  unsigned UnpackFromEltSize;
};
// Add an extra undefined element to the shuffle.
void GeneralShuffle::addUndef() {
  unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(-1);
}
// Add an extra element to the shuffle, taking it from element Elem of Op.
// A null Op indicates a vector input whose value will be calculated later;
// there is at most one such input per shuffle and it always has the same
// type as the result. Aborts and returns false if the source vector elements
// of an EXTRACT_VECTOR_ELT are smaller than the destination elements. Per
// LLVM they become implicitly extended, but this is rare and not optimized.
bool GeneralShuffle::add(SDValue Op, unsigned Elem) {
  unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();

  // The source vector can have wider elements than the result,
  // either through an explicit TRUNCATE or because of type legalization.
  // We want the least significant part.
  EVT FromVT = Op.getNode() ? Op.getValueType() : VT;
  unsigned FromBytesPerElement = FromVT.getVectorElementType().getStoreSize();

  // Return false if the source elements are smaller than their destination
  // elements.
  if (FromBytesPerElement < BytesPerElement)
    return false;

  unsigned Byte = ((Elem * FromBytesPerElement) % SystemZ::VectorBytes +
                   (FromBytesPerElement - BytesPerElement));

  // Look through things like shuffles and bitcasts.
  while (Op.getNode()) {
    if (Op.getOpcode() == ISD::BITCAST)
      Op = Op.getOperand(0);
    else if (Op.getOpcode() == ISD::VECTOR_SHUFFLE && Op.hasOneUse()) {
      // See whether the bytes we need come from a contiguous part of one
      // operand.
      SmallVector<int, SystemZ::VectorBytes> OpBytes;
      if (!getVPermMask(Op, OpBytes))
        break;
      int NewByte;
      if (!getShuffleInput(OpBytes, Byte, BytesPerElement, NewByte))
        break;
      if (NewByte < 0) {
        addUndef();
        return true;
      }
      Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes);
      Byte = unsigned(NewByte) % SystemZ::VectorBytes;
    } else if (Op.isUndef()) {
      addUndef();
      return true;
    } else
      break;
  }

  // Make sure that the source of the extraction is in Ops.
  unsigned OpNo = 0;
  for (; OpNo < Ops.size(); ++OpNo)
    if (Ops[OpNo] == Op)
      break;
  if (OpNo == Ops.size())
    Ops.push_back(Op);

  // Add the element to Bytes.
  unsigned Base = OpNo * SystemZ::VectorBytes + Byte;
  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(Base + I);

  return true;
}
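// Note (illustrative): each entry of Bytes encodes both the operand and the
// byte within it as OpNo * SystemZ::VectorBytes + Byte.  For example a value
// of 19 means byte 3 of operand 1, and -1 marks an undefined result byte.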
// Return SDNodes for the completed shuffle.
SDValue GeneralShuffle::getNode(SelectionDAG &DAG, const SDLoc &DL) {
  assert(Bytes.size() == SystemZ::VectorBytes && "Incomplete vector");

  if (Ops.size() == 0)
    return DAG.getUNDEF(VT);

  // Use a single unpack if possible as the last operation.
  tryPrepareForUnpack();

  // Make sure that there are at least two shuffle operands.
  if (Ops.size() == 1)
    Ops.push_back(DAG.getUNDEF(MVT::v16i8));

  // Create a tree of shuffles, deferring root node until after the loop.
  // Try to redistribute the undefined elements of non-root nodes so that
  // the non-root shuffles match something like a pack or merge, then adjust
  // the parent node's permute vector to compensate for the new order.
  // Among other things, this copes with vectors like <2 x i16> that were
  // padded with undefined elements during type legalization.
  //
  // In the best case this redistribution will lead to the whole tree
  // using packs and merges.  It should rarely be a loss in other cases.
  unsigned Stride = 1;
  for (; Stride * 2 < Ops.size(); Stride *= 2) {
    for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
      SDValue SubOps[] = { Ops[I], Ops[I + Stride] };

      // Create a mask for just these two operands.
      SmallVector<int, SystemZ::VectorBytes> NewBytes(SystemZ::VectorBytes);
      for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
        unsigned OpNo = unsigned(Bytes[J]) / SystemZ::VectorBytes;
        unsigned Byte = unsigned(Bytes[J]) % SystemZ::VectorBytes;
        if (OpNo == I)
          NewBytes[J] = Byte;
        else if (OpNo == I + Stride)
          NewBytes[J] = SystemZ::VectorBytes + Byte;
        else
          NewBytes[J] = -1;
      }
      // See if it would be better to reorganize NewMask to avoid using VPERM.
      SmallVector<int, SystemZ::VectorBytes> NewBytesMap(SystemZ::VectorBytes);
      if (const Permute *P = matchDoublePermute(NewBytes, NewBytesMap)) {
        Ops[I] = getPermuteNode(DAG, DL, *P, SubOps[0], SubOps[1]);
        // Applying NewBytesMap to Ops[I] gets back to NewBytes.
        for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
          if (NewBytes[J] >= 0) {
            assert(unsigned(NewBytesMap[J]) < SystemZ::VectorBytes &&
                   "Invalid double permute");
            Bytes[J] = I * SystemZ::VectorBytes + NewBytesMap[J];
          } else
            assert(NewBytesMap[J] < 0 && "Invalid double permute");
        }
      } else {
        // Just use NewBytes on the operands.
        Ops[I] = getGeneralPermuteNode(DAG, DL, SubOps, NewBytes);
        for (unsigned J = 0; J < SystemZ::VectorBytes; ++J)
          if (NewBytes[J] >= 0)
            Bytes[J] = I * SystemZ::VectorBytes + J;
      }
    }
  }

  // Now we just have 2 inputs.  Put the second operand in Ops[1].
  if (Stride > 1) {
    Ops[1] = Ops[Stride];
    for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
      if (Bytes[I] >= int(SystemZ::VectorBytes))
        Bytes[I] -= (Stride - 1) * SystemZ::VectorBytes;
  }

  // Look for an instruction that can do the permute without resorting
  // to VPERM.
  unsigned OpNo0, OpNo1;
  SDValue Op;
  if (unpackWasPrepared() && Ops[1].isUndef())
    Op = Ops[0];
  else if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1))
    Op = getPermuteNode(DAG, DL, *P, Ops[OpNo0], Ops[OpNo1]);
  else
    Op = getGeneralPermuteNode(DAG, DL, &Ops[0], Bytes);

  Op = insertUnpackIfPrepared(DAG, DL, Op);

  return DAG.getNode(ISD::BITCAST, DL, VT, Op);
}
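// Illustrative walk-through: with four operands the loop only runs with
// Stride = 1, shuffling (Ops[0], Ops[1]) into Ops[0] and (Ops[2], Ops[3])
// into Ops[2]; the code after the loop then moves Ops[2] into Ops[1] and a
// single root permute combines the two partial results.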
static void dumpBytes(const SmallVectorImpl<int> &Bytes, std::string Msg) {
  dbgs() << Msg.c_str() << " { ";
  for (unsigned i = 0; i < Bytes.size(); i++)
    dbgs() << Bytes[i] << " ";
  dbgs() << "}\n";
}
// If the Bytes vector matches an unpack operation, prepare to do the unpack
// after all else by removing the zero vector and the effect of the unpack on
// Bytes.
void GeneralShuffle::tryPrepareForUnpack() {
  uint32_t ZeroVecOpNo = findZeroVectorIdx(&Ops[0], Ops.size());
  if (ZeroVecOpNo == UINT32_MAX || Ops.size() == 1)
    return;

  // Only do this if removing the zero vector reduces the depth, otherwise
  // the critical path will increase with the final unpack.
  if (Ops.size() > 2 &&
      Log2_32_Ceil(Ops.size()) == Log2_32_Ceil(Ops.size() - 1))
    return;

  // Find an unpack that would allow removing the zero vector from Ops.
  UnpackFromEltSize = 1;
  for (; UnpackFromEltSize <= 4; UnpackFromEltSize *= 2) {
    bool MatchUnpack = true;
    SmallVector<int, SystemZ::VectorBytes> SrcBytes;
    for (unsigned Elt = 0; Elt < SystemZ::VectorBytes; Elt++) {
      unsigned ToEltSize = UnpackFromEltSize * 2;
      bool IsZextByte = (Elt % ToEltSize) < UnpackFromEltSize;
      if (!IsZextByte)
        SrcBytes.push_back(Bytes[Elt]);
      if (Bytes[Elt] != -1) {
        unsigned OpNo = unsigned(Bytes[Elt]) / SystemZ::VectorBytes;
        if (IsZextByte != (OpNo == ZeroVecOpNo)) {
          MatchUnpack = false;
          break;
        }
      }
    }
    if (MatchUnpack) {
      if (Ops.size() == 2) {
        // Don't use unpack if a single source operand needs rearrangement.
        for (unsigned i = 0; i < SystemZ::VectorBytes / 2; i++)
          if (SrcBytes[i] != -1 && SrcBytes[i] % 16 != int(i)) {
            UnpackFromEltSize = UINT_MAX;
            return;
          }
      }
      break;
    }
  }
  if (UnpackFromEltSize > 4)
    return;

  LLVM_DEBUG(dbgs() << "Preparing for final unpack of element size "
             << UnpackFromEltSize << ". Zero vector is Op#" << ZeroVecOpNo
             << ".\n";
             dumpBytes(Bytes, "Original Bytes vector:"););

  // Apply the unpack in reverse to the Bytes array.
  unsigned B = 0;
  for (unsigned Elt = 0; Elt < SystemZ::VectorBytes;) {
    Elt += UnpackFromEltSize;
    for (unsigned i = 0; i < UnpackFromEltSize; i++, Elt++, B++)
      Bytes[B] = Bytes[Elt];
  }
  while (B < SystemZ::VectorBytes)
    Bytes[B++] = -1;

  // Remove the zero vector from Ops
  Ops.erase(&Ops[ZeroVecOpNo]);
  for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
    if (Bytes[I] >= 0) {
      unsigned OpNo = unsigned(Bytes[I]) / SystemZ::VectorBytes;
      if (OpNo > ZeroVecOpNo)
        Bytes[I] -= SystemZ::VectorBytes;
    }

  LLVM_DEBUG(dumpBytes(Bytes, "Resulting Bytes vector, zero vector removed:");
             dbgs() << "\n";);
}
SDValue GeneralShuffle::insertUnpackIfPrepared(SelectionDAG &DAG,
                                               const SDLoc &DL,
                                               SDValue Op) {
  if (!unpackWasPrepared())
    return Op;
  unsigned InBits = UnpackFromEltSize * 8;
  EVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBits),
                              SystemZ::VectorBits / InBits);
  SDValue PackedOp = DAG.getNode(ISD::BITCAST, DL, InVT, Op);
  unsigned OutBits = InBits * 2;
  EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(OutBits),
                               SystemZ::VectorBits / OutBits);
  return DAG.getNode(SystemZISD::UNPACKL_HIGH, DL, OutVT, PackedOp);
}
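// Example (illustrative): with UnpackFromEltSize == 2 the packed value is
// bitcast to v8i16 and a single UNPACKL_HIGH widens it to v4i32, supplying
// the zero half of each element that tryPrepareForUnpack removed from Bytes.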
// Return true if the given BUILD_VECTOR is a scalar-to-vector conversion.
static bool isScalarToVector(SDValue Op) {
  for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I)
    if (!Op.getOperand(I).isUndef())
      return false;
  return true;
}
// Return a vector of type VT that contains Value in the first element.
// The other elements don't matter.
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                   SDValue Value) {
  // If we have a constant, replicate it to all elements and let the
  // BUILD_VECTOR lowering take care of it.
  if (Value.getOpcode() == ISD::Constant ||
      Value.getOpcode() == ISD::ConstantFP) {
    SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value);
    return DAG.getBuildVector(VT, DL, Ops);
  }
  if (Value.isUndef())
    return DAG.getUNDEF(VT);
  return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
}
// Return a vector of type VT in which Op0 is in element 0 and Op1 is in
// element 1.  Used for cases in which replication is cheap.
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                 SDValue Op0, SDValue Op1) {
  if (Op0.isUndef()) {
    if (Op1.isUndef())
      return DAG.getUNDEF(VT);
    return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1);
  }
  if (Op1.isUndef())
    return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0);
  return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT,
                     buildScalarToVector(DAG, DL, VT, Op0),
                     buildScalarToVector(DAG, DL, VT, Op1));
}
// Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64
// vector for them.
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0,
                          SDValue Op1) {
  if (Op0.isUndef() && Op1.isUndef())
    return DAG.getUNDEF(MVT::v2i64);
  // If one of the two inputs is undefined then replicate the other one,
  // in order to avoid using another register unnecessarily.
  if (Op0.isUndef())
    Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
  else if (Op1.isUndef())
    Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
  else {
    Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
    Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
  }
  return DAG.getNode(SystemZISD::JOIN_DWORDS, DL, MVT::v2i64, Op0, Op1);
}
// If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually
// better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for
// the non-EXTRACT_VECTOR_ELT elements.  See if the given BUILD_VECTOR
// would benefit from this representation and return it if so.
static SDValue tryBuildVectorShuffle(SelectionDAG &DAG,
                                     BuildVectorSDNode *BVN) {
  EVT VT = BVN->getValueType(0);
  unsigned NumElements = VT.getVectorNumElements();

  // Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation
  // on byte vectors.  If there are non-EXTRACT_VECTOR_ELT elements that still
  // need a BUILD_VECTOR, add an additional placeholder operand for that
  // BUILD_VECTOR and store its operands in ResidueOps.
  GeneralShuffle GS(VT);
  SmallVector<SDValue, SystemZ::VectorBytes> ResidueOps;
  bool FoundOne = false;
  for (unsigned I = 0; I < NumElements; ++I) {
    SDValue Op = BVN->getOperand(I);
    if (Op.getOpcode() == ISD::TRUNCATE)
      Op = Op.getOperand(0);
    if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
        Op.getOperand(1).getOpcode() == ISD::Constant) {
      unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
      if (!GS.add(Op.getOperand(0), Elem))
        return SDValue();
      FoundOne = true;
    } else if (Op.isUndef()) {
      GS.addUndef();
    } else {
      if (!GS.add(SDValue(), ResidueOps.size()))
        return SDValue();
      ResidueOps.push_back(BVN->getOperand(I));
    }
  }

  // Nothing to do if there are no EXTRACT_VECTOR_ELTs.
  if (!FoundOne)
    return SDValue();

  // Create the BUILD_VECTOR for the remaining elements, if any.
  if (!ResidueOps.empty()) {
    while (ResidueOps.size() < NumElements)
      ResidueOps.push_back(DAG.getUNDEF(ResidueOps[0].getValueType()));
    for (auto &Op : GS.Ops) {
      if (!Op.getNode()) {
        Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps);
        break;
      }
    }
  }
  return GS.getNode(DAG, SDLoc(BVN));
}
bool SystemZTargetLowering::isVectorElementLoad(SDValue Op) const {
  if (Op.getOpcode() == ISD::LOAD && cast<LoadSDNode>(Op)->isUnindexed())
    return true;
  if (Subtarget.hasVectorEnhancements2() && Op.getOpcode() == SystemZISD::LRV)
    return true;
  return false;
}
// Combine GPR scalar values Elems into a vector of type VT.
SDValue
SystemZTargetLowering::buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                   SmallVectorImpl<SDValue> &Elems) const {
  // See whether there is a single replicated value.
  SDValue Single;
  unsigned int NumElements = Elems.size();
  unsigned int Count = 0;
  for (auto Elem : Elems) {
    if (!Elem.isUndef()) {
      if (!Single.getNode())
        Single = Elem;
      else if (Elem != Single) {
        Single = SDValue();
        break;
      }
      Count += 1;
    }
  }
  // There are three cases here:
  //
  // - if the only defined element is a loaded one, the best sequence
  //   is a replicating load.
  //
  // - otherwise, if the only defined element is an i64 value, we will
  //   end up with the same VLVGP sequence regardless of whether we short-cut
  //   for replication or fall through to the later code.
  //
  // - otherwise, if the only defined element is an i32 or smaller value,
  //   we would need 2 instructions to replicate it: VLVGP followed by VREPx.
  //   This is only a win if the single defined element is used more than once.
  //   In other cases we're better off using a single VLVGx.
  if (Single.getNode() && (Count > 1 || isVectorElementLoad(Single)))
    return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Single);

  // If all elements are loads, use VLREP/VLEs (below).
  bool AllLoads = true;
  for (auto Elem : Elems)
    if (!isVectorElementLoad(Elem)) {
      AllLoads = false;
      break;
    }

  // The best way of building a v2i64 from two i64s is to use VLVGP.
  if (VT == MVT::v2i64 && !AllLoads)
    return joinDwords(DAG, DL, Elems[0], Elems[1]);

  // Use a 64-bit merge high to combine two doubles.
  if (VT == MVT::v2f64 && !AllLoads)
    return buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);

  // Build v4f32 values directly from the FPRs:
  //
  //   <Axxx> <Bxxx> <Cxxx> <Dxxx>
  //         V              V         VMRHF
  //      <ABxx>         <CDxx>
  //                 V                VMRHG
  //              <ABCD>
  if (VT == MVT::v4f32 && !AllLoads) {
    SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);
    SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]);
    // Avoid unnecessary undefs by reusing the other operand.
    if (Op01.isUndef())
      Op01 = Op23;
    else if (Op23.isUndef())
      Op23 = Op01;
    // Merging identical replications is a no-op.
    if (Op01.getOpcode() == SystemZISD::REPLICATE && Op01 == Op23)
      return Op01;
    Op01 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op01);
    Op23 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op23);
    SDValue Op = DAG.getNode(SystemZISD::MERGE_HIGH,
                             DL, MVT::v2i64, Op01, Op23);
    return DAG.getNode(ISD::BITCAST, DL, VT, Op);
  }

  // Collect the constant terms.
  SmallVector<SDValue, SystemZ::VectorBytes> Constants(NumElements, SDValue());
  SmallVector<bool, SystemZ::VectorBytes> Done(NumElements, false);

  unsigned NumConstants = 0;
  for (unsigned I = 0; I < NumElements; ++I) {
    SDValue Elem = Elems[I];
    if (Elem.getOpcode() == ISD::Constant ||
        Elem.getOpcode() == ISD::ConstantFP) {
      NumConstants += 1;
      Constants[I] = Elem;
      Done[I] = true;
    }
  }
  // If there was at least one constant, fill in the other elements of
  // Constants with undefs to get a full vector constant and use that
  // as the starting point.
  SDValue Result;
  SDValue ReplicatedVal;
  if (NumConstants > 0) {
    for (unsigned I = 0; I < NumElements; ++I)
      if (!Constants[I].getNode())
        Constants[I] = DAG.getUNDEF(Elems[I].getValueType());
    Result = DAG.getBuildVector(VT, DL, Constants);
  } else {
    // Otherwise try to use VLREP or VLVGP to start the sequence in order to
    // avoid a false dependency on any previous contents of the vector
    // register.

    // Use a VLREP if at least one element is a load. Make sure to replicate
    // the load with the most elements having its value.
    std::map<const SDNode*, unsigned> UseCounts;
    SDNode *LoadMaxUses = nullptr;
    for (unsigned I = 0; I < NumElements; ++I)
      if (isVectorElementLoad(Elems[I])) {
        SDNode *Ld = Elems[I].getNode();
        UseCounts[Ld]++;
        if (LoadMaxUses == nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld])
          LoadMaxUses = Ld;
      }
    if (LoadMaxUses != nullptr) {
      ReplicatedVal = SDValue(LoadMaxUses, 0);
      Result = DAG.getNode(SystemZISD::REPLICATE, DL, VT, ReplicatedVal);
    } else {
      // Try to use VLVGP.
      unsigned I1 = NumElements / 2 - 1;
      unsigned I2 = NumElements - 1;
      bool Def1 = !Elems[I1].isUndef();
      bool Def2 = !Elems[I2].isUndef();
      if (Def1 || Def2) {
        SDValue Elem1 = Elems[Def1 ? I1 : I2];
        SDValue Elem2 = Elems[Def2 ? I2 : I1];
        Result = DAG.getNode(ISD::BITCAST, DL, VT,
                             joinDwords(DAG, DL, Elem1, Elem2));
        Done[I1] = true;
        Done[I2] = true;
      } else
        Result = DAG.getUNDEF(VT);
    }
  }

  // Use VLVGx to insert the other elements.
  for (unsigned I = 0; I < NumElements; ++I)
    if (!Done[I] && !Elems[I].isUndef() && Elems[I] != ReplicatedVal)
      Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Result, Elems[I],
                           DAG.getConstant(I, DL, MVT::i32));
  return Result;
}
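// Illustrative example: building a v4i32 from four non-constant GPR values
// first joins elements 1 and 3 (I1 = 1, I2 = 3) via joinDwords/VLVGP, then
// inserts the remaining elements 0 and 2 with INSERT_VECTOR_ELT (VLVGx).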
SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  auto *BVN = cast<BuildVectorSDNode>(Op.getNode());
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  if (BVN->isConstant()) {
    if (SystemZVectorConstantInfo(BVN).isVectorConstantLegal(Subtarget))
      return Op;

    // Fall back to loading it from memory.
    return SDValue();
  }

  // See if we should use shuffles to construct the vector from other vectors.
  if (SDValue Res = tryBuildVectorShuffle(DAG, BVN))
    return Res;

  // Detect SCALAR_TO_VECTOR conversions.
  if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op))
    return buildScalarToVector(DAG, DL, VT, Op.getOperand(0));

  // Otherwise use buildVector to build the vector up from GPRs.
  unsigned NumElements = Op.getNumOperands();
  SmallVector<SDValue, SystemZ::VectorBytes> Ops(NumElements);
  for (unsigned I = 0; I < NumElements; ++I)
    Ops[I] = Op.getOperand(I);
  return buildVector(DAG, DL, VT, Ops);
}
SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
                                                   SelectionDAG &DAG) const {
  auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode());
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  unsigned NumElements = VT.getVectorNumElements();

  if (VSN->isSplat()) {
    SDValue Op0 = Op.getOperand(0);
    unsigned Index = VSN->getSplatIndex();
    assert(Index < VT.getVectorNumElements() &&
           "Splat index should be defined and in first operand");
    // See whether the value we're splatting is directly available as a scalar.
    if ((Index == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
        Op0.getOpcode() == ISD::BUILD_VECTOR)
      return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0.getOperand(Index));
    // Otherwise keep it as a vector-to-vector operation.
    return DAG.getNode(SystemZISD::SPLAT, DL, VT, Op.getOperand(0),
                       DAG.getTargetConstant(Index, DL, MVT::i32));
  }

  GeneralShuffle GS(VT);
  for (unsigned I = 0; I < NumElements; ++I) {
    int Elt = VSN->getMaskElt(I);
    if (Elt < 0)
      GS.addUndef();
    else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
                     unsigned(Elt) % NumElements))
      return SDValue();
  }
  return GS.getNode(DAG, SDLoc(VSN));
}
SDValue SystemZTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // Just insert the scalar into element 0 of an undefined vector.
  return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL,
                     Op.getValueType(), DAG.getUNDEF(Op.getValueType()),
                     Op.getOperand(0), DAG.getConstant(0, DL, MVT::i32));
}
SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                      SelectionDAG &DAG) const {
  // Handle insertions of floating-point values.
  SDLoc DL(Op);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);
  EVT VT = Op.getValueType();

  // Insertions into constant indices of a v2f64 can be done using VPDI.
  // However, if the inserted value is a bitcast or a constant then it's
  // better to use GPRs, as below.
  if (VT == MVT::v2f64 &&
      Op1.getOpcode() != ISD::BITCAST &&
      Op1.getOpcode() != ISD::ConstantFP &&
      Op2.getOpcode() == ISD::Constant) {
    uint64_t Index = cast<ConstantSDNode>(Op2)->getZExtValue();
    unsigned Mask = VT.getVectorNumElements() - 1;
    if (Index <= Mask)
      return Op;
  }

  // Otherwise bitcast to the equivalent integer form and insert via a GPR.
  MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
  MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements());
  SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT,
                            DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0),
                            DAG.getNode(ISD::BITCAST, DL, IntVT, Op1), Op2);
  return DAG.getNode(ISD::BITCAST, DL, VT, Res);
}
SDValue
SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                               SelectionDAG &DAG) const {
  // Handle extractions of floating-point values.
  SDLoc DL(Op);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  EVT VecVT = Op0.getValueType();

  // Extractions of constant indices can be done directly.
  if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {
    uint64_t Index = CIndexN->getZExtValue();
    unsigned Mask = VecVT.getVectorNumElements() - 1;
    if (Index <= Mask)
      return Op;
  }

  // Otherwise bitcast to the equivalent integer form and extract via a GPR.
  MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits());
  MVT IntVecVT = MVT::getVectorVT(IntVT, VecVT.getVectorNumElements());
  SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntVT,
                            DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), Op1);
  return DAG.getNode(ISD::BITCAST, DL, VT, Res);
}
SDValue SystemZTargetLowering::
lowerSIGN_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const {
  SDValue PackedOp = Op.getOperand(0);
  EVT OutVT = Op.getValueType();
  EVT InVT = PackedOp.getValueType();
  unsigned ToBits = OutVT.getScalarSizeInBits();
  unsigned FromBits = InVT.getScalarSizeInBits();
  do {
    FromBits *= 2;
    EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits),
                                 SystemZ::VectorBits / FromBits);
    PackedOp =
      DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(PackedOp), OutVT, PackedOp);
  } while (FromBits != ToBits);
  return PackedOp;
}
// Lower a ZERO_EXTEND_VECTOR_INREG to a vector shuffle with a zero vector.
SDValue SystemZTargetLowering::
lowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const {
  SDValue PackedOp = Op.getOperand(0);
  SDLoc DL(Op);
  EVT OutVT = Op.getValueType();
  EVT InVT = PackedOp.getValueType();
  unsigned InNumElts = InVT.getVectorNumElements();
  unsigned OutNumElts = OutVT.getVectorNumElements();
  unsigned NumInPerOut = InNumElts / OutNumElts;

  SDValue ZeroVec =
    DAG.getSplatVector(InVT, DL, DAG.getConstant(0, DL, InVT.getScalarType()));

  SmallVector<int, 16> Mask(InNumElts);
  unsigned ZeroVecElt = InNumElts;
  for (unsigned PackedElt = 0; PackedElt < OutNumElts; PackedElt++) {
    unsigned MaskElt = PackedElt * NumInPerOut;
    unsigned End = MaskElt + NumInPerOut - 1;
    for (; MaskElt < End; MaskElt++)
      Mask[MaskElt] = ZeroVecElt++;
    Mask[MaskElt] = PackedElt;
  }
  SDValue Shuf = DAG.getVectorShuffle(InVT, DL, PackedOp, ZeroVec, Mask);
  return DAG.getNode(ISD::BITCAST, DL, OutVT, Shuf);
}
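// Illustrative example: zero-extending v4i32 (in) to v2i64 (out) gives
// NumInPerOut == 2 and a shuffle mask of { 4, 0, 5, 1 }, i.e. a zero element
// in front of each packed element, which after the bitcast yields the
// zero-extended v2i64 on this big-endian target.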
SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG,
                                          unsigned ByScalar) const {
  // Look for cases where a vector shift can use the *_BY_SCALAR form.
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  unsigned ElemBitSize = VT.getScalarSizeInBits();

  // See whether the shift vector is a splat represented as BUILD_VECTOR.
  if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
    APInt SplatBits, SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;
    // Check for constant splats.  Use ElemBitSize as the minimum element
    // width and reject splats that need wider elements.
    if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
                             ElemBitSize, true) &&
        SplatBitSize == ElemBitSize) {
      SDValue Shift = DAG.getConstant(SplatBits.getZExtValue() & 0xfff,
                                      DL, MVT::i32);
      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
    }
    // Check for variable splats.
    BitVector UndefElements;
    SDValue Splat = BVN->getSplatValue(&UndefElements);
    if (Splat) {
      // Since i32 is the smallest legal type, we either need a no-op
      // or a truncation.
      SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Splat);
      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
    }
  }

  // See whether the shift vector is a splat represented as SHUFFLE_VECTOR,
  // and the shift amount is directly available in a GPR.
  if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
    if (VSN->isSplat()) {
      SDValue VSNOp0 = VSN->getOperand(0);
      unsigned Index = VSN->getSplatIndex();
      assert(Index < VT.getVectorNumElements() &&
             "Splat index should be defined and in first operand");
      if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
          VSNOp0.getOpcode() == ISD::BUILD_VECTOR) {
        // Since i32 is the smallest legal type, we either need a no-op
        // or a truncation.
        SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
                                    VSNOp0.getOperand(Index));
        return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
      }
    }
  }

  // Otherwise just treat the current form as legal.
  return Op;
}
SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
                                              SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::BR_CC:
    return lowerBR_CC(Op, DAG);
  case ISD::SELECT_CC:
    return lowerSELECT_CC(Op, DAG);
  case ISD::SETCC:
    return lowerSETCC(Op, DAG);
  case ISD::STRICT_FSETCC:
    return lowerSTRICT_FSETCC(Op, DAG, false);
  case ISD::STRICT_FSETCCS:
    return lowerSTRICT_FSETCC(Op, DAG, true);
  case ISD::GlobalAddress:
    return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
  case ISD::JumpTable:
    return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
  case ISD::ConstantPool:
    return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
  case ISD::BITCAST:
    return lowerBITCAST(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::VACOPY:
    return lowerVACOPY(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    return lowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::GET_DYNAMIC_AREA_OFFSET:
    return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
  case ISD::SMUL_LOHI:
    return lowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:
    return lowerUMUL_LOHI(Op, DAG);
  case ISD::SDIVREM:
    return lowerSDIVREM(Op, DAG);
  case ISD::UDIVREM:
    return lowerUDIVREM(Op, DAG);
  case ISD::SADDO:
  case ISD::SSUBO:
  case ISD::UADDO:
  case ISD::USUBO:
    return lowerXALUO(Op, DAG);
  case ISD::ADDCARRY:
  case ISD::SUBCARRY:
    return lowerADDSUBCARRY(Op, DAG);
  case ISD::OR:
    return lowerOR(Op, DAG);
  case ISD::CTPOP:
    return lowerCTPOP(Op, DAG);
  case ISD::ATOMIC_FENCE:
    return lowerATOMIC_FENCE(Op, DAG);
  case ISD::ATOMIC_SWAP:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW);
  case ISD::ATOMIC_STORE:
    return lowerATOMIC_STORE(Op, DAG);
  case ISD::ATOMIC_LOAD:
    return lowerATOMIC_LOAD(Op, DAG);
  case ISD::ATOMIC_LOAD_ADD:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
  case ISD::ATOMIC_LOAD_SUB:
    return lowerATOMIC_LOAD_SUB(Op, DAG);
  case ISD::ATOMIC_LOAD_AND:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
  case ISD::ATOMIC_LOAD_OR:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
  case ISD::ATOMIC_LOAD_XOR:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
  case ISD::ATOMIC_LOAD_NAND:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
  case ISD::ATOMIC_LOAD_MIN:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
  case ISD::ATOMIC_LOAD_MAX:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
  case ISD::ATOMIC_LOAD_UMIN:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
  case ISD::ATOMIC_LOAD_UMAX:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    return lowerATOMIC_CMP_SWAP(Op, DAG);
  case ISD::STACKSAVE:
    return lowerSTACKSAVE(Op, DAG);
  case ISD::STACKRESTORE:
    return lowerSTACKRESTORE(Op, DAG);
  case ISD::PREFETCH:
    return lowerPREFETCH(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN:
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN:
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::BUILD_VECTOR:
    return lowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return lowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:
    return lowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return lowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    return lowerSIGN_EXTEND_VECTOR_INREG(Op, DAG);
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    return lowerZERO_EXTEND_VECTOR_INREG(Op, DAG);
  case ISD::SHL:
    return lowerShift(Op, DAG, SystemZISD::VSHL_BY_SCALAR);
  case ISD::SRL:
    return lowerShift(Op, DAG, SystemZISD::VSRL_BY_SCALAR);
  case ISD::SRA:
    return lowerShift(Op, DAG, SystemZISD::VSRA_BY_SCALAR);
  default:
    llvm_unreachable("Unexpected node to lower");
  }
}
// Lower operations with invalid operand or result types (currently used
// only for 128-bit integer types).
void
SystemZTargetLowering::LowerOperationWrapper(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::ATOMIC_LOAD: {
    SDLoc DL(N);
    SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::Other);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
    SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_LOAD_128,
                                          DL, Tys, Ops, MVT::i128, MMO);
    Results.push_back(lowerGR128ToI128(DAG, Res));
    Results.push_back(Res.getValue(1));
    break;
  }
  case ISD::ATOMIC_STORE: {
    SDLoc DL(N);
    SDVTList Tys = DAG.getVTList(MVT::Other);
    SDValue Ops[] = { N->getOperand(0),
                      lowerI128ToGR128(DAG, N->getOperand(2)),
                      N->getOperand(1) };
    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
    SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_STORE_128,
                                          DL, Tys, Ops, MVT::i128, MMO);
    // We have to enforce sequential consistency by performing a
    // serialization operation after the store.
    if (cast<AtomicSDNode>(N)->getSuccessOrdering() ==
        AtomicOrdering::SequentiallyConsistent)
      Res = SDValue(DAG.getMachineNode(SystemZ::Serialize, DL,
                                       MVT::Other, Res), 0);
    Results.push_back(Res);
    break;
  }
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
    SDLoc DL(N);
    SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                      lowerI128ToGR128(DAG, N->getOperand(2)),
                      lowerI128ToGR128(DAG, N->getOperand(3)) };
    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
    SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP_128,
                                          DL, Tys, Ops, MVT::i128, MMO);
    SDValue Success = emitSETCC(DAG, DL, Res.getValue(1),
                                SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ);
    Success = DAG.getZExtOrTrunc(Success, DL, N->getValueType(1));
    Results.push_back(lowerGR128ToI128(DAG, Res));
    Results.push_back(Success);
    Results.push_back(Res.getValue(2));
    break;
  }
  default:
    llvm_unreachable("Unexpected node to lower");
  }
}
void
SystemZTargetLowering::ReplaceNodeResults(SDNode *N,
                                          SmallVectorImpl<SDValue> &Results,
                                          SelectionDAG &DAG) const {
  return LowerOperationWrapper(N, Results, DAG);
}
const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
  switch ((SystemZISD::NodeType)Opcode) {
  case SystemZISD::FIRST_NUMBER: break;
    OPCODE(PCREL_WRAPPER);
    OPCODE(PCREL_OFFSET);
    OPCODE(STRICT_FCMP);
    OPCODE(STRICT_FCMPS);
    OPCODE(SELECT_CCMASK);
    OPCODE(ADJDYNALLOC);
    OPCODE(PROBED_ALLOCA);
    OPCODE(SEARCH_STRING);
    OPCODE(TBEGIN_NOFLOAT);
    OPCODE(ROTATE_MASK);
    OPCODE(JOIN_DWORDS);
    OPCODE(PERMUTE_DWORDS);
    OPCODE(UNPACK_HIGH);
    OPCODE(UNPACKL_HIGH);
    OPCODE(UNPACKL_LOW);
    OPCODE(VSHL_BY_SCALAR);
    OPCODE(VSRL_BY_SCALAR);
    OPCODE(VSRA_BY_SCALAR);
    OPCODE(STRICT_VFCMPE);
    OPCODE(STRICT_VFCMPES);
    OPCODE(STRICT_VFCMPH);
    OPCODE(STRICT_VFCMPHS);
    OPCODE(STRICT_VFCMPHE);
    OPCODE(STRICT_VFCMPHES);
    OPCODE(STRICT_VEXTEND);
    OPCODE(STRICT_VROUND);
    OPCODE(ATOMIC_SWAPW);
    OPCODE(ATOMIC_LOADW_ADD);
    OPCODE(ATOMIC_LOADW_SUB);
    OPCODE(ATOMIC_LOADW_AND);
    OPCODE(ATOMIC_LOADW_OR);
    OPCODE(ATOMIC_LOADW_XOR);
    OPCODE(ATOMIC_LOADW_NAND);
    OPCODE(ATOMIC_LOADW_MIN);
    OPCODE(ATOMIC_LOADW_MAX);
    OPCODE(ATOMIC_LOADW_UMIN);
    OPCODE(ATOMIC_LOADW_UMAX);
    OPCODE(ATOMIC_CMP_SWAPW);
    OPCODE(ATOMIC_CMP_SWAP);
    OPCODE(ATOMIC_LOAD_128);
    OPCODE(ATOMIC_STORE_128);
    OPCODE(ATOMIC_CMP_SWAP_128);
  }
  return nullptr;
#undef OPCODE
}
// Return true if VT is a vector whose elements are a whole number of bytes
// in width. Also check for presence of vector support.
bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const {
  if (!Subtarget.hasVector())
    return false;

  return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0 && VT.isSimple();
}
// Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT
// producing a result of type ResVT.  Op is a possibly bitcast version
// of the input vector and Index is the index (based on type VecVT) that
// should be extracted.  Return the new extraction if a simplification
// was possible or if Force is true.
SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
                                              EVT VecVT, SDValue Op,
                                              unsigned Index,
                                              DAGCombinerInfo &DCI,
                                              bool Force) const {
  SelectionDAG &DAG = DCI.DAG;

  // The number of bytes being extracted.
  unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();

  for (;;) {
    unsigned Opcode = Op.getOpcode();
    if (Opcode == ISD::BITCAST)
      // Look through bitcasts.
      Op = Op.getOperand(0);
    else if ((Opcode == ISD::VECTOR_SHUFFLE || Opcode == SystemZISD::SPLAT) &&
             canTreatAsByteVector(Op.getValueType())) {
      // Get a VPERM-like permute mask and see whether the bytes covered
      // by the extracted element are a contiguous sequence from one
      // input operand.
      SmallVector<int, SystemZ::VectorBytes> Bytes;
      if (!getVPermMask(Op, Bytes))
        break;
      int First;
      if (!getShuffleInput(Bytes, Index * BytesPerElement,
                           BytesPerElement, First))
        break;
      if (First < 0)
        return DAG.getUNDEF(ResVT);
      // Make sure the contiguous sequence starts at a multiple of the
      // original element size.
      unsigned Byte = unsigned(First) % Bytes.size();
      if (Byte % BytesPerElement != 0)
        break;
      // We can get the extracted value directly from an input.
      Index = Byte / BytesPerElement;
      Op = Op.getOperand(unsigned(First) / Bytes.size());
      Force = true;
    } else if (Opcode == ISD::BUILD_VECTOR &&
               canTreatAsByteVector(Op.getValueType())) {
      // We can only optimize this case if the BUILD_VECTOR elements are
      // at least as wide as the extracted value.
      EVT OpVT = Op.getValueType();
      unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
      if (OpBytesPerElement < BytesPerElement)
        break;
      // Make sure that the least-significant bit of the extracted value
      // is the least significant bit of an input.
      unsigned End = (Index + 1) * BytesPerElement;
      if (End % OpBytesPerElement != 0)
        break;
      // We're extracting the low part of one operand of the BUILD_VECTOR.
      Op = Op.getOperand(End / OpBytesPerElement - 1);
      if (!Op.getValueType().isInteger()) {
        EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits());
        Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
        DCI.AddToWorklist(Op.getNode());
      }
      EVT VT = MVT::getIntegerVT(ResVT.getSizeInBits());
      Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
      if (VT != ResVT) {
        DCI.AddToWorklist(Op.getNode());
        Op = DAG.getNode(ISD::BITCAST, DL, ResVT, Op);
      }
      return Op;
    } else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
                Opcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
                Opcode == ISD::ANY_EXTEND_VECTOR_INREG) &&
               canTreatAsByteVector(Op.getValueType()) &&
               canTreatAsByteVector(Op.getOperand(0).getValueType())) {
      // Make sure that only the unextended bits are significant.
      EVT ExtVT = Op.getValueType();
      EVT OpVT = Op.getOperand(0).getValueType();
      unsigned ExtBytesPerElement = ExtVT.getVectorElementType().getStoreSize();
      unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
      unsigned Byte = Index * BytesPerElement;
      unsigned SubByte = Byte % ExtBytesPerElement;
      unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
      if (SubByte < MinSubByte ||
          SubByte + BytesPerElement > ExtBytesPerElement)
        break;
      // Get the byte offset of the unextended element
      Byte = Byte / ExtBytesPerElement * OpBytesPerElement;
      // ...then add the byte offset relative to that element.
      Byte += SubByte - MinSubByte;
      if (Byte % BytesPerElement != 0)
        break;
      Op = Op.getOperand(0);
      Index = Byte / BytesPerElement;
      Force = true;
    } else
      break;
  }
  if (Force) {
    if (Op.getValueType() != VecVT) {
      Op = DAG.getNode(ISD::BITCAST, DL, VecVT, Op);
      DCI.AddToWorklist(Op.getNode());
    }
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Op,
                       DAG.getConstant(Index, DL, MVT::i32));
  }
  return SDValue();
}
// Optimize vector operations in scalar value Op on the basis that Op
// is truncated to TruncVT.
SDValue SystemZTargetLowering::combineTruncateExtract(
    const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const {
  // If we have (trunc (extract_vector_elt X, Y)), try to turn it into
  // (extract_vector_elt (bitcast X), Y'), where (bitcast X) has elements
  // of type TruncVT.
  if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      TruncVT.getSizeInBits() % 8 == 0) {
    SDValue Vec = Op.getOperand(0);
    EVT VecVT = Vec.getValueType();
    if (canTreatAsByteVector(VecVT)) {
      if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
        unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();
        unsigned TruncBytes = TruncVT.getStoreSize();
        if (BytesPerElement % TruncBytes == 0) {
          // Calculate the value of Y' in the above description.  We are
          // splitting the original elements into Scale equal-sized pieces
          // and for truncation purposes want the last (least-significant)
          // of these pieces for IndexN.  This is easiest to do by calculating
          // the start index of the following element and then subtracting 1.
          unsigned Scale = BytesPerElement / TruncBytes;
          unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;

          // Defer the creation of the bitcast from X to combineExtract,
          // which might be able to optimize the extraction.
          VecVT = MVT::getVectorVT(MVT::getIntegerVT(TruncBytes * 8),
                                   VecVT.getStoreSize() / TruncBytes);
          EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
          return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
        }
      }
    }
  }
  return SDValue();
}
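// Worked example (illustrative): for (trunc (extract_vector_elt v4i32 X, 1))
// truncated to i8, Scale is 4 and NewIndex is (1 + 1) * 4 - 1 = 7, so the
// combine extracts byte 7 of (bitcast X to v16i8) -- the least-significant
// byte of the original element on this big-endian target.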
SDValue SystemZTargetLowering::combineZERO_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // Convert (zext (select_ccmask C1, C2)) into (select_ccmask C1', C2')
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  if (N0.getOpcode() == SystemZISD::SELECT_CCMASK) {
    auto *TrueOp = dyn_cast<ConstantSDNode>(N0.getOperand(0));
    auto *FalseOp = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (TrueOp && FalseOp) {
      SDLoc DL(N0);
      SDValue Ops[] = { DAG.getConstant(TrueOp->getZExtValue(), DL, VT),
                        DAG.getConstant(FalseOp->getZExtValue(), DL, VT),
                        N0.getOperand(2), N0.getOperand(3), N0.getOperand(4) };
      SDValue NewSelect = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VT, Ops);
      // If N0 has multiple uses, change other uses as well.
      if (!N0.hasOneUse()) {
        SDValue TruncSelect =
          DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), NewSelect);
        DCI.CombineTo(N0.getNode(), TruncSelect);
      }
      return NewSelect;
    }
  }
  return SDValue();
}
SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // Convert (sext_in_reg (setcc LHS, RHS, COND), i1)
  // and (sext_in_reg (any_extend (setcc LHS, RHS, COND)), i1)
  // into (select_cc LHS, RHS, -1, 0, COND)
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
  if (N0.hasOneUse() && N0.getOpcode() == ISD::ANY_EXTEND)
    N0 = N0.getOperand(0);
  if (EVT == MVT::i1 && N0.hasOneUse() && N0.getOpcode() == ISD::SETCC) {
    SDLoc DL(N0);
    SDValue Ops[] = { N0.getOperand(0), N0.getOperand(1),
                      DAG.getConstant(-1, DL, VT), DAG.getConstant(0, DL, VT),
                      N0.getOperand(2) };
    return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
  }
  return SDValue();
}
SDValue SystemZTargetLowering::combineSIGN_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // Convert (sext (ashr (shl X, C1), C2)) to
  // (ashr (shl (anyext X), C1'), C2')), since wider shifts are as
  // cheap as narrower ones.
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
    auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    SDValue Inner = N0.getOperand(0);
    if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
      if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
        unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits());
        unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
        unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
        EVT ShiftVT = N0.getOperand(1).getValueType();
        SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT,
                                  Inner.getOperand(0));
        SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext,
                                  DAG.getConstant(NewShlAmt, SDLoc(Inner),
                                                  ShiftVT));
        return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl,
                           DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT));
      }
    }
  }
  return SDValue();
}
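// Worked example (illustrative): for i64 = sext (i32 (ashr (shl X, 24), 24)),
// Extra is 64 - 32 = 32, so the combine produces
// (ashr (shl (anyext X to i64), 56), 56), letting the wider shifts perform
// the sign extension.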
SDValue SystemZTargetLowering::combineMERGE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  unsigned Opcode = N->getOpcode();
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  if (Op0.getOpcode() == ISD::BITCAST)
    Op0 = Op0.getOperand(0);
  if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
    // (z_merge_* 0, 0) -> 0.  This is mostly useful for using VLLEZF
    // for v4f32.
    if (Op1 == N->getOperand(0))
      return Op1;
    // (z_merge_? 0, X) -> (z_unpackl_? 0, X).
    EVT VT = Op1.getValueType();
    unsigned ElemBytes = VT.getVectorElementType().getStoreSize();
    if (ElemBytes <= 4) {
      Opcode = (Opcode == SystemZISD::MERGE_HIGH ?
                SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW);
      EVT InVT = VT.changeVectorElementTypeToInteger();
      EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(ElemBytes * 16),
                                   SystemZ::VectorBytes / ElemBytes / 2);
      if (VT != InVT) {
        Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1);
        DCI.AddToWorklist(Op1.getNode());
      }
      SDValue Op = DAG.getNode(Opcode, SDLoc(N), OutVT, Op1);
      DCI.AddToWorklist(Op.getNode());
      return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
    }
  }
  return SDValue();
}
SDValue SystemZTargetLowering::combineLOAD(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT LdVT = N->getValueType(0);
  if (LdVT.isVector() || LdVT.isInteger())
    return SDValue();
  // Transform a scalar load that is REPLICATEd as well as having other
  // use(s) to the form where the other use(s) use the first element of the
  // REPLICATE instead of the load. Otherwise instruction selection will not
  // produce a VLREP. Avoid extracting to a GPR, so only do this for floating
  // point loads.

  SDValue Replicate;
  SmallVector<SDNode*, 8> OtherUses;
  for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
       UI != UE; ++UI) {
    if (UI->getOpcode() == SystemZISD::REPLICATE) {
      if (Replicate)
        return SDValue(); // Should never happen
      Replicate = SDValue(*UI, 0);
    }
    else if (UI.getUse().getResNo() == 0)
      OtherUses.push_back(*UI);
  }
  if (!Replicate || OtherUses.empty())
    return SDValue();

  SDLoc DL(N);
  SDValue Extract0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, LdVT,
                              Replicate, DAG.getConstant(0, DL, MVT::i32));
  // Update uses of the loaded Value while preserving old chains.
  for (SDNode *U : OtherUses) {
    SmallVector<SDValue, 8> Ops;
    for (SDValue Op : U->ops())
      Ops.push_back((Op.getNode() == N && Op.getResNo() == 0) ? Extract0 : Op);
    DAG.UpdateNodeOperands(U, Ops);
  }
  return SDValue(N, 0);
}
bool SystemZTargetLowering::canLoadStoreByteSwapped(EVT VT) const {
  if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)
    return true;
  if (Subtarget.hasVectorEnhancements2())
    if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64)
      return true;
  return false;
}
static bool isVectorElementSwap(ArrayRef<int> M, EVT VT) {
  if (!VT.isVector() || !VT.isSimple() ||
      VT.getSizeInBits() != 128 ||
      VT.getScalarSizeInBits() % 8 != 0)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  for (unsigned i = 0; i < NumElts; ++i) {
    if (M[i] < 0) continue; // ignore UNDEF indices
    if ((unsigned) M[i] != NumElts - 1 - i)
      return false;
  }

  return true;
}
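// Example (illustrative): for a v4i32 shuffle, a mask of { 3, 2, 1, 0 }
// (possibly with -1 holes) satisfies this check, since every defined index I
// maps to element NumElts - 1 - I.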
SDValue SystemZTargetLowering::combineSTORE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  auto *SN = cast<StoreSDNode>(N);
  auto &Op1 = N->getOperand(1);
  EVT MemVT = SN->getMemoryVT();
  // If we have (truncstoreiN (extract_vector_elt X, Y), Z) then it is better
  // for the extraction to be done on a vMiN value, so that we can use VSTE.
  // If X has wider elements then convert it to:
  // (truncstoreiN (extract_vector_elt (bitcast X), Y2), Z).
  if (MemVT.isInteger() && SN->isTruncatingStore()) {
    if (SDValue Value =
            combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
      DCI.AddToWorklist(Value.getNode());

      // Rewrite the store with the new form of stored value.
      return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value,
                               SN->getBasePtr(), SN->getMemoryVT(),
                               SN->getMemOperand());
    }
  }
  // Combine STORE (BSWAP) into STRVH/STRV/STRVG/VSTBR
  if (!SN->isTruncatingStore() &&
      Op1.getOpcode() == ISD::BSWAP &&
      Op1.getNode()->hasOneUse() &&
      canLoadStoreByteSwapped(Op1.getValueType())) {

    SDValue BSwapOp = Op1.getOperand(0);

    if (BSwapOp.getValueType() == MVT::i16)
      BSwapOp = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), MVT::i32, BSwapOp);

    SDValue Ops[] = {
      N->getOperand(0), BSwapOp, N->getOperand(2)
    };

    return
      DAG.getMemIntrinsicNode(SystemZISD::STRV, SDLoc(N), DAG.getVTList(MVT::Other),
                              Ops, MemVT, SN->getMemOperand());
  }
  // Combine STORE (element-swap) into VSTER
  if (!SN->isTruncatingStore() &&
      Op1.getOpcode() == ISD::VECTOR_SHUFFLE &&
      Op1.getNode()->hasOneUse() &&
      Subtarget.hasVectorEnhancements2()) {
    ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op1.getNode());
    ArrayRef<int> ShuffleMask = SVN->getMask();
    if (isVectorElementSwap(ShuffleMask, Op1.getValueType())) {
      SDValue Ops[] = {
        N->getOperand(0), Op1.getOperand(0), N->getOperand(2)
      };

      return DAG.getMemIntrinsicNode(SystemZISD::VSTER, SDLoc(N),
                                     DAG.getVTList(MVT::Other),
                                     Ops, MemVT, SN->getMemOperand());
    }
  }

  return SDValue();
}
SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  // Combine element-swap (LOAD) into VLER
  if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
      N->getOperand(0).hasOneUse() &&
      Subtarget.hasVectorEnhancements2()) {
    ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    ArrayRef<int> ShuffleMask = SVN->getMask();
    if (isVectorElementSwap(ShuffleMask, N->getValueType(0))) {
      SDValue Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);

      // Create the element-swapping load.
      SDValue Ops[] = {
        LD->getChain(),    // Chain
        LD->getBasePtr()   // Ptr
      };
      SDValue ESLoad =
        DAG.getMemIntrinsicNode(SystemZISD::VLER, SDLoc(N),
                                DAG.getVTList(LD->getValueType(0), MVT::Other),
                                Ops, LD->getMemoryVT(), LD->getMemOperand());

      // First, combine the VECTOR_SHUFFLE away.  This makes the value produced
      // by the load dead.
      DCI.CombineTo(N, ESLoad);

      // Next, combine the load away, we give it a bogus result value but a real
      // chain result.  The result value is dead because the shuffle is dead.
      DCI.CombineTo(Load.getNode(), ESLoad, ESLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDValue(N, 0);
    }
  }

  return SDValue();
}
SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  if (!Subtarget.hasVector())
    return SDValue();

  // Look through bitcasts that retain the number of vector elements.
  SDValue Op = N->getOperand(0);
  if (Op.getOpcode() == ISD::BITCAST &&
      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
      Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);

  // Pull BSWAP out of a vector extraction.
  if (Op.getOpcode() == ISD::BSWAP && Op.hasOneUse()) {
    EVT VecVT = Op.getValueType();
    EVT EltVT = VecVT.getVectorElementType();
    Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), EltVT,
                     Op.getOperand(0), N->getOperand(1));
    DCI.AddToWorklist(Op.getNode());
    Op = DAG.getNode(ISD::BSWAP, SDLoc(N), EltVT, Op);
    if (EltVT != N->getValueType(0)) {
      DCI.AddToWorklist(Op.getNode());
      Op = DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0), Op);
    }
    return Op;
  }

  // Try to simplify a vector extraction.
  if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
    SDValue Op0 = N->getOperand(0);
    EVT VecVT = Op0.getValueType();
    return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
                          IndexN->getZExtValue(), DCI, false);
  }
  return SDValue();
}
SystemZTargetLowering::combineJOIN_DWORDS(
6211 SDNode
*N
, DAGCombinerInfo
&DCI
) const {
6212 SelectionDAG
&DAG
= DCI
.DAG
;
6213 // (join_dwords X, X) == (replicate X)
6214 if (N
->getOperand(0) == N
->getOperand(1))
6215 return DAG
.getNode(SystemZISD::REPLICATE
, SDLoc(N
), N
->getValueType(0),
static SDValue MergeInputChains(SDNode *N1, SDNode *N2) {
  SDValue Chain1 = N1->getOperand(0);
  SDValue Chain2 = N2->getOperand(0);

  // Trivial case: both nodes take the same chain.
  if (Chain1 == Chain2)
    return Chain1;

  // FIXME - we could handle more complex cases via TokenFactor,
  // assuming we can verify that this would not create a cycle.
  return SDValue();
}
SDValue SystemZTargetLowering::combineFP_ROUND(
    SDNode *N, DAGCombinerInfo &DCI) const {

  if (!Subtarget.hasVector())
    return SDValue();

  // (fpround (extract_vector_elt X 0))
  // (fpround (extract_vector_elt X 1)) ->
  // (extract_vector_elt (VROUND X) 0)
  // (extract_vector_elt (VROUND X) 2)
  //
  // This is a special case since the target doesn't really support v2f32s.
  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op0 = N->getOperand(OpNo);
  if (N->getValueType(0) == MVT::f32 &&
      Op0.hasOneUse() &&
      Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      Op0.getOperand(0).getValueType() == MVT::v2f64 &&
      Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
    SDValue Vec = Op0.getOperand(0);
    for (auto *U : Vec->uses()) {
      if (U != Op0.getNode() &&
          U->hasOneUse() &&
          U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          U->getOperand(0) == Vec &&
          U->getOperand(1).getOpcode() == ISD::Constant &&
          cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) {
        SDValue OtherRound = SDValue(*U->use_begin(), 0);
        if (OtherRound.getOpcode() == N->getOpcode() &&
            OtherRound.getOperand(OpNo) == SDValue(U, 0) &&
            OtherRound.getValueType() == MVT::f32) {
          SDValue VRound, Chain;
          if (N->isStrictFPOpcode()) {
            Chain = MergeInputChains(N, OtherRound.getNode());
            if (!Chain)
              continue;
            VRound = DAG.getNode(SystemZISD::STRICT_VROUND, SDLoc(N),
                                 {MVT::v4f32, MVT::Other}, {Chain, Vec});
            Chain = VRound.getValue(1);
          } else
            VRound = DAG.getNode(SystemZISD::VROUND, SDLoc(N),
                                 MVT::v4f32, Vec);
          DCI.AddToWorklist(VRound.getNode());
          SDValue Extract1 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f32,
                        VRound, DAG.getConstant(2, SDLoc(U), MVT::i32));
          DCI.AddToWorklist(Extract1.getNode());
          DAG.ReplaceAllUsesOfValueWith(OtherRound, Extract1);
          if (Chain)
            DAG.ReplaceAllUsesOfValueWith(OtherRound.getValue(1), Chain);
          SDValue Extract0 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f32,
                        VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
          DCI.AddToWorklist(Extract0.getNode());
          if (Chain)
            return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op0),
                               N->getVTList(), Extract0, Chain);
          return Extract0;
        }
      }
    }
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineFP_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {

  if (!Subtarget.hasVector())
    return SDValue();

  // (fpextend (extract_vector_elt X 0))
  // (fpextend (extract_vector_elt X 2)) ->
  // (extract_vector_elt (VEXTEND X) 0)
  // (extract_vector_elt (VEXTEND X) 1)
  //
  // This is a special case since the target doesn't really support v2f32s.
  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op0 = N->getOperand(OpNo);
  if (N->getValueType(0) == MVT::f64 &&
      Op0.hasOneUse() &&
      Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      Op0.getOperand(0).getValueType() == MVT::v4f32 &&
      Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
    SDValue Vec = Op0.getOperand(0);
    for (auto *U : Vec->uses()) {
      if (U != Op0.getNode() &&
          U->hasOneUse() &&
          U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          U->getOperand(0) == Vec &&
          U->getOperand(1).getOpcode() == ISD::Constant &&
          cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 2) {
        SDValue OtherExtend = SDValue(*U->use_begin(), 0);
        if (OtherExtend.getOpcode() == N->getOpcode() &&
            OtherExtend.getOperand(OpNo) == SDValue(U, 0) &&
            OtherExtend.getValueType() == MVT::f64) {
          SDValue VExtend, Chain;
          if (N->isStrictFPOpcode()) {
            Chain = MergeInputChains(N, OtherExtend.getNode());
            if (!Chain)
              continue;
            VExtend = DAG.getNode(SystemZISD::STRICT_VEXTEND, SDLoc(N),
                                  {MVT::v2f64, MVT::Other}, {Chain, Vec});
            Chain = VExtend.getValue(1);
          } else
            VExtend = DAG.getNode(SystemZISD::VEXTEND, SDLoc(N),
                                  MVT::v2f64, Vec);
          DCI.AddToWorklist(VExtend.getNode());
          SDValue Extract1 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f64,
                        VExtend, DAG.getConstant(1, SDLoc(U), MVT::i32));
          DCI.AddToWorklist(Extract1.getNode());
          DAG.ReplaceAllUsesOfValueWith(OtherExtend, Extract1);
          if (Chain)
            DAG.ReplaceAllUsesOfValueWith(OtherExtend.getValue(1), Chain);
          SDValue Extract0 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f64,
                        VExtend, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
          DCI.AddToWorklist(Extract0.getNode());
          if (Chain)
            return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op0),
                               N->getVTList(), Extract0, Chain);
          return Extract0;
        }
      }
    }
  }
  return SDValue();
}

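// Widen the integer operand of a [su]int_to_fp to the width of the FP
// result before type legalization.  For example (illustrative), a direct
// v2i16 -> v2f64 conversion would be scalarized; extending the input to
// v2i64 first keeps the conversion in vector form.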
SDValue SystemZTargetLowering::combineINT_TO_FP(
    SDNode *N, DAGCombinerInfo &DCI) const {
  if (DCI.Level != BeforeLegalizeTypes)
    return SDValue();
  unsigned Opcode = N->getOpcode();
  EVT OutVT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op = N->getOperand(0);
  unsigned OutScalarBits = OutVT.getScalarSizeInBits();
  unsigned InScalarBits = Op->getValueType(0).getScalarSizeInBits();

  // Insert an extension before type-legalization to avoid scalarization, e.g.:
  //   v2f64 = uint_to_fp v2i16
  //   =>
  //   v2f64 = uint_to_fp (v2i64 zero_extend v2i16)
  if (OutVT.isVector() && OutScalarBits > InScalarBits) {
    MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(OutVT.getScalarSizeInBits()),
                                 OutVT.getVectorNumElements());
    unsigned ExtOpcode =
      (Opcode == ISD::UINT_TO_FP ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND);
    SDValue ExtOp = DAG.getNode(ExtOpcode, SDLoc(N), ExtVT, Op);
    return DAG.getNode(Opcode, SDLoc(N), OutVT, ExtOp);
  }
  return SDValue();
}

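// Fold BSWAP nodes into byte-swapping loads where possible, and push them
// through vector insertions and shuffles when at least one operand then
// simplifies.  For example (illustrative): (i32 bswap (load X)) becomes a
// single byte-reversed load, with the original load kept only for its chain.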
SDValue SystemZTargetLowering::combineBSWAP(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  // Combine BSWAP (LOAD) into LRVH/LRV/LRVG/VLBR
  if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
      N->getOperand(0).hasOneUse() &&
      canLoadStoreByteSwapped(N->getValueType(0))) {
    SDValue Load = N->getOperand(0);
    LoadSDNode *LD = cast<LoadSDNode>(Load);

    // Create the byte-swapping load.
    SDValue Ops[] = {
      LD->getChain(),    // Chain
      LD->getBasePtr()   // Ptr
    };
    EVT LoadVT = N->getValueType(0);
    if (LoadVT == MVT::i16)
      LoadVT = MVT::i32;
    SDValue BSLoad =
      DAG.getMemIntrinsicNode(SystemZISD::LRV, SDLoc(N),
                              DAG.getVTList(LoadVT, MVT::Other),
                              Ops, LD->getMemoryVT(), LD->getMemOperand());

    // If this is an i16 load, insert the truncate.
    SDValue ResVal = BSLoad;
    if (N->getValueType(0) == MVT::i16)
      ResVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i16, BSLoad);

    // First, combine the bswap away.  This makes the value produced by the
    // load dead.
    DCI.CombineTo(N, ResVal);

    // Next, combine the load away, we give it a bogus result value but a real
    // chain result.  The result value is dead because the bswap is dead.
    DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

    // Return N so it doesn't get rechecked!
    return SDValue(N, 0);
  }

  // Look through bitcasts that retain the number of vector elements.
  SDValue Op = N->getOperand(0);
  if (Op.getOpcode() == ISD::BITCAST &&
      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
      Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);

  // Push BSWAP into a vector insertion if at least one side then simplifies.
  if (Op.getOpcode() == ISD::INSERT_VECTOR_ELT && Op.hasOneUse()) {
    SDValue Vec = Op.getOperand(0);
    SDValue Elt = Op.getOperand(1);
    SDValue Idx = Op.getOperand(2);

    if (DAG.isConstantIntBuildVectorOrConstantInt(Vec) ||
        Vec.getOpcode() == ISD::BSWAP || Vec.isUndef() ||
        DAG.isConstantIntBuildVectorOrConstantInt(Elt) ||
        Elt.getOpcode() == ISD::BSWAP || Elt.isUndef() ||
        (canLoadStoreByteSwapped(N->getValueType(0)) &&
         ISD::isNON_EXTLoad(Elt.getNode()) && Elt.hasOneUse())) {
      EVT VecVT = N->getValueType(0);
      EVT EltVT = N->getValueType(0).getVectorElementType();
      if (VecVT != Vec.getValueType()) {
        Vec = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Vec);
        DCI.AddToWorklist(Vec.getNode());
      }
      if (EltVT != Elt.getValueType()) {
        Elt = DAG.getNode(ISD::BITCAST, SDLoc(N), EltVT, Elt);
        DCI.AddToWorklist(Elt.getNode());
      }
      Vec = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Vec);
      DCI.AddToWorklist(Vec.getNode());
      Elt = DAG.getNode(ISD::BSWAP, SDLoc(N), EltVT, Elt);
      DCI.AddToWorklist(Elt.getNode());
      return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), VecVT,
                         Vec, Elt, Idx);
    }
  }

  // Push BSWAP into a vector shuffle if at least one side then simplifies.
  ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(Op);
  if (SV && Op.hasOneUse()) {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) ||
        Op0.getOpcode() == ISD::BSWAP || Op0.isUndef() ||
        DAG.isConstantIntBuildVectorOrConstantInt(Op1) ||
        Op1.getOpcode() == ISD::BSWAP || Op1.isUndef()) {
      EVT VecVT = N->getValueType(0);
      if (VecVT != Op0.getValueType()) {
        Op0 = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Op0);
        DCI.AddToWorklist(Op0.getNode());
      }
      if (VecVT != Op1.getValueType()) {
        Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Op1);
        DCI.AddToWorklist(Op1.getNode());
      }
      Op0 = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Op0);
      DCI.AddToWorklist(Op0.getNode());
      Op1 = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Op1);
      DCI.AddToWorklist(Op1.getNode());
      return DAG.getVectorShuffle(VecVT, SDLoc(N), Op0, Op1, SV->getMask());
    }
  }

  return SDValue();
}

static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
  // We have a SELECT_CCMASK or BR_CCMASK comparing the condition code
  // set by the CCReg instruction using the CCValid / CCMask masks,
  // If the CCReg instruction is itself a ICMP testing the condition
  // code set by some other instruction, see whether we can directly
  // use that condition code.

  // Verify that we have an ICMP against some constant.
  if (CCValid != SystemZ::CCMASK_ICMP)
    return false;
  auto *ICmp = CCReg.getNode();
  if (ICmp->getOpcode() != SystemZISD::ICMP)
    return false;
  auto *CompareLHS = ICmp->getOperand(0).getNode();
  auto *CompareRHS = dyn_cast<ConstantSDNode>(ICmp->getOperand(1));
  if (!CompareRHS)
    return false;

  // Optimize the case where CompareLHS is a SELECT_CCMASK.
  if (CompareLHS->getOpcode() == SystemZISD::SELECT_CCMASK) {
    // Verify that we have an appropriate mask for a EQ or NE comparison.
    bool Invert = false;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      Invert = !Invert;
    else if (CCMask != SystemZ::CCMASK_CMP_EQ)
      return false;

    // Verify that the ICMP compares against one of select values.
    auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0));
    if (!TrueVal)
      return false;
    auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    if (!FalseVal)
      return false;
    if (CompareRHS->getZExtValue() == FalseVal->getZExtValue())
      Invert = !Invert;
    else if (CompareRHS->getZExtValue() != TrueVal->getZExtValue())
      return false;

    // Compute the effective CC mask for the new branch or select.
    auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2));
    auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3));
    if (!NewCCValid || !NewCCMask)
      return false;
    CCValid = NewCCValid->getZExtValue();
    CCMask = NewCCMask->getZExtValue();
    if (Invert)
      CCMask ^= CCValid;

    // Return the updated CCReg link.
    CCReg = CompareLHS->getOperand(4);
    return true;
  }

  // Optimize the case where CompareRHS is (SRA (SHL (IPM))).
  if (CompareLHS->getOpcode() == ISD::SRA) {
    auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    if (!SRACount || SRACount->getZExtValue() != 30)
      return false;
    auto *SHL = CompareLHS->getOperand(0).getNode();
    if (SHL->getOpcode() != ISD::SHL)
      return false;
    auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1));
    if (!SHLCount || SHLCount->getZExtValue() != 30 - SystemZ::IPM_CC)
      return false;
    auto *IPM = SHL->getOperand(0).getNode();
    if (IPM->getOpcode() != SystemZISD::IPM)
      return false;

    // Avoid introducing CC spills (because SRA would clobber CC).
    if (!CompareLHS->hasOneUse())
      return false;
    // Verify that the ICMP compares against zero.
    if (CompareRHS->getZExtValue() != 0)
      return false;

    // Compute the effective CC mask for the new branch or select.
    CCMask = SystemZ::reverseCCMask(CCMask);

    // Return the updated CCReg link.
    CCReg = IPM->getOperand(0);
    return true;
  }

  return false;
}

SDValue SystemZTargetLowering::combineBR_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // Combine BR_CCMASK (ICMP (SELECT_CCMASK)) into a single BR_CCMASK.
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
    return SDValue();

  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  SDValue Chain = N->getOperand(0);
  SDValue CCReg = N->getOperand(4);

  if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
    return DAG.getNode(SystemZISD::BR_CCMASK, SDLoc(N), N->getValueType(0),
                       Chain,
                       DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32),
                       DAG.getTargetConstant(CCMaskVal, SDLoc(N), MVT::i32),
                       N->getOperand(3), CCReg);
  return SDValue();
}

SDValue SystemZTargetLowering::combineSELECT_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // Combine SELECT_CCMASK (ICMP (SELECT_CCMASK)) into a single SELECT_CCMASK.
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3));
  if (!CCValid || !CCMask)
    return SDValue();

  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  SDValue CCReg = N->getOperand(4);

  if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
    return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
                       N->getOperand(0), N->getOperand(1),
                       DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32),
                       DAG.getTargetConstant(CCMaskVal, SDLoc(N), MVT::i32),
                       CCReg);
  return SDValue();
}

SDValue SystemZTargetLowering::combineGET_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {

  // Optimize away GET_CCMASK (SELECT_CCMASK) if the CC masks are compatible
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
    return SDValue();
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();

  SDValue Select = N->getOperand(0);
  if (Select->getOpcode() != SystemZISD::SELECT_CCMASK)
    return SDValue();

  auto *SelectCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2));
  auto *SelectCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3));
  if (!SelectCCValid || !SelectCCMask)
    return SDValue();
  int SelectCCValidVal = SelectCCValid->getZExtValue();
  int SelectCCMaskVal = SelectCCMask->getZExtValue();

  auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0));
  auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1));
  if (!TrueVal || !FalseVal)
    return SDValue();
  if (TrueVal->getZExtValue() != 0 && FalseVal->getZExtValue() == 0)
    ;
  else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() != 0)
    SelectCCMaskVal ^= SelectCCValidVal;
  else
    return SDValue();

  if (SelectCCValidVal & ~CCValidVal)
    return SDValue();
  if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
    return SDValue();

  return Select->getOperand(4);
}

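// The GET_CCMASK combine above requires the select to produce a boolean
// (one arm zero, the other nonzero) and its CC masks to be a subset of the
// masks GET_CCMASK tests; the CC-producing operand is then used directly.
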
SDValue SystemZTargetLowering::combineIntDIVREM(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  // In the case where the divisor is a vector of constants a cheaper
  // sequence of instructions can replace the divide. BuildSDIV is called to
  // do this during DAG combining, but it only succeeds when it can build a
  // multiplication node. The only option for SystemZ is ISD::SMUL_LOHI, and
  // since it is not Legal but Custom it can only happen before
  // legalization. Therefore we must scalarize this early before Combine
  // 1. For widened vectors, this is already the result of type legalization.
  if (DCI.Level == BeforeLegalizeTypes && VT.isVector() && isTypeLegal(VT) &&
      DAG.isConstantIntBuildVectorOrConstantInt(N->getOperand(1)))
    return DAG.UnrollVectorOp(N);

  return SDValue();
}

SDValue SystemZTargetLowering::combineINTRINSIC(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  unsigned Id = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  switch (Id) {
  // VECTOR LOAD (RIGHTMOST) WITH LENGTH with a length operand of 15
  // or larger is simply a vector load.
  case Intrinsic::s390_vll:
  case Intrinsic::s390_vlrl:
    if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(2)))
      if (C->getZExtValue() >= 15)
        return DAG.getLoad(N->getValueType(0), SDLoc(N), N->getOperand(0),
                           N->getOperand(3), MachinePointerInfo());
    break;
  // Likewise for VECTOR STORE (RIGHTMOST) WITH LENGTH.
  case Intrinsic::s390_vstl:
  case Intrinsic::s390_vstrl:
    if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(3)))
      if (C->getZExtValue() >= 15)
        return DAG.getStore(N->getOperand(0), SDLoc(N), N->getOperand(2),
                            N->getOperand(4), MachinePointerInfo());
    break;
  }

  return SDValue();
}

SDValue SystemZTargetLowering::unwrapAddress(SDValue N) const {
  if (N->getOpcode() == SystemZISD::PCREL_WRAPPER)
    return N->getOperand(0);
  return N;
}

SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  switch(N->getOpcode()) {
  default: break;
  case ISD::ZERO_EXTEND:        return combineZERO_EXTEND(N, DCI);
  case ISD::SIGN_EXTEND:        return combineSIGN_EXTEND(N, DCI);
  case ISD::SIGN_EXTEND_INREG:  return combineSIGN_EXTEND_INREG(N, DCI);
  case SystemZISD::MERGE_HIGH:
  case SystemZISD::MERGE_LOW:   return combineMERGE(N, DCI);
  case ISD::LOAD:               return combineLOAD(N, DCI);
  case ISD::STORE:              return combineSTORE(N, DCI);
  case ISD::VECTOR_SHUFFLE:     return combineVECTOR_SHUFFLE(N, DCI);
  case ISD::EXTRACT_VECTOR_ELT: return combineEXTRACT_VECTOR_ELT(N, DCI);
  case SystemZISD::JOIN_DWORDS: return combineJOIN_DWORDS(N, DCI);
  case ISD::STRICT_FP_ROUND:
  case ISD::FP_ROUND:           return combineFP_ROUND(N, DCI);
  case ISD::STRICT_FP_EXTEND:
  case ISD::FP_EXTEND:          return combineFP_EXTEND(N, DCI);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:         return combineINT_TO_FP(N, DCI);
  case ISD::BSWAP:              return combineBSWAP(N, DCI);
  case SystemZISD::BR_CCMASK:   return combineBR_CCMASK(N, DCI);
  case SystemZISD::SELECT_CCMASK: return combineSELECT_CCMASK(N, DCI);
  case SystemZISD::GET_CCMASK:  return combineGET_CCMASK(N, DCI);
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:               return combineIntDIVREM(N, DCI);
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:     return combineINTRINSIC(N, DCI);
  }

  return SDValue();
}

// Return the demanded elements for the OpNo source operand of Op. DemandedElts
// are for Op.
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts,
                                    unsigned OpNo) {
  EVT VT = Op.getValueType();
  unsigned NumElts = (VT.isVector() ? VT.getVectorNumElements() : 1);
  APInt SrcDemE;
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
    unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (Id) {
    case Intrinsic::s390_vpksh:   // PACKS
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:  // PACKS_CC
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:  // PACKLS
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs: // PACKLS_CC
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
      // VECTOR PACK truncates the elements of two source vectors into one.
      SrcDemE = DemandedElts;
      if (OpNo == 2)
        SrcDemE.lshrInPlace(NumElts / 2);
      SrcDemE = SrcDemE.trunc(NumElts / 2);
      break;
      // VECTOR UNPACK extends half the elements of the source vector.
    case Intrinsic::s390_vuphb:  // VECTOR UNPACK HIGH
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH
    case Intrinsic::s390_vuplhh:
    case Intrinsic::s390_vuplhf:
      SrcDemE = APInt(NumElts * 2, 0);
      SrcDemE.insertBits(DemandedElts, 0);
      break;
    case Intrinsic::s390_vuplb:  // VECTOR UNPACK LOW
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf:
    case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW
    case Intrinsic::s390_vupllh:
    case Intrinsic::s390_vupllf:
      SrcDemE = APInt(NumElts * 2, 0);
      SrcDemE.insertBits(DemandedElts, NumElts);
      break;
    case Intrinsic::s390_vpdi: {
      // VECTOR PERMUTE DWORD IMMEDIATE selects one element from each source.
      SrcDemE = APInt(NumElts, 0);
      if (!DemandedElts[OpNo - 1])
        break;
      unsigned Mask = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
      unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
      // Demand input element 0 or 1, given by the mask bit value.
      SrcDemE.setBit((Mask & MaskBit)? 1 : 0);
      break;
    }
    case Intrinsic::s390_vsldb: {
      // VECTOR SHIFT LEFT DOUBLE BY BYTE
      assert(VT == MVT::v16i8 && "Unexpected type.");
      unsigned FirstIdx = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
      assert (FirstIdx > 0 && FirstIdx < 16 && "Unused operand.");
      unsigned NumSrc0Els = 16 - FirstIdx;
      SrcDemE = APInt(NumElts, 0);
      if (OpNo == 1) {
        APInt DemEls = DemandedElts.trunc(NumSrc0Els);
        SrcDemE.insertBits(DemEls, FirstIdx);
      } else {
        APInt DemEls = DemandedElts.lshr(NumSrc0Els);
        SrcDemE.insertBits(DemEls, 0);
      }
      break;
    }
    case Intrinsic::s390_vperm:
      SrcDemE = APInt(NumElts, 1);
      break;
    default:
      llvm_unreachable("Unhandled intrinsic.");
      break;
    }
  } else {
    switch (Opcode) {
    case SystemZISD::JOIN_DWORDS:
      // Scalar operand.
      SrcDemE = APInt(1, 1);
      break;
    case SystemZISD::SELECT_CCMASK:
      SrcDemE = DemandedElts;
      break;
    default:
      llvm_unreachable("Unhandled opcode.");
      break;
    }
  }
  return SrcDemE;
}

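// Example (illustrative): for a VECTOR PACK of two v4i32 sources into a
// v8i16 result, elements 0-3 of the result come from the first source and
// elements 4-7 from the second, so demanding only result elements 0-3
// demands all four elements of operand 1 and none of operand 2.
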
static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known,
                                  const APInt &DemandedElts,
                                  const SelectionDAG &DAG, unsigned Depth,
                                  unsigned OpNo) {
  APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo);
  APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1);
  KnownBits LHSKnown =
      DAG.computeKnownBits(Op.getOperand(OpNo), Src0DemE, Depth + 1);
  KnownBits RHSKnown =
      DAG.computeKnownBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1);
  Known = KnownBits::commonBits(LHSKnown, RHSKnown);
}

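// The known bits of a two-source node are those common to both sources,
// computed only over the source lanes that the demanded result lanes
// actually read (as determined by getDemandedSrcElements above).
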
void
SystemZTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                     KnownBits &Known,
                                                     const APInt &DemandedElts,
                                                     const SelectionDAG &DAG,
                                                     unsigned Depth) const {
  Known.resetAll();

  // Intrinsic CC result is returned in the two low bits.
  unsigned tmp0, tmp1; // not used
  if (Op.getResNo() == 1 && isIntrinsicWithCC(Op, tmp0, tmp1)) {
    Known.Zero.setBitsFrom(2);
    return;
  }
  EVT VT = Op.getValueType();
  if (Op.getResNo() != 0 || VT == MVT::Untyped)
    return;
  assert (Known.getBitWidth() == VT.getScalarSizeInBits() &&
          "KnownBits does not match VT in bitwidth");
  assert ((!VT.isVector() ||
           (DemandedElts.getBitWidth() == VT.getVectorNumElements())) &&
          "DemandedElts does not match VT number of elements");
  unsigned BitWidth = Known.getBitWidth();
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
    bool IsLogical = false;
    unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (Id) {
    case Intrinsic::s390_vpksh:   // PACKS
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:  // PACKS_CC
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:  // PACKLS
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs: // PACKLS_CC
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
    case Intrinsic::s390_vpdi:
    case Intrinsic::s390_vsldb:
    case Intrinsic::s390_vperm:
      computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 1);
      break;
    case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH
    case Intrinsic::s390_vuplhh:
    case Intrinsic::s390_vuplhf:
    case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW
    case Intrinsic::s390_vupllh:
    case Intrinsic::s390_vupllf:
      IsLogical = true;
      LLVM_FALLTHROUGH;
    case Intrinsic::s390_vuphb:  // VECTOR UNPACK HIGH
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplb:  // VECTOR UNPACK LOW
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf: {
      SDValue SrcOp = Op.getOperand(1);
      APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 0);
      Known = DAG.computeKnownBits(SrcOp, SrcDemE, Depth + 1);
      if (IsLogical) {
        Known = Known.zext(BitWidth);
      } else
        Known = Known.sext(BitWidth);
      break;
    }
    default:
      break;
    }
  } else {
    switch (Opcode) {
    case SystemZISD::JOIN_DWORDS:
    case SystemZISD::SELECT_CCMASK:
      computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 0);
      break;
    case SystemZISD::REPLICATE: {
      SDValue SrcOp = Op.getOperand(0);
      Known = DAG.computeKnownBits(SrcOp, Depth + 1);
      if (Known.getBitWidth() < BitWidth && isa<ConstantSDNode>(SrcOp))
        Known = Known.sext(BitWidth); // VREPI sign extends the immedate.
      break;
    }
    default:
      break;
    }
  }

  // Known has the width of the source operand(s). Adjust if needed to match
  // the passed bitwidth.
  if (Known.getBitWidth() != BitWidth)
    Known = Known.anyextOrTrunc(BitWidth);
}

static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts,
                                        const SelectionDAG &DAG, unsigned Depth,
                                        unsigned OpNo) {
  APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo);
  unsigned LHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo), Src0DemE, Depth + 1);
  if (LHS == 1) return 1; // Early out.
  APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1);
  unsigned RHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1);
  if (RHS == 1) return 1; // Early out.
  unsigned Common = std::min(LHS, RHS);
  unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits();
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getScalarSizeInBits();
  if (SrcBitWidth > VTBits) { // PACK
    unsigned SrcExtraBits = SrcBitWidth - VTBits;
    if (Common > SrcExtraBits)
      return (Common - SrcExtraBits);
    return 1;
  }
  assert (SrcBitWidth == VTBits && "Expected operands of same bitwidth.");
  return Common;
}

unsigned
SystemZTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  if (Op.getResNo() != 0)
    return 1;
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
    unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (Id) {
    case Intrinsic::s390_vpksh:   // PACKS
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:  // PACKS_CC
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:  // PACKLS
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs: // PACKLS_CC
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
    case Intrinsic::s390_vpdi:
    case Intrinsic::s390_vsldb:
    case Intrinsic::s390_vperm:
      return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 1);
    case Intrinsic::s390_vuphb:  // VECTOR UNPACK HIGH
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplb:  // VECTOR UNPACK LOW
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf: {
      SDValue PackedOp = Op.getOperand(1);
      APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 1);
      unsigned Tmp = DAG.ComputeNumSignBits(PackedOp, SrcDemE, Depth + 1);
      EVT VT = Op.getValueType();
      unsigned VTBits = VT.getScalarSizeInBits();
      Tmp += VTBits - PackedOp.getScalarValueSizeInBits();
      return Tmp;
    }
    default:
      break;
    }
  } else {
    switch (Opcode) {
    case SystemZISD::SELECT_CCMASK:
      return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 0);
    default:
      break;
    }
  }

  return 1;
}

unsigned
SystemZTargetLowering::getStackProbeSize(MachineFunction &MF) const {
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  unsigned StackAlign = TFI->getStackAlignment();
  assert(StackAlign >=1 && isPowerOf2_32(StackAlign) &&
         "Unexpected stack alignment");
  // The default stack probe size is 4096 if the function has no
  // stack-probe-size attribute.
  unsigned StackProbeSize = 4096;
  const Function &Fn = MF.getFunction();
  if (Fn.hasFnAttribute("stack-probe-size"))
    Fn.getFnAttribute("stack-probe-size")
        .getValueAsString()
        .getAsInteger(0, StackProbeSize);
  // Round down to the stack alignment.
  StackProbeSize &= ~(StackAlign - 1);
  return StackProbeSize ? StackProbeSize : StackAlign;
}

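// Example (illustrative): with an 8-byte stack alignment, a function
// attribute "stack-probe-size"="16384" yields a probe size of 16384, while
// a requested size smaller than the alignment rounds down to zero and the
// alignment itself is used instead.
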
//===----------------------------------------------------------------------===//
// Custom insertion
//===----------------------------------------------------------------------===//

// Force base value Base into a register before MI.  Return the register.
static Register forceReg(MachineInstr &MI, MachineOperand &Base,
                         const SystemZInstrInfo *TII) {
  if (Base.isReg())
    return Base.getReg();

  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg)
      .add(Base)
      .addImm(0)
      .addReg(0);
  return Reg;
}

// The CC operand of MI might be missing a kill marker because there
// were multiple uses of CC, and ISel didn't know which to mark.
// Figure out whether MI should have had a kill marker.
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB) {
  // Scan forward through BB for a use/def of CC.
  MachineBasicBlock::iterator miI(std::next(MachineBasicBlock::iterator(MI)));
  for (MachineBasicBlock::iterator miE = MBB->end(); miI != miE; ++miI) {
    const MachineInstr& mi = *miI;
    if (mi.readsRegister(SystemZ::CC))
      return false;
    if (mi.definesRegister(SystemZ::CC))
      break; // Should have kill-flag - update below.
  }

  // If we hit the end of the block, check whether CC is live into a
  // successor.
  if (miI == MBB->end()) {
    for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI)
      if ((*SI)->isLiveIn(SystemZ::CC))
        return false;
  }

  return true;
}

// Return true if it is OK for this Select pseudo-opcode to be cascaded
// together with other Select pseudo-opcodes into a single basic-block with
// a conditional jump around it.
static bool isSelectPseudo(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
    return true;

  default:
    return false;
  }
}

// Helper function, which inserts PHI functions into SinkMBB:
//   %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
// where %FalseValue(i) and %TrueValue(i) are taken from Selects.
static void createPHIsForSelects(SmallVector<MachineInstr*, 8> &Selects,
                                 MachineBasicBlock *TrueMBB,
                                 MachineBasicBlock *FalseMBB,
                                 MachineBasicBlock *SinkMBB) {
  MachineFunction *MF = TrueMBB->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  MachineInstr *FirstMI = Selects.front();
  unsigned CCValid = FirstMI->getOperand(3).getImm();
  unsigned CCMask = FirstMI->getOperand(4).getImm();

  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();

  // As we are creating the PHIs, we have to be careful if there is more than
  // one.  Later Selects may reference the results of earlier Selects, but later
  // PHIs have to reference the individual true/false inputs from earlier PHIs.
  // That also means that PHI construction must work forward from earlier to
  // later, and that the code must maintain a mapping from earlier PHI's
  // destination registers, and the registers that went into the PHI.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;

  for (auto MI : Selects) {
    Register DestReg = MI->getOperand(0).getReg();
    Register TrueReg = MI->getOperand(1).getReg();
    Register FalseReg = MI->getOperand(2).getReg();

    // If this Select we are generating is the opposite condition from
    // the jump we generated, then we have to swap the operands for the
    // PHI that is going to be generated.
    if (MI->getOperand(4).getImm() == (CCValid ^ CCMask))
      std::swap(TrueReg, FalseReg);

    if (RegRewriteTable.find(TrueReg) != RegRewriteTable.end())
      TrueReg = RegRewriteTable[TrueReg].first;

    if (RegRewriteTable.find(FalseReg) != RegRewriteTable.end())
      FalseReg = RegRewriteTable[FalseReg].second;

    DebugLoc DL = MI->getDebugLoc();
    BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg)
      .addReg(TrueReg).addMBB(TrueMBB)
      .addReg(FalseReg).addMBB(FalseMBB);

    // Add this PHI to the rewrite table.
    RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
  }

  MF->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
}

// Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitSelect(MachineInstr &MI,
                                  MachineBasicBlock *MBB) const {
  assert(isSelectPseudo(MI) && "Bad call to emitSelect()");
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());

  unsigned CCValid = MI.getOperand(3).getImm();
  unsigned CCMask = MI.getOperand(4).getImm();

  // If we have a sequence of Select* pseudo instructions using the
  // same condition code value, we want to expand all of them into
  // a single pair of basic blocks using the same condition.
  SmallVector<MachineInstr*, 8> Selects;
  SmallVector<MachineInstr*, 8> DbgValues;
  Selects.push_back(&MI);
  unsigned Count = 0;
  for (MachineBasicBlock::iterator NextMIIt =
         std::next(MachineBasicBlock::iterator(MI));
       NextMIIt != MBB->end(); ++NextMIIt) {
    if (isSelectPseudo(*NextMIIt)) {
      assert(NextMIIt->getOperand(3).getImm() == CCValid &&
             "Bad CCValid operands since CC was not redefined.");
      if (NextMIIt->getOperand(4).getImm() == CCMask ||
          NextMIIt->getOperand(4).getImm() == (CCValid ^ CCMask)) {
        Selects.push_back(&*NextMIIt);
        continue;
      }
      break;
    }
    if (NextMIIt->definesRegister(SystemZ::CC) ||
        NextMIIt->usesCustomInsertionHook())
      break;
    bool User = false;
    for (auto SelMI : Selects)
      if (NextMIIt->readsVirtualRegister(SelMI->getOperand(0).getReg())) {
        User = true;
        break;
      }
    if (NextMIIt->isDebugInstr()) {
      if (User) {
        assert(NextMIIt->isDebugValue() && "Unhandled debug opcode.");
        DbgValues.push_back(&*NextMIIt);
      }
    }
    else if (User || ++Count > 20)
      break;
  }

  MachineInstr *LastMI = Selects.back();
  bool CCKilled =
      (LastMI->killsRegister(SystemZ::CC) || checkCCKill(*LastMI, MBB));
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *JoinMBB  = SystemZ::splitBlockAfter(LastMI, MBB);
  MachineBasicBlock *FalseMBB = SystemZ::emitBlockAfter(StartMBB);

  // Unless CC was killed in the last Select instruction, mark it as
  // live-in to both FalseMBB and JoinMBB.
  if (!CCKilled) {
    FalseMBB->addLiveIn(SystemZ::CC);
    JoinMBB->addLiveIn(SystemZ::CC);
  }

  //  StartMBB:
  //   BRC CCMask, JoinMBB
  //   # fallthrough to FalseMBB
  MBB = StartMBB;
  BuildMI(MBB, MI.getDebugLoc(), TII->get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
  MBB->addSuccessor(JoinMBB);
  MBB->addSuccessor(FalseMBB);

  //  FalseMBB:
  //   # fallthrough to JoinMBB
  MBB = FalseMBB;
  MBB->addSuccessor(JoinMBB);

  //  JoinMBB:
  //   %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ]
  MBB = JoinMBB;
  createPHIsForSelects(Selects, StartMBB, FalseMBB, MBB);
  for (auto SelMI : Selects)
    SelMI->eraseFromParent();

  MachineBasicBlock::iterator InsertPos = MBB->getFirstNonPHI();
  for (auto DbgMI : DbgValues)
    MBB->splice(InsertPos, StartMBB, DbgMI);

  return JoinMBB;
}

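// The expansion above turns a run of Select* pseudos into one conditional
// branch: StartMBB branches on CCMask to JoinMBB, FalseMBB falls through,
// and one PHI per select in JoinMBB picks the true or false value
// depending on which edge was taken.
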
// Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
// StoreOpcode is the store to use and Invert says whether the store should
// happen when the condition is false rather than true.  If a STORE ON
// CONDITION is available, STOCOpcode is its opcode, otherwise it is 0.
MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI,
                                                        MachineBasicBlock *MBB,
                                                        unsigned StoreOpcode,
                                                        unsigned STOCOpcode,
                                                        bool Invert) const {
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());

  Register SrcReg = MI.getOperand(0).getReg();
  MachineOperand Base = MI.getOperand(1);
  int64_t Disp = MI.getOperand(2).getImm();
  Register IndexReg = MI.getOperand(3).getReg();
  unsigned CCValid = MI.getOperand(4).getImm();
  unsigned CCMask = MI.getOperand(5).getImm();
  DebugLoc DL = MI.getDebugLoc();

  StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);

  // ISel pattern matching also adds a load memory operand of the same
  // address, so take special care to find the storing memory operand.
  MachineMemOperand *MMO = nullptr;
  for (auto *I : MI.memoperands())
    if (I->isStore()) {
      MMO = I;
      break;
    }

  // Use STOCOpcode if possible.  We could use different store patterns in
  // order to avoid matching the index register, but the performance trade-offs
  // might be more complicated in that case.
  if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
    if (Invert)
      CCMask ^= CCValid;

    BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
      .addReg(SrcReg)
      .add(Base)
      .addImm(Disp)
      .addImm(CCValid)
      .addImm(CCMask)
      .addMemOperand(MMO);

    MI.eraseFromParent();
    return MBB;
  }

  // Get the condition needed to branch around the store.
  if (!Invert)
    CCMask ^= CCValid;

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *JoinMBB  = SystemZ::splitBlockBefore(MI, MBB);
  MachineBasicBlock *FalseMBB = SystemZ::emitBlockAfter(StartMBB);

  // Unless CC was killed in the CondStore instruction, mark it as
  // live-in to both FalseMBB and JoinMBB.
  if (!MI.killsRegister(SystemZ::CC) && !checkCCKill(MI, JoinMBB)) {
    FalseMBB->addLiveIn(SystemZ::CC);
    JoinMBB->addLiveIn(SystemZ::CC);
  }

  //  StartMBB:
  //   BRC CCMask, JoinMBB
  //   # fallthrough to FalseMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
  MBB->addSuccessor(JoinMBB);
  MBB->addSuccessor(FalseMBB);

  //  FalseMBB:
  //   store %SrcReg, %Disp(%Index,%Base)
  //   # fallthrough to JoinMBB
  MBB = FalseMBB;
  BuildMI(MBB, DL, TII->get(StoreOpcode))
    .addReg(SrcReg)
    .add(Base)
    .addImm(Disp)
    .addReg(IndexReg)
    .addMemOperand(MMO);
  MBB->addSuccessor(JoinMBB);

  MI.eraseFromParent();
  return JoinMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_*
// or ATOMIC_SWAP{,W} instruction MI.  BinOpcode is the instruction that
// performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}.
// BitSize is the width of the field in bits, or 0 if this is a partword
// ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize
// is one of the operands.  Invert says whether the field should be
// inverted after performing BinOpcode (e.g. for NAND).
MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned BinOpcode,
    unsigned BitSize, bool Invert) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool IsSubWord = (BitSize < 32);

  // Extract the operands.  Base can be a register or a frame index.
  // Src2 can be a register or immediate.
  Register Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  MachineOperand Src2 = earlyUseOperand(MI.getOperand(3));
  Register BitShift = IsSubWord ? MI.getOperand(4).getReg() : Register();
  Register NegBitShift = IsSubWord ? MI.getOperand(5).getReg() : Register();
  DebugLoc DL = MI.getDebugLoc();
  if (IsSubWord)
    BitSize = MI.getOperand(6).getImm();

  // Subword operations use 32-bit registers.
  const TargetRegisterClass *RC = (BitSize <= 32 ?
                                   &SystemZ::GR32BitRegClass :
                                   &SystemZ::GR64BitRegClass);
  unsigned LOpcode  = BitSize <= 32 ? SystemZ::L  : SystemZ::LG;
  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;

  // Get the right opcodes for the displacement.
  LOpcode  = TII->getOpcodeForOffset(LOpcode,  Disp);
  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  Register OrigVal       = MRI.createVirtualRegister(RC);
  Register OldVal        = MRI.createVirtualRegister(RC);
  Register NewVal        = (BinOpcode || IsSubWord ?
                            MRI.createVirtualRegister(RC) : Src2.getReg());
  Register RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
  Register RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);

  // Insert a basic block for the main loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB  = SystemZ::splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB  = SystemZ::emitBlockAfter(StartMBB);

  //  StartMBB:
  //   ...
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   %RotatedNewVal = OP %RotatedOldVal, %Src2
  //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(LoopMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  if (Invert) {
    // Perform the operation normally and then invert every bit of the field.
    Register Tmp = MRI.createVirtualRegister(RC);
    BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2);
    if (BitSize <= 32)
      // XILF with the upper BitSize bits set.
      BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
        .addReg(Tmp).addImm(-1U << (32 - BitSize));
    else {
      // Use LCGR and add -1 to the result, which is more compact than
      // an XILF, XILH pair.
      Register Tmp2 = MRI.createVirtualRegister(RC);
      BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
      BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
        .addReg(Tmp2).addImm(-1);
    }
  } else if (BinOpcode)
    // A simply binary operation.
    BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
      .addReg(RotatedOldVal)
      .add(Src2);
  else if (IsSubWord)
    // Use RISBG to rotate Src2 into position and use it to replace the
    // field in RotatedOldVal.
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
      .addReg(RotatedOldVal).addReg(Src2.getReg())
      .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
    .addReg(OldVal)
    .addReg(NewVal)
    .add(Base)
    .addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();
  return DoneMBB;
}

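// For partword atomics the loop above works on the containing aligned
// word: the field is rotated into a fixed position, BinOpcode (plus the
// optional inversion) is applied, the result is rotated back, and the
// COMPARE AND SWAP is retried until it succeeds.
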
// Implement EmitInstrWithCustomInserter for pseudo
// ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI.  CompareOpcode is the
// instruction that should be used to compare the current field with the
// minimum or maximum value.  KeepOldMask is the BRC condition-code mask
// for when the current field should be kept.  BitSize is the width of
// the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned CompareOpcode,
    unsigned KeepOldMask, unsigned BitSize) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool IsSubWord = (BitSize < 32);

  // Extract the operands.  Base can be a register or a frame index.
  Register Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  Register Src2 = MI.getOperand(3).getReg();
  Register BitShift = (IsSubWord ? MI.getOperand(4).getReg() : Register());
  Register NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : Register());
  DebugLoc DL = MI.getDebugLoc();
  if (IsSubWord)
    BitSize = MI.getOperand(6).getImm();

  // Subword operations use 32-bit registers.
  const TargetRegisterClass *RC = (BitSize <= 32 ?
                                   &SystemZ::GR32BitRegClass :
                                   &SystemZ::GR64BitRegClass);
  unsigned LOpcode  = BitSize <= 32 ? SystemZ::L  : SystemZ::LG;
  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;

  // Get the right opcodes for the displacement.
  LOpcode  = TII->getOpcodeForOffset(LOpcode,  Disp);
  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  Register OrigVal       = MRI.createVirtualRegister(RC);
  Register OldVal        = MRI.createVirtualRegister(RC);
  Register NewVal        = MRI.createVirtualRegister(RC);
  Register RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
  Register RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
  Register RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);

  // Insert 3 basic blocks for the loop.
  MachineBasicBlock *StartMBB  = MBB;
  MachineBasicBlock *DoneMBB   = SystemZ::splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB   = SystemZ::emitBlockAfter(StartMBB);
  MachineBasicBlock *UseAltMBB = SystemZ::emitBlockAfter(LoopMBB);
  MachineBasicBlock *UpdateMBB = SystemZ::emitBlockAfter(UseAltMBB);

  //  StartMBB:
  //   ...
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   CompareOpcode %RotatedOldVal, %Src2
  //   BRC KeepOldMask, UpdateMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(UpdateMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CompareOpcode))
    .addReg(RotatedOldVal).addReg(Src2);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
  MBB->addSuccessor(UpdateMBB);
  MBB->addSuccessor(UseAltMBB);

  //  UseAltMBB:
  //   %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
  //   # fall through to UpdateMBB
  MBB = UseAltMBB;
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
      .addReg(RotatedOldVal).addReg(Src2)
      .addImm(32).addImm(31 + BitSize).addImm(0);
  MBB->addSuccessor(UpdateMBB);

  //  UpdateMBB:
  //   %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
  //                        [ %RotatedAltVal, UseAltMBB ]
  //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = UpdateMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
    .addReg(RotatedOldVal).addMBB(LoopMBB)
    .addReg(RotatedAltVal).addMBB(UseAltMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
    .addReg(OldVal)
    .addReg(NewVal)
    .add(Base)
    .addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();
  return DoneMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
// instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
                                          MachineBasicBlock *MBB) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Extract the operands.  Base can be a register or a frame index.
  Register Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  Register CmpVal = MI.getOperand(3).getReg();
  Register OrigSwapVal = MI.getOperand(4).getReg();
  Register BitShift = MI.getOperand(5).getReg();
  Register NegBitShift = MI.getOperand(6).getReg();
  int64_t BitSize = MI.getOperand(7).getImm();
  DebugLoc DL = MI.getDebugLoc();

  const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;

  // Get the right opcodes for the displacement and zero-extension.
  unsigned LOpcode  = TII->getOpcodeForOffset(SystemZ::L,  Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  unsigned ZExtOpcode  = BitSize == 8 ? SystemZ::LLCR : SystemZ::LLHR;
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  Register OrigOldVal = MRI.createVirtualRegister(RC);
  Register OldVal = MRI.createVirtualRegister(RC);
  Register SwapVal = MRI.createVirtualRegister(RC);
  Register StoreVal = MRI.createVirtualRegister(RC);
  Register OldValRot = MRI.createVirtualRegister(RC);
  Register RetryOldVal = MRI.createVirtualRegister(RC);
  Register RetrySwapVal = MRI.createVirtualRegister(RC);

  // Insert 2 basic blocks for the loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB  = SystemZ::splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB  = SystemZ::emitBlockAfter(StartMBB);
  MachineBasicBlock *SetMBB   = SystemZ::emitBlockAfter(LoopMBB);

  //  StartMBB:
  //   ...
  //   %OrigOldVal     = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
    .add(Base)
    .addImm(Disp)
    .addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal        = phi [ %OrigOldVal, EntryBB ], [ %RetryOldVal, SetMBB ]
  //   %SwapVal       = phi [ %OrigSwapVal, EntryBB ], [ %RetrySwapVal, SetMBB ]
  //   %OldValRot     = RLL %OldVal, BitSize(%BitShift)
  //                      ^^ The low BitSize bits contain the field
  //                         of interest.
  //   %RetrySwapVal = RISBG32 %SwapVal, %OldValRot, 32, 63-BitSize, 0
  //                      ^^ Replace the upper 32-BitSize bits of the
  //                         swap value with those that we loaded and rotated.
  //   %Dest = LL[CH] %OldValRot
  //   CR %Dest, %CmpVal
  //   JNE DoneMBB
  //   # Fall through to SetMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigOldVal).addMBB(StartMBB)
    .addReg(RetryOldVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
    .addReg(OrigSwapVal).addMBB(StartMBB)
    .addReg(RetrySwapVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), OldValRot)
    .addReg(OldVal).addReg(BitShift).addImm(BitSize);
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
    .addReg(SwapVal).addReg(OldValRot).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(ZExtOpcode), Dest)
    .addReg(OldValRot);
  BuildMI(MBB, DL, TII->get(SystemZ::CR))
    .addReg(Dest).addReg(CmpVal);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP)
    .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
  MBB->addSuccessor(DoneMBB);
  MBB->addSuccessor(SetMBB);

  //  SetMBB:
  //   %StoreVal     = RLL %RetrySwapVal, -BitSize(%NegBitShift)
  //                      ^^ Rotate the new field to its proper position.
  //   %RetryOldVal  = CS %OldVal, %StoreVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to ExitMBB
  MBB = SetMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
    .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
  BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
    .addReg(OldVal)
    .addReg(StoreVal)
    .add(Base)
    .addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  // If the CC def wasn't dead in the ATOMIC_CMP_SWAPW, mark CC as live-in
  // to the block after the loop.  At this point, CC may have been defined
  // either by the CR in LoopMBB or by the CS in SetMBB.
  if (!MI.registerDefIsDead(SystemZ::CC))
    DoneMBB->addLiveIn(SystemZ::CC);

  MI.eraseFromParent();
  return DoneMBB;
}

// Emit a move from two GR64s to a GR128.
MachineBasicBlock *
SystemZTargetLowering::emitPair128(MachineInstr &MI,
                                   MachineBasicBlock *MBB) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  Register Lo = MI.getOperand(2).getReg();
  Register Tmp1 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
  Register Tmp2 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Tmp1);
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Tmp2)
    .addReg(Tmp1).addReg(Hi).addImm(SystemZ::subreg_h64);
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(Tmp2).addReg(Lo).addImm(SystemZ::subreg_l64);

  MI.eraseFromParent();
  return MBB;
}

// Emit an extension from a GR64 to a GR128.  ClearEven is true
// if the high register of the GR128 value must be cleared or false if
// it's "don't care".
MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI,
                                                     MachineBasicBlock *MBB,
                                                     bool ClearEven) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  Register In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
  if (ClearEven) {
    Register NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
    Register Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);

    BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
      .addImm(0);
    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
      .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
    In128 = NewIn128;
  }
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(In128).addReg(Src).addImm(SystemZ::subreg_l64);

  MI.eraseFromParent();
  return MBB;
}

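// When ClearEven is set, the high (even) 64-bit register of the 128-bit
// pair is explicitly zeroed via LLILL 0 before the source is inserted into
// the low half; otherwise the high half is left as "don't care".
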
7786 MachineBasicBlock
*SystemZTargetLowering::emitMemMemWrapper(
7787 MachineInstr
&MI
, MachineBasicBlock
*MBB
, unsigned Opcode
) const {
7788 MachineFunction
&MF
= *MBB
->getParent();
7789 const SystemZInstrInfo
*TII
=
7790 static_cast<const SystemZInstrInfo
*>(Subtarget
.getInstrInfo());
7791 MachineRegisterInfo
&MRI
= MF
.getRegInfo();
7792 DebugLoc DL
= MI
.getDebugLoc();
7794 MachineOperand DestBase
= earlyUseOperand(MI
.getOperand(0));
7795 uint64_t DestDisp
= MI
.getOperand(1).getImm();
7796 MachineOperand SrcBase
= earlyUseOperand(MI
.getOperand(2));
7797 uint64_t SrcDisp
= MI
.getOperand(3).getImm();
7798 MachineOperand
&LengthMO
= MI
.getOperand(4);
7799 uint64_t ImmLength
= LengthMO
.isImm() ? LengthMO
.getImm() : 0;
7800 Register LenMinus1Reg
=
7801 LengthMO
.isReg() ? LengthMO
.getReg() : SystemZ::NoRegister
;
7803 // When generating more than one CLC, all but the last will need to
7804 // branch to the end when a difference is found.
7805 MachineBasicBlock
*EndMBB
= (ImmLength
> 256 && Opcode
== SystemZ::CLC
7806 ? SystemZ::splitBlockAfter(MI
, MBB
)
7809 // Check for the loop form, in which operand 5 is the trip count.
7810 if (MI
.getNumExplicitOperands() > 5) {
7811 Register StartCountReg
= MI
.getOperand(5).getReg();
7812 bool HaveSingleBase
= DestBase
.isIdenticalTo(SrcBase
);
7814 auto loadZeroAddress
= [&]() -> MachineOperand
{
7815 Register Reg
= MRI
.createVirtualRegister(&SystemZ::ADDR64BitRegClass
);
7816 BuildMI(*MBB
, MI
, DL
, TII
->get(SystemZ::LGHI
), Reg
).addImm(0);
7817 return MachineOperand::CreateReg(Reg
, false);
7819 if (DestBase
.isReg() && DestBase
.getReg() == SystemZ::NoRegister
)
7820 DestBase
= loadZeroAddress();
7821 if (SrcBase
.isReg() && SrcBase
.getReg() == SystemZ::NoRegister
)
7822 SrcBase
= HaveSingleBase
? DestBase
: loadZeroAddress();

    MachineBasicBlock *StartMBB = nullptr;
    MachineBasicBlock *LoopMBB = nullptr;
    MachineBasicBlock *NextMBB = nullptr;
    MachineBasicBlock *DoneMBB = nullptr;
    MachineBasicBlock *AllDoneMBB = nullptr;

    Register StartSrcReg = forceReg(MI, SrcBase, TII);
    Register StartDestReg =
        (HaveSingleBase ? StartSrcReg : forceReg(MI, DestBase, TII));

    const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
    Register ThisSrcReg = MRI.createVirtualRegister(RC);
    Register ThisDestReg =
        (HaveSingleBase ? ThisSrcReg : MRI.createVirtualRegister(RC));
    Register NextSrcReg = MRI.createVirtualRegister(RC);
    Register NextDestReg =
        (HaveSingleBase ? NextSrcReg : MRI.createVirtualRegister(RC));
    RC = &SystemZ::GR64BitRegClass;
    Register ThisCountReg = MRI.createVirtualRegister(RC);
    Register NextCountReg = MRI.createVirtualRegister(RC);
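
    // The This*/Next* registers carry the loop state: the destination and
    // source addresses advance by 256 bytes per iteration and the count is
    // decremented until it reaches zero.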

    if (LengthMO.isReg()) {
      AllDoneMBB = SystemZ::splitBlockBefore(MI, MBB);
      StartMBB = SystemZ::emitBlockAfter(MBB);
      LoopMBB = SystemZ::emitBlockAfter(StartMBB);
      NextMBB = LoopMBB;
      DoneMBB = SystemZ::emitBlockAfter(LoopMBB);

      //  MBB:
      //  # Jump to AllDoneMBB if LenMinus1Reg is -1, or fall thru to StartMBB.
      BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
        .addReg(LenMinus1Reg).addImm(-1);
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_EQ)
        .addMBB(AllDoneMBB);
      MBB->addSuccessor(AllDoneMBB);
      MBB->addSuccessor(StartMBB);

      //  StartMBB:
      //  # Jump to DoneMBB if %StartCountReg is zero, or fall through to LoopMBB.
      MBB = StartMBB;
      BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
        .addReg(StartCountReg).addImm(0);
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_EQ)
        .addMBB(DoneMBB);
      MBB->addSuccessor(DoneMBB);
      MBB->addSuccessor(LoopMBB);
    }
    else {
      StartMBB = MBB;
      DoneMBB = SystemZ::splitBlockBefore(MI, MBB);
      LoopMBB = SystemZ::emitBlockAfter(StartMBB);
      NextMBB = (EndMBB ? SystemZ::emitBlockAfter(LoopMBB) : LoopMBB);

      //  StartMBB:
      //  # fall through to LoopMBB
      MBB->addSuccessor(LoopMBB);

      DestBase = MachineOperand::CreateReg(NextDestReg, false);
      SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
      ImmLength &= 255;
      if (EndMBB && !ImmLength)
        // If the loop handled the whole CLC range, DoneMBB will be empty with
        // CC live-through into EndMBB, so add it as live-in.
        DoneMBB->addLiveIn(SystemZ::CC);
    }

    //  LoopMBB:
    //   %ThisDestReg = phi [ %StartDestReg, StartMBB ],
    //                      [ %NextDestReg, NextMBB ]
    //   %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
    //                     [ %NextSrcReg, NextMBB ]
    //   %ThisCountReg = phi [ %StartCountReg, StartMBB ],
    //                       [ %NextCountReg, NextMBB ]
    //   ( PFD 2, 768+DestDisp(%ThisDestReg) )
    //   Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
    //   ( JLH EndMBB )
    //
    // The prefetch is used only for MVC.  The JLH is used only for CLC.
    MBB = LoopMBB;
    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
      .addReg(StartDestReg).addMBB(StartMBB)
      .addReg(NextDestReg).addMBB(NextMBB);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
        .addReg(StartSrcReg).addMBB(StartMBB)
        .addReg(NextSrcReg).addMBB(NextMBB);
    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
      .addReg(StartCountReg).addMBB(StartMBB)
      .addReg(NextCountReg).addMBB(NextMBB);
    if (Opcode == SystemZ::MVC)
      BuildMI(MBB, DL, TII->get(SystemZ::PFD))
        .addImm(SystemZ::PFD_WRITE)
        .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
    BuildMI(MBB, DL, TII->get(Opcode))
      .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
      .addReg(ThisSrcReg).addImm(SrcDisp);
    if (EndMBB) {
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
    }

    //  NextMBB:
    //   %NextDestReg = LA 256(%ThisDestReg)
    //   %NextSrcReg = LA 256(%ThisSrcReg)
    //   %NextCountReg = AGHI %ThisCountReg, -1
    //   CGHI %NextCountReg, 0
    //   JLH LoopMBB
    //   # fall through to DoneMBB
    //
    // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
    MBB = NextMBB;
    BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
      .addReg(ThisDestReg).addImm(256).addReg(0);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
        .addReg(ThisSrcReg).addImm(256).addReg(0);
    BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
      .addReg(ThisCountReg).addImm(-1);
    BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
      .addReg(NextCountReg).addImm(0);
    BuildMI(MBB, DL, TII->get(SystemZ::BRC))
      .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
      .addMBB(LoopMBB);
    MBB->addSuccessor(LoopMBB);
    MBB->addSuccessor(DoneMBB);

    MBB = DoneMBB;
    if (LengthMO.isReg()) {
      // DoneMBB:
      // # Make PHIs for RemDestReg/RemSrcReg as the loop may or may not run.
      // # Use EXecute Relative Long for the remainder of the bytes. The target
      //   instruction of the EXRL will have a length field of 1 since 0 is an
      //   illegal value. The number of bytes processed becomes (%LenMinus1Reg &
      //   0xff) + 1.
      // # Fall through to AllDoneMBB.
      Register RemSrcReg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      Register RemDestReg = HaveSingleBase ? RemSrcReg
        : MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(MBB, DL, TII->get(SystemZ::PHI), RemDestReg)
        .addReg(StartDestReg).addMBB(StartMBB)
        .addReg(NextDestReg).addMBB(LoopMBB);
      if (!HaveSingleBase)
        BuildMI(MBB, DL, TII->get(SystemZ::PHI), RemSrcReg)
          .addReg(StartSrcReg).addMBB(StartMBB)
          .addReg(NextSrcReg).addMBB(LoopMBB);
      MRI.constrainRegClass(LenMinus1Reg, &SystemZ::ADDR64BitRegClass);
      BuildMI(MBB, DL, TII->get(SystemZ::EXRL_Pseudo))
        .addImm(Opcode)
        .addReg(LenMinus1Reg)
        .addReg(RemDestReg).addImm(DestDisp)
        .addReg(RemSrcReg).addImm(SrcDisp);
      MBB->addSuccessor(AllDoneMBB);
      MBB = AllDoneMBB;
    }
  }

  // Handle any remaining bytes with straight-line code.
  while (ImmLength > 0) {
    uint64_t ThisLength = std::min(ImmLength, uint64_t(256));
    // The previous iteration might have created out-of-range displacements.
    // Apply them using LAY if so.
    if (!isUInt<12>(DestDisp)) {
      Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
          .add(DestBase)
          .addImm(DestDisp)
          .addReg(0);
      DestBase = MachineOperand::CreateReg(Reg, false);
      DestDisp = 0;
    }
    if (!isUInt<12>(SrcDisp)) {
      Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
          .add(SrcBase)
          .addImm(SrcDisp)
          .addReg(0);
      SrcBase = MachineOperand::CreateReg(Reg, false);
      SrcDisp = 0;
    }
    BuildMI(*MBB, MI, DL, TII->get(Opcode))
        .add(DestBase)
        .addImm(DestDisp)
        .addImm(ThisLength)
        .add(SrcBase)
        .addImm(SrcDisp)
        .setMemRefs(MI.memoperands());
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    ImmLength -= ThisLength;
    // If there's another CLC to go, branch to the end if a difference
    // was found.
    if (EndMBB && ImmLength > 0) {
      MachineBasicBlock *NextMBB = SystemZ::splitBlockBefore(MI, MBB);
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
      MBB = NextMBB;
    }
  }
  if (EndMBB) {
    MBB->addSuccessor(EndMBB);
    MBB = EndMBB;
    MBB->addLiveIn(SystemZ::CC);
  }

  MI.eraseFromParent();
  return MBB;
}

// Decompose string pseudo-instruction MI into a loop that continually performs
// Opcode until CC != 3.
MachineBasicBlock *SystemZTargetLowering::emitStringWrapper(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  uint64_t End1Reg = MI.getOperand(0).getReg();
  uint64_t Start1Reg = MI.getOperand(1).getReg();
  uint64_t Start2Reg = MI.getOperand(2).getReg();
  uint64_t CharReg = MI.getOperand(3).getReg();

  const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
  uint64_t This1Reg = MRI.createVirtualRegister(RC);
  uint64_t This2Reg = MRI.createVirtualRegister(RC);
  uint64_t End2Reg = MRI.createVirtualRegister(RC);

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = SystemZ::splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = SystemZ::emitBlockAfter(StartMBB);

  //  StartMBB:
  //   # fall through to LoopMBB
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
  //   %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
  //   R0L = %CharReg
  //   %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
  //   JO LoopMBB
  //   # fall through to DoneMBB
  //
  // The load of R0L can be hoisted by post-RA LICM.
  MBB = LoopMBB;

  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
    .addReg(Start1Reg).addMBB(StartMBB)
    .addReg(End1Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
    .addReg(Start2Reg).addMBB(StartMBB)
    .addReg(End2Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
  BuildMI(MBB, DL, TII->get(Opcode))
    .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
    .addReg(This1Reg).addReg(This2Reg);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  DoneMBB->addLiveIn(SystemZ::CC);

  MI.eraseFromParent();
  return DoneMBB;
}

// Update TBEGIN instruction with final opcode and register clobbers.
MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode,
    bool NoFloat) const {
  MachineFunction &MF = *MBB->getParent();
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();

  // Update opcode.
  MI.setDesc(TII->get(Opcode));

  // We cannot handle a TBEGIN that clobbers the stack or frame pointer.
  // Make sure to add the corresponding GRSM bits if they are missing.
  uint64_t Control = MI.getOperand(2).getImm();
  static const unsigned GPRControlBit[16] = {
    0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
    0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
  };
  Control |= GPRControlBit[15];
  if (TFI->hasFP(MF))
    Control |= GPRControlBit[11];
  MI.getOperand(2).setImm(Control);

  // Add GPR clobbers.
  for (int I = 0; I < 16; I++) {
    if ((Control & GPRControlBit[I]) == 0) {
      unsigned Reg = SystemZMC::GR64Regs[I];
      MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
    }
  }

  // Add FPR/VR clobbers.
  if (!NoFloat && (Control & 4) != 0) {
    if (Subtarget.hasVector()) {
      for (int I = 0; I < 32; I++) {
        unsigned Reg = SystemZMC::VR128Regs[I];
        MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
      }
    } else {
      for (int I = 0; I < 16; I++) {
        unsigned Reg = SystemZMC::FP64Regs[I];
        MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
      }
    }
  }

  return MBB;
}
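
// Replace a floating-point compare-with-zero pseudo by the corresponding
// load-and-test instruction, which sets CC directly.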
MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  DebugLoc DL = MI.getDebugLoc();

  Register SrcReg = MI.getOperand(0).getReg();

  // Create new virtual register of the same class as source.
  const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
  Register DstReg = MRI->createVirtualRegister(RC);

  // Replace pseudo with a normal load-and-test that models the def as
  // well.
  BuildMI(*MBB, MI, DL, TII->get(Opcode), DstReg)
      .addReg(SrcReg)
      .setMIFlags(MI.getFlags());
  MI.eraseFromParent();

  return MBB;
}
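
// Lower PROBED_ALLOCA: decrement the stack pointer in ProbeSize steps and
// probe each newly allocated page with a volatile compare (LoopTestMBB and
// LoopBodyMBB), then allocate and probe the remaining partial step
// (TailTestMBB and TailMBB) before copying the final stack pointer to
// DstReg in DoneMBB.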
MachineBasicBlock *SystemZTargetLowering::emitProbedAlloca(
    MachineInstr &MI, MachineBasicBlock *MBB) const {
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  DebugLoc DL = MI.getDebugLoc();
  const unsigned ProbeSize = getStackProbeSize(MF);
  Register DstReg = MI.getOperand(0).getReg();
  Register SizeReg = MI.getOperand(2).getReg();

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = SystemZ::splitBlockAfter(MI, MBB);
  MachineBasicBlock *LoopTestMBB = SystemZ::emitBlockAfter(StartMBB);
  MachineBasicBlock *LoopBodyMBB = SystemZ::emitBlockAfter(LoopTestMBB);
  MachineBasicBlock *TailTestMBB = SystemZ::emitBlockAfter(LoopBodyMBB);
  MachineBasicBlock *TailMBB = SystemZ::emitBlockAfter(TailTestMBB);

  MachineMemOperand *VolLdMMO = MF.getMachineMemOperand(MachinePointerInfo(),
    MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad, 8, Align(1));

  Register PHIReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  Register IncReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);

  //  LoopTestMBB
  //  BRC TailTestMBB
  //  # fallthrough to LoopBodyMBB
  StartMBB->addSuccessor(LoopTestMBB);
  MBB = LoopTestMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), PHIReg)
    .addReg(SizeReg)
    .addMBB(StartMBB)
    .addReg(IncReg)
    .addMBB(LoopBodyMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::CLGFI))
    .addReg(PHIReg)
    .addImm(ProbeSize);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_LT)
    .addMBB(TailTestMBB);
  MBB->addSuccessor(LoopBodyMBB);
  MBB->addSuccessor(TailTestMBB);

  //  LoopBodyMBB: Allocate and probe by means of a volatile compare.
  //  J LoopTestMBB
  MBB = LoopBodyMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::SLGFI), IncReg)
    .addReg(PHIReg)
    .addImm(ProbeSize);
  BuildMI(MBB, DL, TII->get(SystemZ::SLGFI), SystemZ::R15D)
    .addReg(SystemZ::R15D)
    .addImm(ProbeSize);
  BuildMI(MBB, DL, TII->get(SystemZ::CG)).addReg(SystemZ::R15D)
    .addReg(SystemZ::R15D).addImm(ProbeSize - 8).addReg(0)
    .setMemRefs(VolLdMMO);
  BuildMI(MBB, DL, TII->get(SystemZ::J)).addMBB(LoopTestMBB);
  MBB->addSuccessor(LoopTestMBB);

  //  TailTestMBB
  //  BRC DoneMBB
  //  # fallthrough to TailMBB
  MBB = TailTestMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
    .addReg(PHIReg)
    .addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_EQ)
    .addMBB(DoneMBB);
  MBB->addSuccessor(TailMBB);
  MBB->addSuccessor(DoneMBB);

  //  TailMBB
  //  # fallthrough to DoneMBB
  MBB = TailMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::SLGR), SystemZ::R15D)
    .addReg(SystemZ::R15D)
    .addReg(PHIReg);
  BuildMI(MBB, DL, TII->get(SystemZ::CG)).addReg(SystemZ::R15D)
    .addReg(SystemZ::R15D).addImm(-8).addReg(PHIReg)
    .setMemRefs(VolLdMMO);
  MBB->addSuccessor(DoneMBB);

  //  DoneMBB
  MBB = DoneMBB;
  BuildMI(*MBB, MBB->begin(), DL, TII->get(TargetOpcode::COPY), DstReg)
    .addReg(SystemZ::R15D);

  MI.eraseFromParent();
  return DoneMBB;
}
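
// Return a node for the address of the backchain slot: the given stack
// pointer value plus this function's backchain offset.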
SDValue SystemZTargetLowering::
getBackchainAddress(SDValue SP, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto *TFL =
      static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
  SDLoc DL(SP);
  return DAG.getNode(ISD::ADD, DL, MVT::i64, SP,
                     DAG.getIntPtrConstant(TFL->getBackchainOffset(MF), DL));
}
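
// Dispatch pseudo-instructions marked with usesCustomInserter to the emit*
// helpers above; each helper expands its pseudo into real instructions and,
// where needed, new basic blocks.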
MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *MBB) const {
  switch (MI.getOpcode()) {
  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
    return emitSelect(MI, MBB);

  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore32Mux:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
  case SystemZ::CondStore32MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, true);

  case SystemZ::PAIR128:
    return emitPair128(MI, MBB);
  case SystemZ::AEXT128:
    return emitExt128(MI, MBB, false);
  case SystemZ::ZEXT128:
    return emitExt128(MI, MBB, true);
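
  // Atomic read-modify-write operations.  The numeric argument passed to
  // emitAtomicLoadBinary is the access width: 0 selects the sub-word
  // (ATOMIC_LOADW_*) expansion, 32 and 64 the full GPR widths.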
  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0, 0);
  case SystemZ::ATOMIC_SWAP_32:
    return emitAtomicLoadBinary(MI, MBB, 0, 32);
  case SystemZ::ATOMIC_SWAP_64:
    return emitAtomicLoadBinary(MI, MBB, 0, 64);

  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
  case SystemZ::ATOMIC_LOAD_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
  case SystemZ::ATOMIC_LOAD_AHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
  case SystemZ::ATOMIC_LOAD_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
  case SystemZ::ATOMIC_LOAD_AGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
  case SystemZ::ATOMIC_LOAD_AGHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
  case SystemZ::ATOMIC_LOAD_AGFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);

  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
  case SystemZ::ATOMIC_LOAD_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
  case SystemZ::ATOMIC_LOAD_SGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);

  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
  case SystemZ::ATOMIC_LOAD_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
  case SystemZ::ATOMIC_LOAD_NILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
  case SystemZ::ATOMIC_LOAD_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
  case SystemZ::ATOMIC_LOAD_NILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
  case SystemZ::ATOMIC_LOAD_NGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
  case SystemZ::ATOMIC_LOAD_NILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
  case SystemZ::ATOMIC_LOAD_NILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
  case SystemZ::ATOMIC_LOAD_NIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
  case SystemZ::ATOMIC_LOAD_NIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
  case SystemZ::ATOMIC_LOAD_NILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
  case SystemZ::ATOMIC_LOAD_NIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);

  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
  case SystemZ::ATOMIC_LOAD_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
  case SystemZ::ATOMIC_LOAD_OILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
  case SystemZ::ATOMIC_LOAD_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
  case SystemZ::ATOMIC_LOAD_OILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
  case SystemZ::ATOMIC_LOAD_OGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
  case SystemZ::ATOMIC_LOAD_OILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
  case SystemZ::ATOMIC_LOAD_OILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
  case SystemZ::ATOMIC_LOAD_OIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
  case SystemZ::ATOMIC_LOAD_OIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
  case SystemZ::ATOMIC_LOAD_OILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
  case SystemZ::ATOMIC_LOAD_OIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);

  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
  case SystemZ::ATOMIC_LOAD_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
  case SystemZ::ATOMIC_LOAD_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
  case SystemZ::ATOMIC_LOAD_XGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
  case SystemZ::ATOMIC_LOAD_XILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
  case SystemZ::ATOMIC_LOAD_XIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);
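
  // The *i variants pass a trailing 'true' asking emitAtomicLoadBinary to
  // invert the result of the operation; these expand the NAND-style atomics.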
  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
  case SystemZ::ATOMIC_LOAD_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
  case SystemZ::ATOMIC_LOAD_NILLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
  case SystemZ::ATOMIC_LOAD_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
  case SystemZ::ATOMIC_LOAD_NILFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
  case SystemZ::ATOMIC_LOAD_NGRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
  case SystemZ::ATOMIC_LOAD_NILL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);
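
  // Atomic min/max.  The CC mask passed to emitAtomicLoadMinMax is the
  // comparison outcome for which the old value is kept (LE for min, GE for
  // max); CR/CGR compare signed, CLR/CLGR unsigned.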
  case SystemZ::ATOMIC_LOADW_MIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_MIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_MIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_MAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_MAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_MAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_LOADW_UMIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_UMIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_UMIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_UMAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_UMAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_UMAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::MVCSequence:
  case SystemZ::MVCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCSequence:
  case SystemZ::NCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCSequence:
  case SystemZ::OCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCSequence:
  case SystemZ::XCLoop:
  case SystemZ::XCLoopVarLen:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCSequence:
  case SystemZ::CLCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  case SystemZ::TBEGIN:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
  case SystemZ::TBEGIN_nofloat:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
  case SystemZ::TBEGINC:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
  case SystemZ::LTEBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
  case SystemZ::LTDBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
  case SystemZ::LTXBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);

  case SystemZ::PROBED_ALLOCA:
    return emitProbedAlloca(MI, MBB);

  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
    return emitPatchPoint(MI, MBB);

  default:
    llvm_unreachable("Unexpected instr type to insert");
  }
}

// This is only used by the isel schedulers, and is needed only to prevent
// compiler from crashing when list-ilp is used.
const TargetRegisterClass *
SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
  if (VT == MVT::Untyped)
    return &SystemZ::ADDR128BitRegClass;
  return TargetLowering::getRepRegClassFor(VT);
}