//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Sparc uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "SparcISelLowering.h"
#include "MCTargetDesc/SparcMCExpr.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcRegisterInfo.h"
#include "SparcTargetMachine.h"
#include "SparcTargetObjectFile.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;

//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//

static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
                                 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  assert (ArgFlags.isSRet());

  // Assign SRet argument.
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                         0,
                                         LocVT, LocInfo));
  return true;
}
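
// The 32-bit SPARC ABI reserves the word at [%sp+64] in the caller's frame
// for the struct-return pointer, which is why the sret argument gets the
// fixed custom-memory offset 0 above: LowerCall_32 stores it to %sp+64 and
// LowerFormalArguments_32 reloads it from [%fp+64].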

static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
                                     MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                     ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };
  // Try to get first reg.
  if (Register Reg = State.AllocateReg(RegList)) {
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  } else {
    // Assign whole thing in stack.
    State.addLoc(CCValAssign::getCustomMem(
        ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
    return true;
  }

  // Try to get second reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getCustomMem(
        ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
  return true;
}
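
// For illustration: if five integer words already occupy %i0-%i4, a 64-bit
// value handled by the convention above is split, its first half taking the
// last register %i5 and its second half falling through to a 4-byte stack
// slot.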

static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
                                         MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                         ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };

  // Try to get first reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  // Try to get second reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  return true;
}

// Allocate a full-sized argument for the 64-bit ABI.
static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,
                                    MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert((LocVT == MVT::f32 || LocVT == MVT::f128
          || LocVT.getSizeInBits() == 64) &&
         "Can't handle non-64 bits locations");

  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
  unsigned size      = (LocVT == MVT::f128) ? 16 : 8;
  Align alignment = (LocVT == MVT::f128) ? Align(16) : Align(8);
  unsigned Offset = State.AllocateStack(size, alignment);
  unsigned Reg = 0;

  if (LocVT == MVT::i64 && Offset < 6*8)
    // Promote integers to %i0-%i5.
    Reg = SP::I0 + Offset/8;
  else if (LocVT == MVT::f64 && Offset < 16*8)
    // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
    Reg = SP::D0 + Offset/8;
  else if (LocVT == MVT::f32 && Offset < 16*8)
    // Promote floats to %f1, %f3, ...
    Reg = SP::F1 + Offset/4;
  else if (LocVT == MVT::f128 && Offset < 16*8)
    // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
    Reg = SP::Q0 + Offset/16;

  // Promote to register when possible, otherwise use the stack slot.
  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // Bail out if this is a return CC and we run out of registers to place
  // values into.
  if (IsReturn)
    return false;

  // This argument goes on the stack in an 8-byte slot.
  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
  if (LocVT == MVT::f32)
    Offset += 4;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}
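
// A worked example of the offset-to-register mapping above (illustrative):
// for a prototype (i64, double, float), the i64 at offset 0 lands in %i0,
// the double at offset 8 maps to SP::D0 + 8/8, i.e. LLVM's D1 (hardware
// %f2-%f3), and the float at offset 16 maps to SP::F1 + 16/4 = %f5.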

// Allocate a half-sized argument for the 64-bit ABI.
//
// This is used when passing { float, int } structs by value in registers.
static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,
                                    MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
  unsigned Offset = State.AllocateStack(4, Align(4));

  if (LocVT == MVT::f32 && Offset < 16*8) {
    // Promote floats to %f0-%f31.
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
                                     LocVT, LocInfo));
    return true;
  }

  if (LocVT == MVT::i32 && Offset < 6*8) {
    // Promote integers to %i0-%i5, using half the register.
    unsigned Reg = SP::I0 + Offset/8;
    LocVT = MVT::i64;
    LocInfo = CCValAssign::AExt;

    // Set the Custom bit if this i32 goes in the high bits of a register.
    if (Offset % 8 == 0)
      State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
                                             LocVT, LocInfo));
    else
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // Bail out if this is a return CC and we run out of registers to place
  // values into.
  if (IsReturn)
    return false;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}
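
// For example, a { float, int } struct passed by value occupies one 8-byte
// slot: the float half can be promoted to an %f register while the int half
// shares an %i register, with the Custom bit recording that it lives in the
// register's high 32 bits.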

static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                            CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                            CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                               CCValAssign::LocInfo &LocInfo,
                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                               CCValAssign::LocInfo &LocInfo,
                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

#include "SparcGenCallingConv.inc"

// The calling conventions in SparcCallingConv.td are described in terms of the
// callee's register window. This function translates registers to the
// corresponding caller window %o register.
static unsigned toCallerWindow(unsigned Reg) {
  static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
                "Unexpected enum");
  if (Reg >= SP::I0 && Reg <= SP::I7)
    return Reg - SP::I0 + SP::O0;
  return Reg;
}
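
// E.g. a value the callee will see in %i0 must be set up by the caller in
// %o0; the 'save' instruction's register-window shift maps the caller's
// %o0-%o7 onto the callee's %i0-%i7.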

bool SparcTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? RetCC_Sparc64
                                                       : RetCC_Sparc32);
}

SDValue
SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  if (Subtarget->is64Bit())
    return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
}

SDValue
SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Make room for the return address offset.
  RetOps.push_back(SDValue());

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::v2i32);
      // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
      // happen by default if this wasn't a legal type)

      SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
                                  Arg,
                                  DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
                                  Arg,
                                  DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));

      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Glue);
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
                               Glue);
    } else
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
  // If the function returns a struct, copy the SRetReturnReg to I0
  if (MF.getFunction().hasStructRetAttr()) {
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    Register Reg = SFI->getSRetReturnReg();
    if (!Reg)
      llvm_unreachable("sret virtual register not created in the entry block");
    auto PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
    Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
    RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
  }

  RetOps[0] = Chain;  // Update chain.
  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
}
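
// Note on RetAddrOffset above: the SPARC V8 ABI requires the caller of a
// struct-returning function to follow the call's delay slot with an
// 'unimp <size>' word, so such callees return to %o7+12 rather than the
// usual %o7+8.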

// Lower return values for the 64-bit ABI.
// Return values are passed exactly the same way as function arguments.
SDValue
SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // The second operand on the return instruction is the return address offset.
  // The return address is always %i7+8 with the 64-bit ABI.
  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue OutVal = OutVals[i];

    // Integer return values must be sign or zero extended by the callee.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::ZExt:
      OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::AExt:
      OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    // The custom bit on an i32 return value indicates that it should be passed
    // in the high bits of the register.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
      OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
                           DAG.getConstant(32, DL, MVT::i32));

      // The next value may go in the low bits of the same register.
      // Handle both at once.
      if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
        SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
        OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
        // Skip the next value, it's already done.
        ++i;
      }
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
}

SDValue SparcTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
                                   DL, DAG, InVals);
  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
                                 DL, DAG, InVals);
}

/// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
/// passed in either one or two GPRs, including FP values. TODO: we should
/// pass FP values in FP registers for fastcc functions.
SDValue SparcTargetLowering::LowerFormalArguments_32(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);

  const unsigned StackOffset = 92;
  bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();

  unsigned InIdx = 0;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
    CCValAssign &VA = ArgLocs[i];

    if (Ins[InIdx].Flags.isSRet()) {
      if (InIdx != 0)
        report_fatal_error("sparc only supports sret on the first parameter");
      // Get SRet from [%fp+64].
      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
      SDValue Arg =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
      InVals.push_back(Arg);
      continue;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom()) {
        assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

        Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
        MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
        SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);

        assert(i+1 < e);
        CCValAssign &NextVA = ArgLocs[++i];

        SDValue LoVal;
        if (NextVA.isMemLoc()) {
          int FrameIdx = MF.getFrameInfo().
            CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
          LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
        } else {
          Register loReg = MF.addLiveIn(NextVA.getLocReg(),
                                        &SP::IntRegsRegClass);
          LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
        }

        if (IsLittleEndian)
          std::swap(LoVal, HiVal);

        SDValue WholeValue =
          DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
        WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
        InVals.push_back(WholeValue);
        continue;
      }

      Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
      SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      if (VA.getLocVT() == MVT::f32)
        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
      else if (VA.getLocVT() != MVT::i32) {
        Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
                          DAG.getValueType(VA.getLocVT()));
        Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
      }
      InVals.push_back(Arg);
      continue;
    }

    assert(VA.isMemLoc());

    unsigned Offset = VA.getLocMemOffset()+StackOffset;
    auto PtrVT = getPointerTy(DAG.getDataLayout());

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
      // If it is double-word aligned, just load.
      if (Offset % 8 == 0) {
        int FI = MF.getFrameInfo().CreateFixedObject(8,
                                                     Offset,
                                                     true);
        SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
        SDValue Load =
          DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
        InVals.push_back(Load);
        continue;
      }

      int FI = MF.getFrameInfo().CreateFixedObject(4,
                                                   Offset,
                                                   true);
      SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
      SDValue HiVal =
        DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
      int FI2 = MF.getFrameInfo().CreateFixedObject(4,
                                                    Offset+4,
                                                    true);
      SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);

      SDValue LoVal =
        DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());

      if (IsLittleEndian)
        std::swap(LoVal, HiVal);

      SDValue WholeValue =
        DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
      WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
      InVals.push_back(WholeValue);
      continue;
    }

    int FI = MF.getFrameInfo().CreateFixedObject(4,
                                                 Offset,
                                                 true);
    SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
    SDValue Load;
    if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
      Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
    } else if (VA.getValVT() == MVT::f128) {
      report_fatal_error("SPARCv8 does not handle f128 in calls; "
                         "pass indirectly");
    } else {
      // We shouldn't see any other value types here.
      llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
    }
    InVals.push_back(Load);
  }

  if (MF.getFunction().hasStructRetAttr()) {
    // Copy the SRet Argument to SRetReturnReg.
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    Register Reg = SFI->getSRetReturnReg();
    if (!Reg) {
      Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
      SFI->setSRetReturnReg(Reg);
    }
    SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
  }

  // Store remaining ArgRegs to the stack if this is a varargs function.
  if (isVarArg) {
    static const MCPhysReg ArgRegs[] = {
      SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
    };
    unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
    const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
    unsigned ArgOffset = CCInfo.getStackSize();
    if (NumAllocated == 6)
      ArgOffset += StackOffset;
    else {
      assert(!ArgOffset);
      ArgOffset = 68+4*NumAllocated;
    }

    // Remember the vararg offset for the va_start implementation.
    FuncInfo->setVarArgsFrameOffset(ArgOffset);

    std::vector<SDValue> OutChains;

    for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
      Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
      SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);

      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
                                                         true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);

      OutChains.push_back(
          DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
      ArgOffset += 4;
    }

    if (!OutChains.empty()) {
      OutChains.push_back(Chain);
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
    }
  }

  return Chain;
}
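
// Note on StackOffset = 92 used above: the 32-bit frame reserves 64 bytes
// for the register save area, 4 bytes for the struct-return pointer at
// [%sp+64], and 24 bytes (6 words) of outgoing argument slots, so incoming
// stack arguments begin at [%fp+92].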

// Lower formal arguments for the 64 bit ABI.
SDValue SparcTargetLowering::LowerFormalArguments_64(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze arguments according to CC_Sparc64.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);

  // The argument array begins at %fp+BIAS+128, after the register save area.
  const unsigned ArgArea = 128;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (VA.isRegLoc()) {
      // This argument is passed in a register.
      // All integer register arguments are promoted by the caller to i64.

      // Create a virtual register for the promoted live-in value.
      Register VReg = MF.addLiveIn(VA.getLocReg(),
                                   getRegClassFor(VA.getLocVT()));
      SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());

      // Get the high bits for i32 struct elements.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom())
        Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
                          DAG.getConstant(32, DL, MVT::i32));

      // The caller promoted the argument, so insert an Assert?ext SDNode so we
      // won't promote the value again in this function.
      switch (VA.getLocInfo()) {
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      default:
        break;
      }

      // Truncate the register down to the argument type.
      if (VA.isExtInLoc())
        Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);

      InVals.push_back(Arg);
      continue;
    }

    // The registers are exhausted. This argument was passed on the stack.
    assert(VA.isMemLoc());
    // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
    // beginning of the arguments area at %fp+BIAS+128.
    unsigned Offset = VA.getLocMemOffset() + ArgArea;
    unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
    // Adjust offset for extended arguments, SPARC is big-endian.
    // The caller will have written the full slot with extended bytes, but we
    // prefer our own extending loads.
    if (VA.isExtInLoc())
      Offset += 8 - ValSize;
    int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
    InVals.push_back(
        DAG.getLoad(VA.getValVT(), DL, Chain,
                    DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
                    MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!IsVarArg)
    return Chain;

  // This function takes variable arguments, some of which may have been passed
  // in registers %i0-%i5. Variable floating point arguments are never passed
  // in floating point registers. They go on %i0-%i5 or on the stack like
  // integer arguments.
  //
  // The va_start intrinsic needs to know the offset to the first variable
  // argument.
  unsigned ArgOffset = CCInfo.getStackSize();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  // Skip the 128 bytes of register save area.
  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
                                  Subtarget->getStackPointerBias());

  // Save the variable arguments that were passed in registers.
  // The caller is required to reserve stack space for 6 arguments regardless
  // of how many arguments were actually passed.
  SmallVector<SDValue, 8> OutChains;
  for (; ArgOffset < 6*8; ArgOffset += 8) {
    Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
    SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
    int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
    auto PtrVT = getPointerTy(MF.getDataLayout());
    OutChains.push_back(
        DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
                     MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);

  return Chain;
}

SDValue
SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerCall_64(CLI, InVals);
  return LowerCall_32(CLI, InVals);
}

static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
                                const CallBase *Call) {
  if (Call)
    return Call->hasFnAttr(Attribute::ReturnsTwice);

  const Function *CalleeFn = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    CalleeFn = dyn_cast<Function>(G->getGlobal());
  } else if (ExternalSymbolSDNode *E =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const Function &Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn.getParent();
    const char *CalleeName = E->getSymbol();
    CalleeFn = M->getFunction(CalleeName);
  }

  if (!CalleeFn)
    return false;
  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization.
bool SparcTargetLowering::IsEligibleForTailCallOptimization(
    CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const {

  auto &Outs = CLI.Outs;
  auto &Caller = MF.getFunction();

  // Do not tail call opt functions with "disable-tail-calls" attribute.
  if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
    return false;

  // Do not tail call opt if the stack is used to pass parameters.
  // 64-bit targets have a slightly higher limit since the ABI requires
  // to allocate some space even when all the parameters fit inside registers.
  unsigned StackSizeLimit = Subtarget->is64Bit() ? 48 : 0;
  if (CCInfo.getStackSize() > StackSizeLimit)
    return false;

  // Do not tail call opt if either the callee or caller returns
  // a struct and the other does not.
  if (!Outs.empty() && Caller.hasStructRetAttr() != Outs[0].Flags.isSRet())
    return false;

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call.
  for (auto &Arg : Outs)
    if (Arg.Flags.isByVal())
      return false;

  return true;
}
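
// For illustration: a 32-bit call whose arguments all fit in %o0-%o5 with no
// sret or byval operands passes these checks, while any call needing
// outgoing stack words (beyond the 48 bytes the 64-bit ABI always reserves)
// does not.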

// Lower a call for the 32-bit ABI.
SDValue
SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);

  isTailCall = isTailCall && IsEligibleForTailCallOptimization(
                                 CCInfo, CLI, DAG.getMachineFunction());

  // Get the size of the outgoing arguments stack space requirement.
  unsigned ArgsSize = CCInfo.getStackSize();

  // Keep stack frames 8-byte aligned.
  ArgsSize = (ArgsSize+7) & ~7;

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  // Create local copies for byval args.
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0,  e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    Align Alignment = Flags.getNonZeroByValAlign();

    if (Size > 0U) {
      int FI = MFI.CreateStackObject(Size, Alignment, false);
      SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);

      Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
                            false,        // isVolatile,
                            (Size <= 32), // AlwaysInline if size <= 32,
                            false,        // isTailCall
                            MachinePointerInfo(), MachinePointerInfo());
      ByValArgs.push_back(FIPtr);
    }
    else {
      SDValue nullVal;
      ByValArgs.push_back(nullVal);
    }
  }

  assert(!isTailCall || ArgsSize == 0);

  if (!isTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  const unsigned StackOffset = 92;
  bool hasStructRetAttr = false;
  unsigned SRetArgSize = 0;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];

    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Use local copy if it is a byval arg.
    if (Flags.isByVal()) {
      Arg = ByValArgs[byvalArgIdx++];
      if (!Arg) {
        continue;
      }
    }

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (Flags.isSRet()) {
      assert(VA.needsCustom());

      if (isTailCall)
        continue;

      // store SRet argument in %sp+64
      SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
      SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
      PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
      MemOpChains.push_back(
          DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
      hasStructRetAttr = true;
      // sret only allowed on first argument
      assert(Outs[realArgIdx].OrigArgIndex == 0);
      SRetArgSize =
          DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
      continue;
    }

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

      if (VA.isMemLoc()) {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // if it is double-word aligned, just store.
        if (Offset % 8 == 0) {
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
          continue;
        }
      }

      if (VA.getLocVT() == MVT::f64) {
        // Move the float value from float registers into the
        // integer registers.
        if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
          Arg = bitcastConstantFPToInt(C, dl, DAG);
        else
          Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
      }

      SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  Arg,
                                  DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  Arg,
                                  DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));

      if (VA.isRegLoc()) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
        assert(i+1 != e);
        CCValAssign &NextVA = ArgLocs[++i];
        if (NextVA.isRegLoc()) {
          RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
        } else {
          // Store the second part in stack.
          unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
        }
      } else {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // Store the first part.
        SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
        SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
        // Store the second part.
        PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
      }
      continue;
    }

    // Arguments that can be passed on register must be kept at
    // RegsToPass vector
    if (VA.isRegLoc()) {
      if (VA.getLocVT() != MVT::f32) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
        continue;
      }
      Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
                                           dl);
    PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before any copies into physregs.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InGlue is necessary since all emitted instructions must be
  // stuck together.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Register Reg = RegsToPass[i].first;
    if (!isTailCall)
      Reg = toCallerWindow(Reg);
    Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30
                                        : SparcMCExpr::VK_Sparc_WDISP30;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);

  // Returns a chain & a flag for retval copy to use
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  if (hasStructRetAttr)
    Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Register Reg = RegsToPass[i].first;
    if (!isTailCall)
      Reg = toCallerWindow(Reg);
    Ops.push_back(DAG.getRegister(Reg, RegsToPass[i].second.getValueType()));
  }

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice)
           ? TRI->getRTCallPreservedMask(CallConv)
           : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InGlue.getNode())
    Ops.push_back(InGlue);

  if (isTailCall) {
    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(SPISD::TAIL_CALL, dl, MVT::Other, Ops);
  }

  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, dl);
  InGlue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    assert(RVLocs[i].isRegLoc() && "Can only return in registers!");
    if (RVLocs[i].getLocVT() == MVT::v2i32) {
      SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
      SDValue Lo = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InGlue);
      Chain = Lo.getValue(1);
      InGlue = Lo.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
                        DAG.getConstant(0, dl, MVT::i32));
      SDValue Hi = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InGlue);
      Chain = Hi.getValue(1);
      InGlue = Hi.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
                        DAG.getConstant(1, dl, MVT::i32));
      InVals.push_back(Vec);
    } else {
      Chain =
          DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
                             RVLocs[i].getValVT(), InGlue)
              .getValue(1);
      InGlue = Chain.getValue(2);
      InVals.push_back(Chain.getValue(0));
    }
  }

  return Chain;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register SparcTargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                                const MachineFunction &MF) const {
  Register Reg = StringSwitch<Register>(RegName)
    .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
    .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
    .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
    .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
    .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
    .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
    .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
    .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
    .Default(0);

  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}

// Fixup floating point arguments in the ... part of a varargs call.
//
// The SPARC v9 ABI requires that floating point arguments are treated the same
// as integers when calling a varargs function. This does not apply to the
// fixed arguments that are part of the function's prototype.
//
// This function post-processes a CCValAssign array created by
// AnalyzeCallOperands().
static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
                                   ArrayRef<ISD::OutputArg> Outs) {
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    MVT ValTy = VA.getLocVT();
    // FIXME: What about f32 arguments? C promotes them to f64 when calling
    // varargs functions.
    if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
      continue;
    // The fixed arguments to a varargs function still go in FP registers.
    if (Outs[VA.getValNo()].IsFixed)
      continue;

    // This floating point argument should be reassigned.
    // Determine the offset into the argument array.
    Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
    unsigned argSize  = (ValTy == MVT::f64) ? 8 : 16;
    unsigned Offset = argSize * (VA.getLocReg() - firstReg);
    assert(Offset < 16*8 && "Offset out of range, bad register enum?");

    if (Offset < 6*8) {
      // This argument should go in %i0-%i5.
      unsigned IReg = SP::I0 + Offset/8;
      if (ValTy == MVT::f64)
        // Full register, just bitconvert into i64.
        VA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), IReg, MVT::i64,
                                 CCValAssign::BCvt);
      else {
        assert(ValTy == MVT::f128 && "Unexpected type!");
        // Full register, just bitconvert into i128 -- We will lower this into
        // two i64s in LowerCall_64.
        VA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(), IReg,
                                       MVT::i128, CCValAssign::BCvt);
      }
    } else {
      // This needs to go to memory, we're out of integer registers.
      VA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(), Offset,
                               VA.getLocVT(), VA.getLocInfo());
    }
  }
}
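
// A worked example (illustrative): for printf("%f", x) the double x is the
// second argument word, so CC_Sparc64 initially assigns it to LLVM's D1 at
// offset 8; the fixup above reassigns it to the integer register %i1 (the
// caller's %o1), as the V9 ABI requires for variable arguments.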

// Lower a call for the 64-bit ABI.
SDValue
SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);

  CLI.IsTailCall = CLI.IsTailCall && IsEligibleForTailCallOptimization(
                                         CCInfo, CLI, DAG.getMachineFunction());

  // Get the size of the outgoing arguments stack space requirement.
  // The stack offset computed by CC_Sparc64 includes all arguments.
  // Called functions expect 6 argument words to exist in the stack frame, used
  // or not.
  unsigned StackReserved = 6 * 8u;
  unsigned ArgsSize = std::max<unsigned>(StackReserved, CCInfo.getStackSize());

  // Keep stack frames 16-byte aligned.
  ArgsSize = alignTo(ArgsSize, 16);

  // Varargs calls require special treatment.
  if (CLI.IsVarArg)
    fixupVariableFloatArgs(ArgLocs, CLI.Outs);

  assert(!CLI.IsTailCall || ArgsSize == StackReserved);

  // Adjust the stack pointer to make room for the arguments.
  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
  // with more than 6 arguments.
  if (!CLI.IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);

  // Collect the set of registers to pass to the function and their values.
  // This will be emitted as a sequence of CopyToReg nodes glued to the call
  // instruction.
  SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;

  // Collect chains from all the memory operations that copy arguments to the
  // stack. They must follow the stack pointer adjustment above and precede the
  // call instruction itself.
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    SDValue Arg = CLI.OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown location info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
      // SPARC does not support i128 natively. Lower it into two i64, see below.
      if (!VA.needsCustom() || VA.getValVT() != MVT::f128
          || VA.getLocVT() != MVT::i128)
        Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom() && VA.getValVT() == MVT::f128
          && VA.getLocVT() == MVT::i128) {
        // Store and reload into the integer register reg and reg+1.
        unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
        unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
        SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
        SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
        HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
        SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
        LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);

        // Store to %sp+BIAS+128+Offset
        SDValue Store =
            DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
        // Load into Reg and Reg+1
        SDValue Hi64 =
            DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
        SDValue Lo64 =
            DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());

        Register HiReg = VA.getLocReg();
        Register LoReg = VA.getLocReg() + 1;
        if (!CLI.IsTailCall) {
          HiReg = toCallerWindow(HiReg);
          LoReg = toCallerWindow(LoReg);
        }

        RegsToPass.push_back(std::make_pair(HiReg, Hi64));
        RegsToPass.push_back(std::make_pair(LoReg, Lo64));
        continue;
      }

      // The custom bit on an i32 return value indicates that it should be
      // passed in the high bits of the register.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
        Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
                          DAG.getConstant(32, DL, MVT::i32));

        // The next value may go in the low bits of the same register.
        // Handle both at once.
        if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
            ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
          SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
                                   CLI.OutVals[i+1]);
          Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
          // Skip the next value, it's already done.
          ++i;
        }
      }

      Register Reg = VA.getLocReg();
      if (!CLI.IsTailCall)
        Reg = toCallerWindow(Reg);
      RegsToPass.push_back(std::make_pair(Reg, Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
    // The argument area starts at %fp+BIAS+128 in the callee frame,
    // %sp+BIAS+128 in ours.
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
                                           Subtarget->getStackPointerBias() +
                                           128, DL);
    PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
  }
1335 if (!MemOpChains
.empty())
1336 Chain
= DAG
.getNode(ISD::TokenFactor
, DL
, MVT::Other
, MemOpChains
);
1338 // Build a sequence of CopyToReg nodes glued together with token chain and
1339 // glue operands which copy the outgoing args into registers. The InGlue is
1340 // necessary since all emitted instructions must be stuck together in order
1341 // to pass the live physical registers.
1343 for (unsigned i
= 0, e
= RegsToPass
.size(); i
!= e
; ++i
) {
1344 Chain
= DAG
.getCopyToReg(Chain
, DL
,
1345 RegsToPass
[i
].first
, RegsToPass
[i
].second
, InGlue
);
1346 InGlue
= Chain
.getValue(1);
1349 // If the callee is a GlobalAddress node (quite common, every direct call is)
1350 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1351 // Likewise ExternalSymbol -> TargetExternalSymbol.
1352 SDValue Callee
= CLI
.Callee
;
1353 bool hasReturnsTwice
= hasReturnsTwiceAttr(DAG
, Callee
, CLI
.CB
);
1354 unsigned TF
= isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30
1355 : SparcMCExpr::VK_Sparc_WDISP30
;
1356 if (GlobalAddressSDNode
*G
= dyn_cast
<GlobalAddressSDNode
>(Callee
))
1357 Callee
= DAG
.getTargetGlobalAddress(G
->getGlobal(), DL
, PtrVT
, 0, TF
);
1358 else if (ExternalSymbolSDNode
*E
= dyn_cast
<ExternalSymbolSDNode
>(Callee
))
1359 Callee
= DAG
.getTargetExternalSymbol(E
->getSymbol(), PtrVT
, TF
);
1361 // Build the operands for the call instruction itself.
1362 SmallVector
<SDValue
, 8> Ops
;
1363 Ops
.push_back(Chain
);
1364 Ops
.push_back(Callee
);
1365 for (unsigned i
= 0, e
= RegsToPass
.size(); i
!= e
; ++i
)
1366 Ops
.push_back(DAG
.getRegister(RegsToPass
[i
].first
,
1367 RegsToPass
[i
].second
.getValueType()));
1369 // Add a register mask operand representing the call-preserved registers.
1370 const SparcRegisterInfo
*TRI
= Subtarget
->getRegisterInfo();
1371 const uint32_t *Mask
=
1372 ((hasReturnsTwice
) ? TRI
->getRTCallPreservedMask(CLI
.CallConv
)
1373 : TRI
->getCallPreservedMask(DAG
.getMachineFunction(),
1375 assert(Mask
&& "Missing call preserved mask for calling convention");
1376 Ops
.push_back(DAG
.getRegisterMask(Mask
));
1378 // Make sure the CopyToReg nodes are glued to the call instruction which
1379 // consumes the registers.
1380 if (InGlue
.getNode())
1381 Ops
.push_back(InGlue
);
1383 // Now the call itself.
1384 if (CLI
.IsTailCall
) {
1385 DAG
.getMachineFunction().getFrameInfo().setHasTailCall();
1386 return DAG
.getNode(SPISD::TAIL_CALL
, DL
, MVT::Other
, Ops
);
1388 SDVTList NodeTys
= DAG
.getVTList(MVT::Other
, MVT::Glue
);
1389 Chain
= DAG
.getNode(SPISD::CALL
, DL
, NodeTys
, Ops
);
1390 InGlue
= Chain
.getValue(1);
1392 // Revert the stack pointer immediately after the call.
1393 Chain
= DAG
.getCALLSEQ_END(Chain
, ArgsSize
, 0, InGlue
, DL
);
1394 InGlue
= Chain
.getValue(1);
1396 // Now extract the return values. This is more or less the same as
1397 // LowerFormalArguments_64.
1399 // Assign locations to each value returned by this call.
1400 SmallVector
<CCValAssign
, 16> RVLocs
;
1401 CCState
RVInfo(CLI
.CallConv
, CLI
.IsVarArg
, DAG
.getMachineFunction(), RVLocs
,
1404 // Set inreg flag manually for codegen generated library calls that
1406 if (CLI
.Ins
.size() == 1 && CLI
.Ins
[0].VT
== MVT::f32
&& !CLI
.CB
)
1407 CLI
.Ins
[0].Flags
.setInReg();
1409 RVInfo
.AnalyzeCallResult(CLI
.Ins
, RetCC_Sparc64
);
1411 // Copy all of the result registers out of their specified physreg.
1412 for (unsigned i
= 0; i
!= RVLocs
.size(); ++i
) {
1413 CCValAssign
&VA
= RVLocs
[i
];
1414 assert(VA
.isRegLoc() && "Can only return in registers!");
1415 unsigned Reg
= toCallerWindow(VA
.getLocReg());
1417 // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
1418 // reside in the same register in the high and low bits. Reuse the
1419 // CopyFromReg previous node to avoid duplicate copies.
1421 if (RegisterSDNode
*SrcReg
= dyn_cast
<RegisterSDNode
>(Chain
.getOperand(1)))
1422 if (SrcReg
->getReg() == Reg
&& Chain
->getOpcode() == ISD::CopyFromReg
)
1423 RV
= Chain
.getValue(0);
1425 // But usually we'll create a new CopyFromReg for a different register.
1426 if (!RV
.getNode()) {
1427 RV
= DAG
.getCopyFromReg(Chain
, DL
, Reg
, RVLocs
[i
].getLocVT(), InGlue
);
1428 Chain
= RV
.getValue(1);
1429 InGlue
= Chain
.getValue(2);
1432 // Get the high bits for i32 struct elements.
1433 if (VA
.getValVT() == MVT::i32
&& VA
.needsCustom())
1434 RV
= DAG
.getNode(ISD::SRL
, DL
, VA
.getLocVT(), RV
,
1435 DAG
.getConstant(32, DL
, MVT::i32
));
1437 // The callee promoted the return value, so insert an Assert?ext SDNode so
1438 // we won't promote the value again in this function.
1439 switch (VA
.getLocInfo()) {
1440 case CCValAssign::SExt
:
1441 RV
= DAG
.getNode(ISD::AssertSext
, DL
, VA
.getLocVT(), RV
,
1442 DAG
.getValueType(VA
.getValVT()));
1444 case CCValAssign::ZExt
:
1445 RV
= DAG
.getNode(ISD::AssertZext
, DL
, VA
.getLocVT(), RV
,
1446 DAG
.getValueType(VA
.getValVT()));
1452 // Truncate the register down to the return value type.
1453 if (VA
.isExtInLoc())
1454 RV
= DAG
.getNode(ISD::TRUNCATE
, DL
, VA
.getValVT(), RV
);
1456 InVals
.push_back(RV
);

//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//

TargetLowering::AtomicExpansionKind
SparcTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  if (AI->getOperation() == AtomicRMWInst::Xchg &&
      AI->getType()->getPrimitiveSizeInBits() == 32)
    return AtomicExpansionKind::None; // Uses xchg instruction

  return AtomicExpansionKind::CmpXChg;
}
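
// The 32-bit exchange case maps onto SPARC's native 'swap' instruction;
// everything else is expanded by generic code into a compare-and-swap
// (cas/casx) loop.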

/// intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC
/// rcond condition.
static SPCC::CondCodes intCondCCodeToRcond(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unknown/unsigned integer condition code!");
  case ISD::SETEQ:
    return SPCC::REG_Z;
  case ISD::SETNE:
    return SPCC::REG_NZ;
  case ISD::SETLT:
    return SPCC::REG_LZ;
  case ISD::SETGT:
    return SPCC::REG_GZ;
  case ISD::SETLE:
    return SPCC::REG_LEZ;
  case ISD::SETGE:
    return SPCC::REG_GEZ;
  }
}
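
// These rcond values feed the SPARC v9 branch-on-register-contents (BPr)
// and move-on-register (MOVr) instruction forms, which test an entire
// register against zero instead of the %icc/%xcc flags.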

/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
/// condition.
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown integer condition code!");
  case ISD::SETEQ:  return SPCC::ICC_E;
  case ISD::SETNE:  return SPCC::ICC_NE;
  case ISD::SETLT:  return SPCC::ICC_L;
  case ISD::SETGT:  return SPCC::ICC_G;
  case ISD::SETLE:  return SPCC::ICC_LE;
  case ISD::SETGE:  return SPCC::ICC_GE;
  case ISD::SETULT: return SPCC::ICC_CS;
  case ISD::SETULE: return SPCC::ICC_LEU;
  case ISD::SETUGT: return SPCC::ICC_GU;
  case ISD::SETUGE: return SPCC::ICC_CC;
  }
}

/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
/// FCC condition.
static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  case ISD::SETEQ:
  case ISD::SETOEQ: return SPCC::FCC_E;
  case ISD::SETNE:
  case ISD::SETUNE: return SPCC::FCC_NE;
  case ISD::SETLT:
  case ISD::SETOLT: return SPCC::FCC_L;
  case ISD::SETGT:
  case ISD::SETOGT: return SPCC::FCC_G;
  case ISD::SETLE:
  case ISD::SETOLE: return SPCC::FCC_LE;
  case ISD::SETGE:
  case ISD::SETOGE: return SPCC::FCC_GE;
  case ISD::SETULT: return SPCC::FCC_UL;
  case ISD::SETULE: return SPCC::FCC_ULE;
  case ISD::SETUGT: return SPCC::FCC_UG;
  case ISD::SETUGE: return SPCC::FCC_UGE;
  case ISD::SETUO:  return SPCC::FCC_U;
  case ISD::SETO:   return SPCC::FCC_O;
  case ISD::SETONE: return SPCC::FCC_LG;
  case ISD::SETUEQ: return SPCC::FCC_UE;
  }
}

SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
                                         const SparcSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));

  // Instructions which use registers as conditionals examine all the
  // bits (as does the pseudo SELECT_CC expansion). I don't think it
  // matters much whether it's ZeroOrOneBooleanContent, or
  // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
  // former.
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
  if (!Subtarget->useSoftFloat()) {
    addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
    addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
    addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
  }
  if (Subtarget->is64Bit()) {
    addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
  } else {
    // On 32bit sparc, we define a double-register 32bit register
    // class, as well. This is modeled in LLVM as a 2-vector of i32.
    addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);

    // ...but almost all operations must be expanded, so set that as
    // the default.
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      setOperationAction(Op, MVT::v2i32, Expand);
    }
    // Truncating/extending stores/loads are also not supported.
    for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);

      setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);

      setTruncStoreAction(VT, MVT::v2i32, Expand);
      setTruncStoreAction(MVT::v2i32, VT, Expand);
    }
    // However, load and store *are* legal.
    setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
    setOperationAction(ISD::STORE, MVT::v2i32, Legal);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Legal);

    // And we need to promote i64 loads/stores into vector load/store
    setOperationAction(ISD::LOAD, MVT::i64, Custom);
    setOperationAction(ISD::STORE, MVT::i64, Custom);

    // Sadly, this doesn't work:
    //    AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
    //    AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
  }
  // Turn FP extload into load/fpextend
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
  }

  // Sparc doesn't have i1 sign extending load.
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // Turn FP truncstore into trunc + store.
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);
  // Custom legalize GlobalAddress nodes into LO/HI parts.
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);

  // Sparc doesn't have sext_inreg, replace them with shl/sra
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Sparc has no REM or DIVREM operations.
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  // ... nor does SparcV9.
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UREM, MVT::i64, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
    setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  }
  // Custom expand fp<->sint
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Custom Expand fp<->uint
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  // Lower f16 conversion operations into library calls
  setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);

  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);
  // Sparc has no select or setcc: expand to SELECT_CC.
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::f128, Expand);

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SETCC, MVT::f128, Expand);

  // Sparc doesn't have BRCOND either, it has BR_CC.
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_CC, MVT::f128, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);

  setOperationAction(ISD::ADDC, MVT::i32, Custom);
  setOperationAction(ISD::ADDE, MVT::i32, Custom);
  setOperationAction(ISD::SUBC, MVT::i32, Custom);
  setOperationAction(ISD::SUBE, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ADDC, MVT::i64, Custom);
    setOperationAction(ISD::ADDE, MVT::i64, Custom);
    setOperationAction(ISD::SUBC, MVT::i64, Custom);
    setOperationAction(ISD::SUBE, MVT::i64, Custom);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SETCC, MVT::i64, Expand);
    setOperationAction(ISD::BR_CC, MVT::i64, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);

    setOperationAction(ISD::CTPOP, MVT::i64,
                       Subtarget->usePopc() ? Legal : Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
    setOperationAction(ISD::CTLZ, MVT::i64, Expand);
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);
    setOperationAction(ISD::ROTL, MVT::i64, Expand);
    setOperationAction(ISD::ROTR, MVT::i64, Expand);
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  }
  // Atomics are supported on SparcV9. 32-bit atomics are also
  // supported by some Leon SparcV8 variants. Otherwise, atomics
  // are unsupported.
  if (Subtarget->isV9())
    setMaxAtomicSizeInBitsSupported(64);
  else if (Subtarget->hasLeonCasa())
    setMaxAtomicSizeInBitsSupported(32);
  else
    setMaxAtomicSizeInBitsSupported(0);

  setMinCmpXchgSizeInBits(32);

  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);

  // Custom Lower Atomic LOAD/STORE
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
  }
  if (!Subtarget->is64Bit()) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::MULO_I64, nullptr);
    setLibcallName(RTLIB::MUL_I128, nullptr);
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setLibcallName(RTLIB::MULO_I128, nullptr);
  if (!Subtarget->isV9()) {
    // SparcV8 does not have FNEGD and FABSD.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f64, Custom);
  }
  setOperationAction(ISD::FSIN, MVT::f128, Expand);
  setOperationAction(ISD::FCOS, MVT::f128, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FMA, MVT::f128, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
  // Expands to [SU]MUL_LOHI.
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MUL, MVT::i32, Expand);

  if (Subtarget->useSoftMulDiv()) {
    // .umul works for both signed and unsigned
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setLibcallName(RTLIB::MUL_I32, ".umul");

    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setLibcallName(RTLIB::SDIV_I32, ".div");

    setOperationAction(ISD::UDIV, MVT::i32, Expand);
    setLibcallName(RTLIB::UDIV_I32, ".udiv");

    setLibcallName(RTLIB::SREM_I32, ".rem");
    setLibcallName(RTLIB::UREM_I32, ".urem");
  }
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i64, Expand);
    setOperationAction(ISD::MULHS, MVT::i64, Expand);

    setOperationAction(ISD::UMULO, MVT::i64, Custom);
    setOperationAction(ISD::SMULO, MVT::i64, Custom);

    setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  }
  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  // VAARG needs to be lowered to not do unaligned accesses for doubles.
  setOperationAction(ISD::VAARG, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);

  setStackPointerRegisterToSaveRestore(SP::O6);

  setOperationAction(ISD::CTPOP, MVT::i32,
                     Subtarget->usePopc() ? Legal : Expand);
  if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
    setOperationAction(ISD::LOAD, MVT::f128, Legal);
    setOperationAction(ISD::STORE, MVT::f128, Legal);
  } else {
    setOperationAction(ISD::LOAD, MVT::f128, Custom);
    setOperationAction(ISD::STORE, MVT::f128, Custom);
  }

  if (Subtarget->hasHardQuad()) {
    setOperationAction(ISD::FADD, MVT::f128, Legal);
    setOperationAction(ISD::FSUB, MVT::f128, Legal);
    setOperationAction(ISD::FMUL, MVT::f128, Legal);
    setOperationAction(ISD::FDIV, MVT::f128, Legal);
    setOperationAction(ISD::FSQRT, MVT::f128, Legal);
    setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
    if (Subtarget->isV9()) {
      setOperationAction(ISD::FNEG, MVT::f128, Legal);
      setOperationAction(ISD::FABS, MVT::f128, Legal);
    } else {
      setOperationAction(ISD::FNEG, MVT::f128, Custom);
      setOperationAction(ISD::FABS, MVT::f128, Custom);
    }

    if (!Subtarget->is64Bit()) {
      setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
      setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
      setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
    }
  } else {
    // Custom legalize f128 operations.

    setOperationAction(ISD::FADD, MVT::f128, Custom);
    setOperationAction(ISD::FSUB, MVT::f128, Custom);
    setOperationAction(ISD::FMUL, MVT::f128, Custom);
    setOperationAction(ISD::FDIV, MVT::f128, Custom);
    setOperationAction(ISD::FSQRT, MVT::f128, Custom);
    setOperationAction(ISD::FNEG, MVT::f128, Custom);
    setOperationAction(ISD::FABS, MVT::f128, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
  }
  // Setup Runtime library names.
  if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
    setLibcallName(RTLIB::ADD_F128,  "_Qp_add");
    setLibcallName(RTLIB::SUB_F128,  "_Qp_sub");
    setLibcallName(RTLIB::MUL_F128,  "_Qp_mul");
    setLibcallName(RTLIB::DIV_F128,  "_Qp_div");
    setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
    setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
    setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
    setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
    setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
    setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
    setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
    setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
    setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
    setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
    setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
    setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
    setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
  } else if (!Subtarget->useSoftFloat()) {
    setLibcallName(RTLIB::ADD_F128,  "_Q_add");
    setLibcallName(RTLIB::SUB_F128,  "_Q_sub");
    setLibcallName(RTLIB::MUL_F128,  "_Q_mul");
    setLibcallName(RTLIB::DIV_F128,  "_Q_div");
    setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
    setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
    setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
    setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
    setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
    setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
    setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
    setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
    setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
    setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
    setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
    setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
    setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
  }
  if (Subtarget->fixAllFDIVSQRT()) {
    // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
    // the former instructions generate errata on LEON processors.
    setOperationAction(ISD::FDIV, MVT::f32, Promote);
    setOperationAction(ISD::FSQRT, MVT::f32, Promote);
  }

  if (Subtarget->hasNoFMULS()) {
    setOperationAction(ISD::FMUL, MVT::f32, Promote);
  }

  // Custom combine bitcast between f64 and v2i32
  if (!Subtarget->is64Bit())
    setTargetDAGCombine(ISD::BITCAST);

  if (Subtarget->hasLeonCycleCounter())
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  setMinFunctionAlignment(Align(4));

  computeRegisterProperties(Subtarget->getRegisterInfo());
}
bool SparcTargetLowering::useSoftFloat() const {
  return Subtarget->useSoftFloat();
}
const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((SPISD::NodeType)Opcode) {
  case SPISD::FIRST_NUMBER:    break;
  case SPISD::CMPICC:          return "SPISD::CMPICC";
  case SPISD::CMPFCC:          return "SPISD::CMPFCC";
  case SPISD::CMPFCC_V9:       return "SPISD::CMPFCC_V9";
  case SPISD::BRICC:           return "SPISD::BRICC";
  case SPISD::BPICC:           return "SPISD::BPICC";
  case SPISD::BPXCC:           return "SPISD::BPXCC";
  case SPISD::BRFCC:           return "SPISD::BRFCC";
  case SPISD::BRFCC_V9:        return "SPISD::BRFCC_V9";
  case SPISD::BR_REG:          return "SPISD::BR_REG";
  case SPISD::SELECT_ICC:      return "SPISD::SELECT_ICC";
  case SPISD::SELECT_XCC:      return "SPISD::SELECT_XCC";
  case SPISD::SELECT_FCC:      return "SPISD::SELECT_FCC";
  case SPISD::SELECT_REG:      return "SPISD::SELECT_REG";
  case SPISD::Hi:              return "SPISD::Hi";
  case SPISD::Lo:              return "SPISD::Lo";
  case SPISD::FTOI:            return "SPISD::FTOI";
  case SPISD::ITOF:            return "SPISD::ITOF";
  case SPISD::FTOX:            return "SPISD::FTOX";
  case SPISD::XTOF:            return "SPISD::XTOF";
  case SPISD::CALL:            return "SPISD::CALL";
  case SPISD::RET_GLUE:        return "SPISD::RET_GLUE";
  case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
  case SPISD::FLUSHW:          return "SPISD::FLUSHW";
  case SPISD::TLS_ADD:         return "SPISD::TLS_ADD";
  case SPISD::TLS_LD:          return "SPISD::TLS_LD";
  case SPISD::TLS_CALL:        return "SPISD::TLS_CALL";
  case SPISD::TAIL_CALL:       return "SPISD::TAIL_CALL";
  case SPISD::LOAD_GDOP:       return "SPISD::LOAD_GDOP";
  }
  return nullptr;
}
EVT SparcTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
                                            EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}
/// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
/// be zero. Op is expected to be a target specific node. Used by DAG
/// combiner.
void SparcTargetLowering::computeKnownBitsForTargetNode
                                (const SDValue Op,
                                 KnownBits &Known,
                                 const APInt &DemandedElts,
                                 const SelectionDAG &DAG,
                                 unsigned Depth) const {
  KnownBits Known2;
  Known.resetAll();

  switch (Op.getOpcode()) {
  default: break;
  case SPISD::SELECT_ICC:
  case SPISD::SELECT_XCC:
  case SPISD::SELECT_FCC:
    Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
    Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);

    // Only known if known in both the LHS and RHS.
    Known = Known.intersectWith(Known2);
    break;
  }
}
// Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so,
// set LHS/RHS to the compared values and SPCC to the condition.
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
                             ISD::CondCode CC, unsigned &SPCC) {
  if (isNullConstant(RHS) && CC == ISD::SETNE &&
      (((LHS.getOpcode() == SPISD::SELECT_ICC ||
         LHS.getOpcode() == SPISD::SELECT_XCC) &&
        LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
       (LHS.getOpcode() == SPISD::SELECT_FCC &&
        (LHS.getOperand(3).getOpcode() == SPISD::CMPFCC ||
         LHS.getOperand(3).getOpcode() == SPISD::CMPFCC_V9))) &&
      isOneConstant(LHS.getOperand(0)) && isNullConstant(LHS.getOperand(1))) {
    SDValue CMPCC = LHS.getOperand(3);
    SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
    LHS = CMPCC.getOperand(0);
    RHS = CMPCC.getOperand(1);
  }
}
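
// Added illustration (hedged, editorial): the guard above matches shapes like
//   (br_cc/select_cc ... setne, (select_icc 1, 0, cc, (cmpicc a, b)), 0)
// i.e. a boolean that was materialized from an earlier compare and is then
// re-compared against zero. Recovering a, b, and cc lets the callers branch
// or select on the original comparison directly, without the intermediate
// 0/1 value.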
// Convert to a target node and set target flags.
SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
                                             SelectionDAG &DAG) const {
  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
    return DAG.getTargetGlobalAddress(GA->getGlobal(),
                                      SDLoc(GA),
                                      GA->getValueType(0),
                                      GA->getOffset(), TF);

  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
    return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
                                     CP->getAlign(), CP->getOffset(), TF);

  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
    return DAG.getTargetBlockAddress(BA->getBlockAddress(),
                                     Op.getValueType(),
                                     0,
                                     TF);

  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
    return DAG.getTargetExternalSymbol(ES->getSymbol(),
                                       ES->getValueType(0), TF);

  llvm_unreachable("Unhandled address SDNode");
}
// Split Op into high and low parts according to HiTF and LoTF.
// Return an ADD node combining the parts.
SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
                                          unsigned HiTF, unsigned LoTF,
                                          SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
}
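
// Added illustration (hedged, editorial): with the absolute small code model
// and the HI/LO flags, the pair above typically materializes as
//   sethi %hi(sym), %tmp
//   add   %tmp, %lo(sym), %dst
// where %hi selects the upper 22 bits of the address and %lo the low 10 bits.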
// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
// or ExternalSymbol SDNode.
SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = getPointerTy(DAG.getDataLayout());

  // Handle PIC mode first. SPARC needs a GOT load for every variable!
  if (isPositionIndependent()) {
    const Module *M = DAG.getMachineFunction().getFunction().getParent();
    PICLevel::Level picLevel = M->getPICLevel();
    SDValue Idx;

    if (picLevel == PICLevel::SmallPIC) {
      // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
      Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
                        withTargetFlags(Op, SparcMCExpr::VK_Sparc_GOT13, DAG));
    } else {
      // This is the pic32 code model, the GOT is known to be smaller than 4GB.
      Idx = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_GOT22,
                         SparcMCExpr::VK_Sparc_GOT10, DAG);
    }

    SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
    SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
    // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
    // function has calls.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    MFI.setHasCalls(true);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
                       MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  }
  // This is one of the absolute code models.
  switch (getTargetMachine().getCodeModel()) {
  default:
    llvm_unreachable("Unsupported absolute code model");
  case CodeModel::Small:
    // abs32.
    return makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
                        SparcMCExpr::VK_Sparc_LO, DAG);
  case CodeModel::Medium: {
    // abs44.
    SDValue H44 = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_H44,
                               SparcMCExpr::VK_Sparc_M44, DAG);
    H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
    SDValue L44 = withTargetFlags(Op, SparcMCExpr::VK_Sparc_L44, DAG);
    L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
    return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
  }
  case CodeModel::Large: {
    // abs64.
    SDValue Hi = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HH,
                              SparcMCExpr::VK_Sparc_HM, DAG);
    Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
    SDValue Lo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
                              SparcMCExpr::VK_Sparc_LO, DAG);
    return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
  }
  }
}
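
// Added illustration (hedged, editorial): the Medium ("abs44") case above
// corresponds to a sequence along the lines of
//   sethi %h44(sym), %t
//   add   %t, %m44(sym), %t
//   sllx  %t, 12, %t
//   add   %t, %l44(sym), %dst
// yielding a 44-bit absolute address. The Large ("abs64") case builds two
// 32-bit halves the same way and joins them with a 32-bit shift and an add.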
SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
                                               SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}
SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  SDLoc DL(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  TLSModel::Model model = getTargetMachine().getTLSModel(GV);

  if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
    unsigned HiTF = ((model == TLSModel::GeneralDynamic)
                         ? SparcMCExpr::VK_Sparc_TLS_GD_HI22
                         : SparcMCExpr::VK_Sparc_TLS_LDM_HI22);
    unsigned LoTF = ((model == TLSModel::GeneralDynamic)
                         ? SparcMCExpr::VK_Sparc_TLS_GD_LO10
                         : SparcMCExpr::VK_Sparc_TLS_LDM_LO10);
    unsigned addTF = ((model == TLSModel::GeneralDynamic)
                          ? SparcMCExpr::VK_Sparc_TLS_GD_ADD
                          : SparcMCExpr::VK_Sparc_TLS_LDM_ADD);
    unsigned callTF = ((model == TLSModel::GeneralDynamic)
                           ? SparcMCExpr::VK_Sparc_TLS_GD_CALL
                           : SparcMCExpr::VK_Sparc_TLS_LDM_CALL);

    SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
    SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
    SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
                                   withTargetFlags(Op, addTF, DAG));

    SDValue Chain = DAG.getEntryNode();
    SDValue InGlue;

    Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
    Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InGlue);
    InGlue = Chain.getValue(1);
    SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
    SDValue Symbol = withTargetFlags(Op, callTF, DAG);

    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
        DAG.getMachineFunction(), CallingConv::C);
    assert(Mask && "Missing call preserved mask for calling convention");
    SDValue Ops[] = {Chain,
                     Callee,
                     Symbol,
                     DAG.getRegister(SP::O0, PtrVT),
                     DAG.getRegisterMask(Mask),
                     InGlue};
    Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
    InGlue = Chain.getValue(1);
    Chain = DAG.getCALLSEQ_END(Chain, 1, 0, InGlue, DL);
    InGlue = Chain.getValue(1);
    SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InGlue);

    if (model != TLSModel::LocalDynamic)
      return Ret;

    SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
                 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_HIX22, DAG));
    SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
                 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_LOX10, DAG));
    HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
    return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
                       withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_ADD, DAG));
  }
  if (model == TLSModel::InitialExec) {
    unsigned ldTF = ((PtrVT == MVT::i64) ? SparcMCExpr::VK_Sparc_TLS_IE_LDX
                                         : SparcMCExpr::VK_Sparc_TLS_IE_LD);

    SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);

    // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
    // function has calls.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    MFI.setHasCalls(true);

    SDValue TGA = makeHiLoPair(Op,
                               SparcMCExpr::VK_Sparc_TLS_IE_HI22,
                               SparcMCExpr::VK_Sparc_TLS_IE_LO10, DAG);
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
    SDValue Offset = DAG.getNode(SPISD::TLS_LD,
                                 DL, PtrVT, Ptr,
                                 withTargetFlags(Op, ldTF, DAG));
    return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
                       DAG.getRegister(SP::G7, PtrVT), Offset,
                       withTargetFlags(Op,
                                       SparcMCExpr::VK_Sparc_TLS_IE_ADD, DAG));
  }

  assert(model == TLSModel::LocalExec);
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
               withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_HIX22, DAG));
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
               withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_LOX10, DAG));
  SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);

  return DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getRegister(SP::G7, PtrVT), Offset);
}
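
// Added note (hedged, editorial): in the LocalDynamic and LocalExec paths
// above, the HIX22/LOX10 pairs are combined with XOR rather than ADD. Those
// TLS offsets are negative values relative to the thread pointer (%g7), and
// the hix22/lox10 relocations are defined so that XOR reassembles the
// sign-extended offset correctly where a sethi/or pair would not.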
SDValue SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain,
                                                  ArgListTy &Args, SDValue Arg,
                                                  const SDLoc &DL,
                                                  SelectionDAG &DAG) const {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  EVT ArgVT = Arg.getValueType();
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());

  ArgListEntry Entry;
  Entry.Node = Arg;
  Entry.Ty = ArgTy;

  if (ArgTy->isFP128Ty()) {
    // Create a stack object and pass the pointer to the library function.
    int FI = MFI.CreateStackObject(16, Align(8), false);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
                         Align(8));

    Entry.Node = FIPtr;
    Entry.Ty = PointerType::getUnqual(ArgTy);
  }
  Args.push_back(Entry);
  return Chain;
}
SDValue
SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
                                 const char *LibFuncName,
                                 unsigned numArgs) const {

  ArgListTy Args;

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
  Type *RetTyABI = RetTy;
  SDValue Chain = DAG.getEntryNode();
  SDValue RetPtr;

  if (RetTy->isFP128Ty()) {
    // Create a Stack Object to receive the return value of type f128.
    ArgListEntry Entry;
    int RetFI = MFI.CreateStackObject(16, Align(8), false);
    RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
    Entry.Node = RetPtr;
    Entry.Ty = PointerType::getUnqual(RetTy);
    if (!Subtarget->is64Bit()) {
      Entry.IsSRet = true;
      Entry.IndirectType = RetTy;
    }
    Entry.IsReturned = false;
    Args.push_back(Entry);
    RetTyABI = Type::getVoidTy(*DAG.getContext());
  }

  assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
  for (unsigned i = 0, e = numArgs; i != e; ++i) {
    Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
  }
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
    .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  // chain is in second result.
  if (RetTyABI == RetTy)
    return CallInfo.first;

  assert(RetTy->isFP128Ty() && "Unexpected return type!");

  Chain = CallInfo.second;

  // Load RetPtr to get the return value.
  return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
                     MachinePointerInfo(), Align(8));
}
SDValue SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
                                              unsigned &SPCC, const SDLoc &DL,
                                              SelectionDAG &DAG) const {

  const char *LibCall = nullptr;
  bool is64Bit = Subtarget->is64Bit();
  switch (SPCC) {
  default: llvm_unreachable("Unhandled conditional code!");
  case SPCC::FCC_E  : LibCall = is64Bit ? "_Qp_feq" : "_Q_feq"; break;
  case SPCC::FCC_NE : LibCall = is64Bit ? "_Qp_fne" : "_Q_fne"; break;
  case SPCC::FCC_L  : LibCall = is64Bit ? "_Qp_flt" : "_Q_flt"; break;
  case SPCC::FCC_G  : LibCall = is64Bit ? "_Qp_fgt" : "_Q_fgt"; break;
  case SPCC::FCC_LE : LibCall = is64Bit ? "_Qp_fle" : "_Q_fle"; break;
  case SPCC::FCC_GE : LibCall = is64Bit ? "_Qp_fge" : "_Q_fge"; break;
  case SPCC::FCC_UL :
  case SPCC::FCC_ULE:
  case SPCC::FCC_UG :
  case SPCC::FCC_UGE:
  case SPCC::FCC_U  :
  case SPCC::FCC_O  :
  case SPCC::FCC_LG :
  case SPCC::FCC_UE : LibCall = is64Bit ? "_Qp_cmp" : "_Q_cmp"; break;
  }

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
  Type *RetTy = Type::getInt32Ty(*DAG.getContext());
  ArgListTy Args;
  SDValue Chain = DAG.getEntryNode();
  Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
  Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL).setChain(Chain)
    .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  // result is in first, and chain is in second result.
  SDValue Result = CallInfo.first;
  switch (SPCC) {
  default: {
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UL : {
    SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_ULE: {
    SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UG : {
    SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
    SPCC = SPCC::ICC_G;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UGE: {
    SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_U  : {
    SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_O  : {
    SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_LG : {
    SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UE : {
    SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  }
}
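
// Added note (hedged, inferred from the masks and constants above): the
// _Q_cmp/_Qp_cmp helpers return a small integer classifying the relation
// (0 equal, 1 less, 2 greater, 3 unordered), so the unordered predicates
// are recovered by masking or comparing that i32 result against 0..3 and
// then testing integer condition codes via SPISD::CMPICC.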
static SDValue
LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
                   const SparcTargetLowering &TLI) {

  if (Op.getOperand(0).getValueType() == MVT::f64)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);

  if (Op.getOperand(0).getValueType() == MVT::f32)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);

  llvm_unreachable("fpextend with non-float operand!");
}
static SDValue
LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
                  const SparcTargetLowering &TLI) {
  // FP_ROUND on f64 and f32 are legal.
  if (Op.getOperand(0).getValueType() != MVT::f128)
    return Op;

  if (Op.getValueType() == MVT::f64)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
  if (Op.getValueType() == MVT::f32)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);

  llvm_unreachable("fpround to non-float!");
}
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  assert(VT == MVT::i32 || VT == MVT::i64);

  // Expand f128 operations to fp128 ABI calls.
  if (Op.getOperand(0).getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
    const char *libName = TLI.getLibcallName(VT == MVT::i32
                                             ? RTLIB::FPTOSINT_F128_I32
                                             : RTLIB::FPTOSINT_F128_I64);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the resulting type is illegal.
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  // Otherwise, convert the FP value to an integer in an FP register.
  if (VT == MVT::i32)
    Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
  else
    Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));

  return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}
static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || (OpVT == MVT::i64));

  EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;

  // Expand f128 operations to fp128 ABI calls.
  if (Op.getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
    const char *libName = TLI.getLibcallName(OpVT == MVT::i32
                                             ? RTLIB::SINTTOFP_I32_F128
                                             : RTLIB::SINTTOFP_I64_F128);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the operand type is illegal.
  if (!TLI.isTypeLegal(OpVT))
    return SDValue();

  // Otherwise, convert the int value to FP in an FP register.
  SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
  unsigned opcode = (OpVT == MVT::i32) ? SPISD::ITOF : SPISD::XTOF;
  return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
}
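
// Added note (hedged, editorial): SPISD::ITOF/XTOF correspond to the SPARC
// fitos/fitod and fxtos/fxtod conversion instructions, which operate on an
// integer bit pattern already resident in an FP register; the BITCAST above
// is what moves the integer into that register without changing its bits.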
static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  // Expand if it does not involve f128 or the target has support for
  // quad floating point instructions and the resulting type is legal.
  if (Op.getOperand(0).getValueType() != MVT::f128 ||
      (hasHardQuad && TLI.isTypeLegal(VT)))
    return SDValue();

  assert(VT == MVT::i32 || VT == MVT::i64);

  return TLI.LowerF128Op(Op, DAG,
                         TLI.getLibcallName(VT == MVT::i32
                                            ? RTLIB::FPTOUINT_F128_I32
                                            : RTLIB::FPTOUINT_F128_I64),
                         1);
}
static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || OpVT == MVT::i64);

  // Expand if it does not involve f128 or the target has support for
  // quad floating point instructions and the operand type is legal.
  if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
    return SDValue();

  return TLI.LowerF128Op(Op, DAG,
                         TLI.getLibcallName(OpVT == MVT::i32
                                            ? RTLIB::UINTTOFP_I32_F128
                                            : RTLIB::UINTTOFP_I64_F128),
                         1);
}
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
                          const SparcTargetLowering &TLI, bool hasHardQuad,
                          bool isV9, bool is64Bit) {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;

  // If this is a br_cc of a "setcc", and if the setcc got lowered into
  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);
  assert(LHS.getValueType() == RHS.getValueType());

  // Get the condition flag.
  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    // On V9 processors running in 64-bit mode, if CC compares two `i64`s
    // and the RHS is zero we might be able to use a specialized branch.
    if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
        isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC))
      return DAG.getNode(SPISD::BR_REG, dl, MVT::Other, Chain, Dest,
                         DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32),
                         LHS);

    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
    if (isV9)
      // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
      Opc = LHS.getValueType() == MVT::i32 ? SPISD::BPICC : SPISD::BPXCC;
    else
      // Non-v9 targets don't have xcc.
      Opc = SPISD::BRICC;
  } else {
    if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
      Opc = isV9 ? SPISD::BPICC : SPISD::BRICC;
    } else {
      unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
      CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      Opc = isV9 ? SPISD::BRFCC_V9 : SPISD::BRFCC;
    }
  }
  return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
                     DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
                              const SparcTargetLowering &TLI, bool hasHardQuad,
                              bool isV9, bool is64Bit) {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;

  // If this is a select_cc of a "setcc", and if the setcc got lowered into
  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);
  assert(LHS.getValueType() == RHS.getValueType());

  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    // On V9 processors running in 64-bit mode, if CC compares two `i64`s
    // and the RHS is zero we might be able to use a specialized select.
    // All SELECT_CC between any two scalar integer types are eligible for
    // lowering to specialized instructions. Additionally, f32 and f64 types
    // are also eligible, but for f128 we can only use the specialized
    // instruction when we have hardquad.
    EVT ValType = TrueVal.getValueType();
    bool IsEligibleType = ValType.isScalarInteger() || ValType == MVT::f32 ||
                          ValType == MVT::f64 ||
                          (ValType == MVT::f128 && hasHardQuad);
    if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
        isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC) && IsEligibleType)
      return DAG.getNode(
          SPISD::SELECT_REG, dl, TrueVal.getValueType(), TrueVal, FalseVal,
          DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32), LHS);

    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    Opc = LHS.getValueType() == MVT::i32 ?
          SPISD::SELECT_ICC : SPISD::SELECT_XCC;
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
  } else {
    if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
      Opc = SPISD::SELECT_ICC;
    } else {
      unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
      CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
      Opc = SPISD::SELECT_FCC;
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
    }
  }
  return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
                     DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
                            const SparcTargetLowering &TLI) {
  MachineFunction &MF = DAG.getMachineFunction();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());

  // Need frame address to find the address of VarArgsFrameIndex.
  MF.getFrameInfo().setFrameAddressIsTaken(true);

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc DL(Op);
  SDValue Offset =
      DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
                  DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
                      MachinePointerInfo(SV));
}
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc DL(Node);
  SDValue VAList =
      DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
  // Increment the pointer, VAList, to the next vaarg.
  SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits()/8,
                                                      DL));
  // Store the incremented VAList to the legalized pointer.
  InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
                         MachinePointerInfo(SV));
  // Load the actual argument out of the pointer VAList.
  // We can't count on greater alignment than the word size.
  return DAG.getLoad(
      VT, DL, InChain, VAList, MachinePointerInfo(),
      Align(std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8));
}
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
                                       const SparcSubtarget *Subtarget) {
  SDValue Chain = Op.getOperand(0);  // Legalize the chain.
  SDValue Size  = Op.getOperand(1);  // Legalize the size.
  MaybeAlign Alignment =
      cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
  Align StackAlign = Subtarget->getFrameLowering()->getStackAlign();
  EVT VT = Size->getValueType(0);
  SDLoc dl(Op);

  // TODO: implement over-aligned alloca. (Note: also implies supporting
  // overaligned function frames + dynamic allocations, at all, which
  // currently isn't supported)
  if (Alignment && *Alignment > StackAlign) {
    const MachineFunction &MF = DAG.getMachineFunction();
    report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
                       "over-aligned dynamic alloca not supported.");
  }

  // The resultant pointer needs to be above the register spill area
  // at the bottom of the stack.
  unsigned regSpillArea;
  if (Subtarget->is64Bit()) {
    regSpillArea = 128;
  } else {
    // On Sparc32, the size of the spill area is 92. Unfortunately,
    // that's only 4-byte aligned, not 8-byte aligned (the stack
    // pointer is 8-byte aligned). So, if the user asked for an 8-byte
    // aligned dynamic allocation, we actually need to add 96 to the
    // bottom of the stack, instead of 92, to ensure 8-byte alignment.

    // That also means adding 4 to the size of the allocation --
    // before applying the 8-byte rounding. Unfortunately, the
    // value we get here has already had rounding applied. So, we need
    // to add 8 instead, wasting a bit more memory.

    // Further, this only actually needs to be done if the required
    // alignment is > 4, but, we've lost that info by this point, too,
    // so we always apply it.

    // (An alternative approach would be to always reserve 96 bytes
    // instead of the required 92, but then we'd waste 4 extra bytes
    // in every frame, not just those with dynamic stack allocations)

    // TODO: modify code in SelectionDAGBuilder to make this less sad.

    Size = DAG.getNode(ISD::ADD, dl, VT, Size,
                       DAG.getConstant(8, dl, VT));
    regSpillArea = 96;
  }

  unsigned SPReg = SP::O6;
  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
  SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
  Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain

  regSpillArea += Subtarget->getStackPointerBias();

  SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
                               DAG.getConstant(regSpillArea, dl, VT));
  SDValue Ops[2] = { NewVal, Chain };
  return DAG.getMergeValues(Ops, dl);
}
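
// Added worked example (hedged, editorial): on Sparc32 an already-rounded
// allocation gets 8 more bytes of slack above, and the returned pointer is
// NewSP + 96. Since %sp and 96 are both multiples of 8, the result stays
// 8-byte aligned, whereas using the true 92-byte spill-area offset would
// break that alignment.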
static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  SDValue Chain = DAG.getNode(SPISD::FLUSHW,
                              dl, MVT::Other, DAG.getEntryNode());
  return Chain;
}
static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
                            const SparcSubtarget *Subtarget,
                            bool AlwaysFlush = false) {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned FrameReg = SP::I6;
  unsigned stackBias = Subtarget->getStackPointerBias();

  SDValue FrameAddr;
  SDValue Chain;

  // flush first to make sure the windowed registers' values are in stack
  Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();

  FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);

  unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;

  while (depth--) {
    SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, dl));
    FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
  }
  if (Subtarget->is64Bit())
    FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                            DAG.getIntPtrConstant(stackBias, dl));
  return FrameAddr;
}
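
// Added note (hedged, editorial): offset 56 (Sparc32) / stackBias + 112
// (Sparc64) is the slot of the saved frame pointer (%i6) in a frame's
// register save area -- the 15th of the 16 saved window registers -- so
// each loop iteration above follows one link of the frame-pointer chain.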
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
                              const SparcSubtarget *Subtarget) {

  uint64_t depth = Op.getConstantOperandVal(0);

  return getFRAMEADDR(depth, Op, DAG, Subtarget);
}
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               const SparcSubtarget *Subtarget) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  uint64_t depth = Op.getConstantOperandVal(0);

  SDValue RetAddr;
  if (depth == 0) {
    auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
    RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
    return RetAddr;
  }

  // Need frame address to find return address of the caller.
  SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);

  unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
  SDValue Ptr = DAG.getNode(ISD::ADD,
                            dl, VT,
                            FrameAddr,
                            DAG.getIntPtrConstant(Offset, dl));
  RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());

  return RetAddr;
}
static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
                          unsigned opcode) {
  assert(SrcReg64.getValueType() == MVT::f64 &&
         "LowerF64Op called on non-double!");
  assert(opcode == ISD::FNEG || opcode == ISD::FABS);

  // Lower fneg/fabs on f64 to fneg/fabs on f32.
  // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
  // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.

  // Note: in little-endian, the floating-point value is stored in the
  // registers in the opposite order, so the subreg with the sign
  // bit is the highest-numbered (odd), rather than the
  // lowest-numbered (even).

  SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
                                            SrcReg64);
  SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
                                            SrcReg64);

  if (DAG.getDataLayout().isLittleEndian())
    Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
  else
    Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);

  SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                dl, MVT::f64), 0);
  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
                                       DstReg64, Hi32);
  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
                                       DstReg64, Lo32);
  return DstReg64;
}
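
// Added note (hedged, editorial): an f64 value occupies an even/odd pair of
// f32 registers (e.g. %f0/%f1). On big-endian SPARC the sign bit lives in
// the even half, so only that subregister needs the FNEGS/FABSS; the other
// half is simply moved, which is what the subreg extract/insert dance above
// encodes.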
// Lower a f128 load into two f64 loads.
static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
{
  SDLoc dl(Op);
  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
  assert(LdNode->getOffset().isUndef() && "Unexpected node type");

  Align Alignment = commonAlignment(LdNode->getOriginalAlign(), 8);

  SDValue Hi64 =
      DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
                  LdNode->getPointerInfo(), Alignment);
  EVT addrVT = LdNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              LdNode->getBasePtr(),
                              DAG.getConstant(8, dl, addrVT));
  SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
                             LdNode->getPointerInfo().getWithOffset(8),
                             Alignment);

  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);

  SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                       dl, MVT::f128);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Hi64,
                               SubRegEven);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Lo64,
                               SubRegOdd);
  SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
                           SDValue(Lo64.getNode(), 1) };
  SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
  return DAG.getMergeValues(Ops, dl);
}
static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
{
  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());

  EVT MemVT = LdNode->getMemoryVT();
  if (MemVT == MVT::f128)
    return LowerF128Load(Op, DAG);

  return SDValue();
}
// Lower a f128 store into two f64 stores.
static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
  assert(StNode->getOffset().isUndef() && "Unexpected node type");

  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);

  SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegEven);
  SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegOdd);

  Align Alignment = commonAlignment(StNode->getOriginalAlign(), 8);

  SDValue OutChains[2];
  OutChains[0] =
      DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
                   StNode->getBasePtr(), StNode->getPointerInfo(),
                   Alignment);
  EVT addrVT = StNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              StNode->getBasePtr(),
                              DAG.getConstant(8, dl, addrVT));
  OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
                              StNode->getPointerInfo().getWithOffset(8),
                              Alignment);
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
{
  SDLoc dl(Op);
  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());

  EVT MemVT = St->getMemoryVT();
  if (MemVT == MVT::f128)
    return LowerF128Store(Op, DAG);

  if (MemVT == MVT::i64) {
    // Custom handling for i64 stores: turn it into a bitcast and a
    // v2i32 store.
    SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
    SDValue Chain = DAG.getStore(
        St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
        St->getOriginalAlign(), St->getMemOperand()->getFlags(),
        St->getAAInfo());
    return Chain;
  }

  return SDValue();
}
static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
  assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
         && "invalid opcode");

  SDLoc dl(Op);

  if (Op.getValueType() == MVT::f64)
    return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
  if (Op.getValueType() != MVT::f128)
    return Op;

  // Lower fabs/fneg on f128 to fabs/fneg on f64
  // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
  // (As with LowerF64Op, on little-endian, we need to negate the odd
  //  subreg instead.)

  SDValue SrcReg128 = Op.getOperand(0);
  SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
                                            SrcReg128);
  SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
                                            SrcReg128);

  if (DAG.getDataLayout().isLittleEndian()) {
    if (isV9)
      Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
    else
      Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
  } else {
    if (isV9)
      Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
    else
      Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
  }

  SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, MVT::f128), 0);
  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
                                        DstReg128, Hi64);
  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
                                        DstReg128, Lo64);
  return DstReg128;
}
LowerADDC_ADDE_SUBC_SUBE(SDValue Op
, SelectionDAG
&DAG
) {
3075 if (Op
.getValueType() != MVT::i64
)
3079 SDValue Src1
= Op
.getOperand(0);
3080 SDValue Src1Lo
= DAG
.getNode(ISD::TRUNCATE
, dl
, MVT::i32
, Src1
);
3081 SDValue Src1Hi
= DAG
.getNode(ISD::SRL
, dl
, MVT::i64
, Src1
,
3082 DAG
.getConstant(32, dl
, MVT::i64
));
3083 Src1Hi
= DAG
.getNode(ISD::TRUNCATE
, dl
, MVT::i32
, Src1Hi
);
3085 SDValue Src2
= Op
.getOperand(1);
3086 SDValue Src2Lo
= DAG
.getNode(ISD::TRUNCATE
, dl
, MVT::i32
, Src2
);
3087 SDValue Src2Hi
= DAG
.getNode(ISD::SRL
, dl
, MVT::i64
, Src2
,
3088 DAG
.getConstant(32, dl
, MVT::i64
));
3089 Src2Hi
= DAG
.getNode(ISD::TRUNCATE
, dl
, MVT::i32
, Src2Hi
);
3092 bool hasChain
= false;
3093 unsigned hiOpc
= Op
.getOpcode();
3094 switch (Op
.getOpcode()) {
3095 default: llvm_unreachable("Invalid opcode");
3096 case ISD::ADDC
: hiOpc
= ISD::ADDE
; break;
3097 case ISD::ADDE
: hasChain
= true; break;
3098 case ISD::SUBC
: hiOpc
= ISD::SUBE
; break;
3099 case ISD::SUBE
: hasChain
= true; break;
3102 SDVTList VTs
= DAG
.getVTList(MVT::i32
, MVT::Glue
);
3104 Lo
= DAG
.getNode(Op
.getOpcode(), dl
, VTs
, Src1Lo
, Src2Lo
,
3107 Lo
= DAG
.getNode(Op
.getOpcode(), dl
, VTs
, Src1Lo
, Src2Lo
);
3109 SDValue Hi
= DAG
.getNode(hiOpc
, dl
, VTs
, Src1Hi
, Src2Hi
, Lo
.getValue(1));
3110 SDValue Carry
= Hi
.getValue(1);
3112 Lo
= DAG
.getNode(ISD::ZERO_EXTEND
, dl
, MVT::i64
, Lo
);
3113 Hi
= DAG
.getNode(ISD::ZERO_EXTEND
, dl
, MVT::i64
, Hi
);
3114 Hi
= DAG
.getNode(ISD::SHL
, dl
, MVT::i64
, Hi
,
3115 DAG
.getConstant(32, dl
, MVT::i64
));
3117 SDValue Dst
= DAG
.getNode(ISD::OR
, dl
, MVT::i64
, Hi
, Lo
);
3118 SDValue Ops
[2] = { Dst
, Carry
};
3119 return DAG
.getMergeValues(Ops
, dl
);
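// Worked example (values assumed): adding 0x00000001FFFFFFFF + 1 this way,
// the low halves compute 0xFFFFFFFF + 1 = 0 with a carry out, the high
// halves compute 0x00000001 + 0 + carry = 0x00000002, and the pieces are
// reassembled above as (Hi << 32) | Lo = 0x0000000200000000.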
// Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
// in LegalizeDAG.cpp except the order of arguments to the library function.
static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
                                const SparcTargetLowering &TLI)
{
  unsigned opcode = Op.getOpcode();
  assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");

  bool isSigned = (opcode == ISD::SMULO);
  EVT VT = MVT::i64;
  EVT WideVT = MVT::i128;
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);

  if (LHS.getValueType() != VT)
    return Op;

  SDValue ShiftAmt = DAG.getConstant(63, dl, VT);

  SDValue RHS = Op.getOperand(1);
  SDValue HiLHS, HiRHS;
  if (isSigned) {
    HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
    HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
  } else {
    HiLHS = DAG.getConstant(0, dl, VT);
    HiRHS = DAG.getConstant(0, dl, MVT::i64);
  }

  SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };

  TargetLowering::MakeLibCallOptions CallOptions;
  CallOptions.setSExt(isSigned);
  SDValue MulResult = TLI.makeLibCall(DAG,
                                      RTLIB::MUL_I128, WideVT,
                                      Args, CallOptions, dl).first;
  SDValue BottomHalf, TopHalf;
  std::tie(BottomHalf, TopHalf) = DAG.SplitScalar(MulResult, dl, VT, VT);
  if (isSigned) {
    SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
    TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
  } else {
    TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
                           ISD::SETNE);
  }
  // MulResult is a node with an illegal type. Because such things are not
  // generally permitted during this phase of legalization, ensure that
  // nothing is left using the node. The above EXTRACT_ELEMENT nodes should
  // have been folded.
  assert(MulResult->use_empty() && "Illegally typed node still in use!");

  SDValue Ops[2] = { BottomHalf, TopHalf };
  return DAG.getMergeValues(Ops, dl);
}
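// Argument-order note (illustrative, not normative): each i128 operand
// reaches the RTLIB::MUL_I128 call (conventionally __multi3) hi-word-first,
// hence Args = { HiLHS, LHS, HiRHS, RHS } on this big-endian target, whereas
// ExpandNode() in LegalizeDAG.cpp assembles the word pairs in the opposite
// order.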
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
    // Expand with a fence.
    return SDValue();
  }

  // Monotonic load/stores are legal.
  return Op;
}
SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getRegister(SP::G7, PtrVT);
  }
  }
}
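// No code is emitted for llvm.thread.pointer: the SPARC ABI dedicates %g7 to
// the thread pointer, so e.g. (assumed IR)
//   %tp = call ptr @llvm.thread.pointer()
// simply materializes as a read of %g7.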
SDValue SparcTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {

  bool hasHardQuad = Subtarget->hasHardQuad();
  bool isV9        = Subtarget->isV9();
  bool is64Bit     = Subtarget->is64Bit();

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Should not custom lower this!");

  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG, *this,
                                                       Subtarget);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG,
                                                      Subtarget);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::FP_TO_UINT:         return LowerFP_TO_UINT(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::UINT_TO_FP:         return LowerUINT_TO_FP(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
  case ISD::VASTART:            return LowerVASTART(Op, DAG, *this);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
                                                               Subtarget);

  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::FADD:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::ADD_F128), 2);
  case ISD::FSUB:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::SUB_F128), 2);
  case ISD::FMUL:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::MUL_F128), 2);
  case ISD::FDIV:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::DIV_F128), 2);
  case ISD::FSQRT:              return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::SQRT_F128), 1);
  case ISD::FABS:
  case ISD::FNEG:               return LowerFNEGorFABS(Op, DAG, isV9);
  case ISD::FP_EXTEND:          return LowerF128_FPEXTEND(Op, DAG, *this);
  case ISD::FP_ROUND:           return LowerF128_FPROUND(Op, DAG, *this);
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE:               return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
  case ISD::UMULO:
  case ISD::SMULO:              return LowerUMULO_SMULO(Op, DAG, *this);
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:       return LowerATOMIC_LOAD_STORE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  }
}
SDValue SparcTargetLowering::bitcastConstantFPToInt(ConstantFPSDNode *C,
                                                    const SDLoc &DL,
                                                    SelectionDAG &DAG) const {
  APInt V = C->getValueAPF().bitcastToAPInt();
  SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
  SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
  if (DAG.getDataLayout().isLittleEndian())
    std::swap(Lo, Hi);
  return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
}
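// Worked example (constant assumed): bitcasting the f64 constant 1.0
// (APInt 0x3FF0000000000000) yields Hi = 0x3FF00000 and Lo = 0x00000000; on
// a little-endian data layout the two halves are swapped before the v2i32
// build_vector is formed.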
SDValue SparcTargetLowering::PerformBITCASTCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
  SDLoc dl(N);
  SDValue Src = N->getOperand(0);

  if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
      Src.getSimpleValueType() == MVT::f64)
    return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);

  return SDValue();
}

SDValue SparcTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BITCAST:
    return PerformBITCASTCombine(N, DCI);
  }
  return SDValue();
}
MachineBasicBlock *
SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default: llvm_unreachable("Unknown SELECT_CC!");
  case SP::SELECT_CC_Int_ICC:
  case SP::SELECT_CC_FP_ICC:
  case SP::SELECT_CC_DFP_ICC:
  case SP::SELECT_CC_QFP_ICC:
    if (Subtarget->isV9())
      return expandSelectCC(MI, BB, SP::BPICC);
    return expandSelectCC(MI, BB, SP::BCOND);
  case SP::SELECT_CC_Int_XCC:
  case SP::SELECT_CC_FP_XCC:
  case SP::SELECT_CC_DFP_XCC:
  case SP::SELECT_CC_QFP_XCC:
    return expandSelectCC(MI, BB, SP::BPXCC);
  case SP::SELECT_CC_Int_FCC:
  case SP::SELECT_CC_FP_FCC:
  case SP::SELECT_CC_DFP_FCC:
  case SP::SELECT_CC_QFP_FCC:
    if (Subtarget->isV9())
      return expandSelectCC(MI, BB, SP::FBCOND_V9);
    return expandSelectCC(MI, BB, SP::FBCOND);
  }
}
MachineBasicBlock *
SparcTargetLowering::expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned BROpcode) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // triangle control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and the condition code for the
  // branch.
  //
  // We produce the following control flow:
  //     ThisMBB
  //     |  \
  //     |  IfFalseMBB
  //     | /
  //    SinkMBB
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  MachineBasicBlock *ThisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, IfFalseMBB);
  F->insert(It, SinkMBB);

  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
                  std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);

  // Set the new successors for ThisMBB.
  ThisMBB->addSuccessor(IfFalseMBB);
  ThisMBB->addSuccessor(SinkMBB);

  BuildMI(ThisMBB, dl, TII.get(BROpcode))
    .addMBB(SinkMBB)
    .addImm(CC);

  // IfFalseMBB just falls through to SinkMBB.
  IfFalseMBB->addSuccessor(SinkMBB);

  // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
  BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
          MI.getOperand(0).getReg())
      .addReg(MI.getOperand(1).getReg())
      .addMBB(ThisMBB)
      .addReg(MI.getOperand(2).getReg())
      .addMBB(IfFalseMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return SinkMBB;
}
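// Illustration (virtual register names assumed): a pseudo such as
//   %res = SELECT_CC_Int_ICC %t, %f, GT
// expands into the triangle above, roughly:
//   ThisMBB:    bg %icc, SinkMBB    ; BROpcode with the GT condition
//   IfFalseMBB:                     ; falls through
//   SinkMBB:    %res = PHI [%t, ThisMBB], [%f, IfFalseMBB]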
//===----------------------------------------------------------------------===//
// Sparc Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
SparcTargetLowering::ConstraintType
SparcTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:  break;
    case 'r':
    case 'f':
    case 'e':
      return C_RegisterClass;
    case 'I': // SIMM13
      return C_Immediate;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}
TargetLowering::ConstraintWeight SparcTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;

  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'I': // SIMM13
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (isInt<13>(C->getSExtValue()))
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}
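// Illustrative use of the 'I' (SIMM13) constraint (assumed snippet):
//   asm("add %1, %2, %0" : "=r"(r) : "r"(a), "I"(42));
// Any constant in [-4096, 4095] gets CW_Constant weight here; values outside
// the signed-13-bit range fall back to the default handling.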
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void SparcTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints for now.
  if (Constraint.size() > 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I': // SIMM13
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<13>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
      return;
    }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
std::pair<unsigned, const TargetRegisterClass *>
SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  if (Constraint.empty())
    return std::make_pair(0U, nullptr);

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      if (VT == MVT::v2i32)
        return std::make_pair(0U, &SP::IntPairRegClass);
      else if (Subtarget->is64Bit())
        return std::make_pair(0U, &SP::I64RegsRegClass);
      else
        return std::make_pair(0U, &SP::IntRegsRegClass);
    case 'f':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &SP::FPRegsRegClass);
      else if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &SP::LowDFPRegsRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SP::LowQFPRegsRegClass);
      // This will generate an error message
      return std::make_pair(0U, nullptr);
    case 'e':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &SP::FPRegsRegClass);
      else if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &SP::DFPRegsRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SP::QFPRegsRegClass);
      // This will generate an error message
      return std::make_pair(0U, nullptr);
    }
  }

  if (Constraint.front() != '{')
    return std::make_pair(0U, nullptr);

  assert(Constraint.back() == '}' && "Not a brace enclosed constraint?");
  StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
  if (RegName.empty())
    return std::make_pair(0U, nullptr);

  unsigned long long RegNo;
  // Handle numbered register aliases.
  if (RegName[0] == 'r' &&
      getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
    // r0-r7   -> g0-g7
    // r8-r15  -> o0-o7
    // r16-r23 -> l0-l7
    // r24-r31 -> i0-i7
    if (RegNo > 31)
      return std::make_pair(0U, nullptr);
    const char RegTypes[] = {'g', 'o', 'l', 'i'};
    char RegType = RegTypes[RegNo / 8];
    char RegIndex = '0' + (RegNo % 8);
    char Tmp[] = {'{', RegType, RegIndex, '}', 0};
    return getRegForInlineAsmConstraint(TRI, Tmp, VT);
  }

  // Rewrite the fN constraint according to the value type if needed.
  if (VT != MVT::f32 && VT != MVT::Other && RegName[0] == 'f' &&
      getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
    if (VT == MVT::f64 && (RegNo % 2 == 0)) {
      return getRegForInlineAsmConstraint(
          TRI, StringRef("{d" + utostr(RegNo / 2) + "}"), VT);
    } else if (VT == MVT::f128 && (RegNo % 4 == 0)) {
      return getRegForInlineAsmConstraint(
          TRI, StringRef("{q" + utostr(RegNo / 4) + "}"), VT);
    } else {
      return std::make_pair(0U, nullptr);
    }
  }

  auto ResultPair =
      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
  if (!ResultPair.second)
    return std::make_pair(0U, nullptr);

  // Force the use of I64Regs over IntRegs for 64-bit values.
  if (Subtarget->is64Bit() && VT == MVT::i64) {
    assert(ResultPair.second == &SP::IntRegsRegClass &&
           "Unexpected register class");
    return std::make_pair(ResultPair.first, &SP::I64RegsRegClass);
  }

  return ResultPair;
}
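// Illustration (constraints assumed): "{r9}" is rewritten to "{o1}"
// (9 / 8 selects 'o', 9 % 8 gives index 1) and retried; likewise an f64
// operand constrained to "{f2}" is redirected to the overlapping double
// register "{d1}".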
bool
SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The Sparc target isn't yet aware of offsets.
  return false;
}
void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>& Results,
                                             SelectionDAG &DAG) const {

  SDLoc dl(N);

  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");

  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // Custom lower only if it involves f128 or i64.
    if (N->getOperand(0).getValueType() != MVT::f128
        || N->getValueType(0) != MVT::i64)
      return;
    libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
               ? RTLIB::FPTOSINT_F128_I64
               : RTLIB::FPTOUINT_F128_I64);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::READCYCLECOUNTER: {
    assert(Subtarget->hasLeonCycleCounter());
    SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
    SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
    SDValue Ops[] = { Lo, Hi };
    SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
    Results.push_back(Pair);
    Results.push_back(N->getOperand(0));
    return;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    // Custom lower only if it involves f128 or i64.
    if (N->getValueType(0) != MVT::f128
        || N->getOperand(0).getValueType() != MVT::i64)
      return;

    libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
               ? RTLIB::SINTTOFP_I64_F128
               : RTLIB::UINTTOFP_I64_F128);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::LOAD: {
    LoadSDNode *Ld = cast<LoadSDNode>(N);
    // Custom handling only for i64: turn i64 load into a v2i32 load,
    // and a bitcast.
    if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
      return;

    SDLoc dl(N);
    SDValue LoadRes = DAG.getExtLoad(
        Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
        Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32,
        Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags(),
        Ld->getAAInfo());

    SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
    Results.push_back(Res);
    Results.push_back(LoadRes.getValue(1));
    return;
  }
  }
}
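// For illustration (assumed IR): 'load i64, ptr %p' is retyped as a v2i32
// load plus a BITCAST back to i64, mirroring LowerSTORE above, so the type
// legalizer never sees a bare i64 load on 32-bit SPARC.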
// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool SparcTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void SparcTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}
void SparcTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                        SDNode *Node) const {
  assert(MI.getOpcode() == SP::SUBCCrr || MI.getOpcode() == SP::SUBCCri);
  // If the result is dead, replace it with %g0.
  if (!Node->hasAnyUseOfValue(0))
    MI.getOperand(0).setReg(SP::G0);
}
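// Illustration (operands assumed): when only the condition codes of
//   SUBCCrr %dst, %a, %b
// are consumed, retargeting %dst to %g0 leaves the canonical compare idiom
//   cmp %a, %b    ; i.e. subcc %a, %b, %g0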