//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Sparc uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
14 #include "SparcISelLowering.h"
15 #include "MCTargetDesc/SparcMCExpr.h"
16 #include "SparcMachineFunctionInfo.h"
17 #include "SparcRegisterInfo.h"
18 #include "SparcTargetMachine.h"
19 #include "SparcTargetObjectFile.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/ADT/StringSwitch.h"
22 #include "llvm/CodeGen/CallingConvLower.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/SelectionDAG.h"
28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/Module.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/KnownBits.h"
37 //===----------------------------------------------------------------------===//
38 // Calling Convention Implementation
39 //===----------------------------------------------------------------------===//
41 static bool CC_Sparc_Assign_SRet(unsigned &ValNo
, MVT
&ValVT
,
42 MVT
&LocVT
, CCValAssign::LocInfo
&LocInfo
,
43 ISD::ArgFlagsTy
&ArgFlags
, CCState
&State
)
45 assert (ArgFlags
.isSRet());
47 // Assign SRet argument.
48 State
.addLoc(CCValAssign::getCustomMem(ValNo
, ValVT
,
54 static bool CC_Sparc_Assign_Split_64(unsigned &ValNo
, MVT
&ValVT
,
55 MVT
&LocVT
, CCValAssign::LocInfo
&LocInfo
,
56 ISD::ArgFlagsTy
&ArgFlags
, CCState
&State
)
58 static const MCPhysReg RegList
[] = {
59 SP::I0
, SP::I1
, SP::I2
, SP::I3
, SP::I4
, SP::I5
61 // Try to get first reg.
62 if (Register Reg
= State
.AllocateReg(RegList
)) {
63 State
.addLoc(CCValAssign::getCustomReg(ValNo
, ValVT
, Reg
, LocVT
, LocInfo
));
65 // Assign whole thing in stack.
66 State
.addLoc(CCValAssign::getCustomMem(
67 ValNo
, ValVT
, State
.AllocateStack(8, Align(4)), LocVT
, LocInfo
));
71 // Try to get second reg.
72 if (Register Reg
= State
.AllocateReg(RegList
))
73 State
.addLoc(CCValAssign::getCustomReg(ValNo
, ValVT
, Reg
, LocVT
, LocInfo
));
75 State
.addLoc(CCValAssign::getCustomMem(
76 ValNo
, ValVT
, State
.AllocateStack(4, Align(4)), LocVT
, LocInfo
));
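// Illustrative sketch (not in the original source): for a 32-bit call such as
// f(int a, int b, int c, int d, int e, double g), the five ints consume
// %i0-%i4 of the callee's window, so the split-64 logic above places g's high
// word in the one remaining register (%i5) via getCustomReg and its low word
// in a 4-byte stack slot via getCustomMem.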
80 static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo
, MVT
&ValVT
,
81 MVT
&LocVT
, CCValAssign::LocInfo
&LocInfo
,
82 ISD::ArgFlagsTy
&ArgFlags
, CCState
&State
)
84 static const MCPhysReg RegList
[] = {
85 SP::I0
, SP::I1
, SP::I2
, SP::I3
, SP::I4
, SP::I5
88 // Try to get first reg.
89 if (Register Reg
= State
.AllocateReg(RegList
))
90 State
.addLoc(CCValAssign::getCustomReg(ValNo
, ValVT
, Reg
, LocVT
, LocInfo
));
94 // Try to get second reg.
95 if (Register Reg
= State
.AllocateReg(RegList
))
96 State
.addLoc(CCValAssign::getCustomReg(ValNo
, ValVT
, Reg
, LocVT
, LocInfo
));
// Allocate a full-sized argument for the 64-bit ABI.
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
                            MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert((LocVT == MVT::f32 || LocVT == MVT::f128
          || LocVT.getSizeInBits() == 64) &&
         "Can't handle non-64 bits locations");

  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
  unsigned size      = (LocVT == MVT::f128) ? 16 : 8;
  Align alignment = (LocVT == MVT::f128) ? Align(16) : Align(8);
  unsigned Offset = State.AllocateStack(size, alignment);
  unsigned Reg = 0;

  if (LocVT == MVT::i64 && Offset < 6*8)
    // Promote integers to %i0-%i5.
    Reg = SP::I0 + Offset/8;
  else if (LocVT == MVT::f64 && Offset < 16*8)
    // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
    Reg = SP::D0 + Offset/8;
  else if (LocVT == MVT::f32 && Offset < 16*8)
    // Promote floats to %f1, %f3, ...
    Reg = SP::F1 + Offset/4;
  else if (LocVT == MVT::f128 && Offset < 16*8)
    // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
    Reg = SP::Q0 + Offset/16;

  // Promote to register when possible, otherwise use the stack slot.
  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // This argument goes on the stack in an 8-byte slot.
  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
  if (LocVT == MVT::f32)
    Offset += 4;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}
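// Worked example (illustrative, not from the original source): the fourth
// integer argument of a prototype like f(long, long, long, long) is allocated
// at Offset == 24, so it is promoted to SP::I0 + 24/8, i.e. %i3; an f64 at the
// same offset would instead map onto the corresponding double register.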
// Allocate a half-sized argument for the 64-bit ABI.
//
// This is used when passing { float, int } structs by value in registers.
static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
                            MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
  unsigned Offset = State.AllocateStack(4, Align(4));

  if (LocVT == MVT::f32 && Offset < 16*8) {
    // Promote floats to %f0-%f31.
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
                                     LocVT, LocInfo));
    return true;
  }

  if (LocVT == MVT::i32 && Offset < 6*8) {
    // Promote integers to %i0-%i5, using half the register.
    unsigned Reg = SP::I0 + Offset/8;
    LocVT = MVT::i64;
    LocInfo = CCValAssign::AExt;

    // Set the Custom bit if this i32 goes in the high bits of a register.
    if (Offset % 8 == 0)
      State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
                                             LocVT, LocInfo));
    else
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}
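// Illustrative note (not in the original source): for a struct such as
// { float f; int i; } passed by value in registers, the float half lands in
// an %f register while the i32 half shares an %i register with its neighbor;
// an i32 at an 8-byte-aligned offset occupies the register's high bits, which
// is exactly what the Custom bit set above records for later lowering.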
181 #include "SparcGenCallingConv.inc"
183 // The calling conventions in SparcCallingConv.td are described in terms of the
184 // callee's register window. This function translates registers to the
185 // corresponding caller window %o register.
186 static unsigned toCallerWindow(unsigned Reg
) {
187 static_assert(SP::I0
+ 7 == SP::I7
&& SP::O0
+ 7 == SP::O7
,
189 if (Reg
>= SP::I0
&& Reg
<= SP::I7
)
190 return Reg
- SP::I0
+ SP::O0
;
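// e.g. (illustrative): toCallerWindow(SP::I3) yields SP::O3, while registers
// outside %i0-%i7, such as float or global registers, pass through unchanged.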
SDValue
SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  if (Subtarget->is64Bit())
    return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
}
SDValue
SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Make room for the return address offset.
  RetOps.push_back(SDValue());

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::v2i32);
      // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
      // happen by default if this wasn't a legal type)

      SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
                                  Arg,
                                  DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
                                  Arg,
                                  DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));

      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Flag);
      Flag = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
                               Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
  // If the function returns a struct, copy the SRetReturnReg to I0
  if (MF.getFunction().hasStructRetAttr()) {
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    Register Reg = SFI->getSRetReturnReg();
    if (!Reg)
      llvm_unreachable("sret virtual register not created in the entry block");
    auto PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
    Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
    RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
  }

  RetOps[0] = Chain;  // Update chain.
  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
}
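// Usage note (illustrative, not from the original source): RetAddrOffset == 12
// reflects the SPARC32 struct-return convention, in which the caller follows
// the call and its delay slot with an extra 'unimp <size>' word that a
// struct-returning callee must skip over when it returns.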
// Lower return values for the 64-bit ABI.
// Return values are passed the exactly the same way as function arguments.
SDValue
SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // The second operand on the return instruction is the return address offset.
  // The return address is always %i7+8 with the 64-bit ABI.
  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue OutVal = OutVals[i];

    // Integer return values must be sign or zero extended by the callee.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::ZExt:
      OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::AExt:
      OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    // The custom bit on an i32 return value indicates that it should be passed
    // in the high bits of the register.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
      OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
                           DAG.getConstant(32, DL, MVT::i32));

      // The next value may go in the low bits of the same register.
      // Handle both at once.
      if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
        SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
        OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
        // Skip the next value, it's already done.
        ++i;
      }
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
}
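// Illustrative example (not in the original source): returning
// 'inreg { i32, i32 }' under this ABI packs both fields into one 64-bit
// register, the first shifted into the high 32 bits by the SHL above and the
// second OR'ed into the low 32 bits, so a single CopyToReg covers both values.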
SDValue SparcTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
                                   DL, DAG, InVals);
  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
                                 DL, DAG, InVals);
}
/// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
/// passed in either one or two GPRs, including FP values. TODO: we should
/// pass FP values in FP registers for fastcc functions.
SDValue SparcTargetLowering::LowerFormalArguments_32(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);

  const unsigned StackOffset = 92;
  bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();

  unsigned InIdx = 0;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
    CCValAssign &VA = ArgLocs[i];

    if (Ins[InIdx].Flags.isSRet()) {
      if (InIdx != 0)
        report_fatal_error("sparc only supports sret on the first parameter");
      // Get SRet from [%fp+64].
      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
      SDValue Arg =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
      InVals.push_back(Arg);
      continue;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom()) {
        assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

        Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
        MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
        SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);

        assert(i+1 < e);
        CCValAssign &NextVA = ArgLocs[++i];

        SDValue LoVal;
        if (NextVA.isMemLoc()) {
          int FrameIdx = MF.getFrameInfo().
            CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
          LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
        } else {
          Register loReg = MF.addLiveIn(NextVA.getLocReg(),
                                        &SP::IntRegsRegClass);
          LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
        }

        if (IsLittleEndian)
          std::swap(LoVal, HiVal);

        SDValue WholeValue =
          DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
        WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
        InVals.push_back(WholeValue);
        continue;
      }

      Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
      SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      if (VA.getLocVT() == MVT::f32)
        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
      else if (VA.getLocVT() != MVT::i32) {
        Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
                          DAG.getValueType(VA.getLocVT()));
        Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
      }
      InVals.push_back(Arg);
      continue;
    }

    assert(VA.isMemLoc());

    unsigned Offset = VA.getLocMemOffset()+StackOffset;
    auto PtrVT = getPointerTy(DAG.getDataLayout());

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
      // If it is double-word aligned, just load.
      if (Offset % 8 == 0) {
        int FI = MF.getFrameInfo().CreateFixedObject(8,
                                                     Offset,
                                                     true);
        SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
        SDValue Load =
            DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
        InVals.push_back(Load);
        continue;
      }

      int FI = MF.getFrameInfo().CreateFixedObject(4,
                                                   Offset,
                                                   true);
      SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
      SDValue HiVal =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
      int FI2 = MF.getFrameInfo().CreateFixedObject(4,
                                                    Offset+4,
                                                    true);
      SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);

      SDValue LoVal =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());

      if (IsLittleEndian)
        std::swap(LoVal, HiVal);

      SDValue WholeValue =
        DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
      WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
      InVals.push_back(WholeValue);
      continue;
    }

    int FI = MF.getFrameInfo().CreateFixedObject(4,
                                                 Offset,
                                                 true);
    SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
    SDValue Load;
    if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
      Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
    } else if (VA.getValVT() == MVT::f128) {
      report_fatal_error("SPARCv8 does not handle f128 in calls; "
                         "pass indirectly");
    } else {
      // We shouldn't see any other value types here.
      llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
    }
    InVals.push_back(Load);
  }

  if (MF.getFunction().hasStructRetAttr()) {
    // Copy the SRet Argument to SRetReturnReg.
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    Register Reg = SFI->getSRetReturnReg();
    if (!Reg) {
      Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
      SFI->setSRetReturnReg(Reg);
    }
    SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
  }

  // Store remaining ArgRegs to the stack if this is a varargs function.
  if (isVarArg) {
    static const MCPhysReg ArgRegs[] = {
      SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
    };
    unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
    const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
    unsigned ArgOffset = CCInfo.getNextStackOffset();
    if (NumAllocated == 6)
      ArgOffset += StackOffset;
    else {
      assert(!ArgOffset);
      ArgOffset = 68+4*NumAllocated;
    }

    // Remember the vararg offset for the va_start implementation.
    FuncInfo->setVarArgsFrameOffset(ArgOffset);

    std::vector<SDValue> OutChains;

    for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
      Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
      SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);

      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
                                                         true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);

      OutChains.push_back(
          DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
      ArgOffset += 4;
    }

    if (!OutChains.empty()) {
      OutChains.push_back(Chain);
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
    }
  }

  return Chain;
}
// Lower formal arguments for the 64 bit ABI.
SDValue SparcTargetLowering::LowerFormalArguments_64(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze arguments according to CC_Sparc64.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);

  // The argument array begins at %fp+BIAS+128, after the register save area.
  const unsigned ArgArea = 128;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (VA.isRegLoc()) {
      // This argument is passed in a register.
      // All integer register arguments are promoted by the caller to i64.

      // Create a virtual register for the promoted live-in value.
      Register VReg = MF.addLiveIn(VA.getLocReg(),
                                   getRegClassFor(VA.getLocVT()));
      SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());

      // Get the high bits for i32 struct elements.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom())
        Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
                          DAG.getConstant(32, DL, MVT::i32));

      // The caller promoted the argument, so insert an Assert?ext SDNode so we
      // won't promote the value again in this function.
      switch (VA.getLocInfo()) {
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      default:
        break;
      }

      // Truncate the register down to the argument type.
      if (VA.isExtInLoc())
        Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);

      InVals.push_back(Arg);
      continue;
    }

    // The registers are exhausted. This argument was passed on the stack.
    assert(VA.isMemLoc());
    // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
    // beginning of the arguments area at %fp+BIAS+128.
    unsigned Offset = VA.getLocMemOffset() + ArgArea;
    unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
    // Adjust offset for extended arguments, SPARC is big-endian.
    // The caller will have written the full slot with extended bytes, but we
    // prefer our own extending loads.
    if (VA.isExtInLoc())
      Offset += 8 - ValSize;
    int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
    InVals.push_back(
        DAG.getLoad(VA.getValVT(), DL, Chain,
                    DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
                    MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!IsVarArg)
    return Chain;

  // This function takes variable arguments, some of which may have been passed
  // in registers %i0-%i5. Variable floating point arguments are never passed
  // in floating point registers. They go on %i0-%i5 or on the stack like
  // integer arguments.
  //
  // The va_start intrinsic needs to know the offset to the first variable
  // argument.
  unsigned ArgOffset = CCInfo.getNextStackOffset();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  // Skip the 128 bytes of register save area.
  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
                                  Subtarget->getStackPointerBias());

  // Save the variable arguments that were passed in registers.
  // The caller is required to reserve stack space for 6 arguments regardless
  // of how many arguments were actually passed.
  SmallVector<SDValue, 8> OutChains;
  for (; ArgOffset < 6*8; ArgOffset += 8) {
    Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
    SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
    int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
    auto PtrVT = getPointerTy(MF.getDataLayout());
    OutChains.push_back(
        DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
                     MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);

  return Chain;
}
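// Illustrative note (not in the original source): in a varargs function with
// two named i64 parameters, getNextStackOffset() returns 16, so the loop
// above dumps the remaining register arguments %i2-%i5 into their reserved
// 8-byte slots starting at [%fp+BIAS+144], letting va_arg walk them in memory.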
SDValue
SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerCall_64(CLI, InVals);
  return LowerCall_32(CLI, InVals);
}
static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
                                const CallBase *Call) {
  if (Call)
    return Call->hasFnAttr(Attribute::ReturnsTwice);

  const Function *CalleeFn = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    CalleeFn = dyn_cast<Function>(G->getGlobal());
  } else if (ExternalSymbolSDNode *E =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const Function &Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn.getParent();
    const char *CalleeName = E->getSymbol();
    CalleeFn = M->getFunction(CalleeName);
  }

  if (!CalleeFn)
    return false;
  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
}
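// Illustrative note (not in the original source): callees such as setjmp are
// marked returns_twice; both LowerCall_32 and LowerCall_64 use this predicate
// to select the more conservative register mask via getRTCallPreservedMask().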
// Lower a call for the 32-bit ABI.
SDValue
SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  // Sparc target does not yet support tail call optimization.
  isTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);

  // Get the size of the outgoing arguments stack space requirement.
  unsigned ArgsSize = CCInfo.getNextStackOffset();

  // Keep stack frames 8-byte aligned.
  ArgsSize = (ArgsSize+7) & ~7;

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  // Create local copies for byval args.
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    Align Alignment = Flags.getNonZeroByValAlign();

    if (Size > 0U) {
      int FI = MFI.CreateStackObject(Size, Alignment, false);
      SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);

      Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
                            false,        // isVolatile,
                            (Size <= 32), // AlwaysInline if size <= 32,
                            false,        // isTailCall
                            MachinePointerInfo(), MachinePointerInfo());
      ByValArgs.push_back(FIPtr);
    } else {
      SDValue nullVal;
      ByValArgs.push_back(nullVal);
    }
  }

  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  const unsigned StackOffset = 92;
  bool hasStructRetAttr = false;
  unsigned SRetArgSize = 0;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];

    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Use local copy if it is a byval arg.
    if (Flags.isByVal()) {
      Arg = ByValArgs[byvalArgIdx++];
      if (!Arg) {
        continue;
      }
    }

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (Flags.isSRet()) {
      assert(VA.needsCustom());

      // store SRet argument in %sp+64
      SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
      SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
      PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
      MemOpChains.push_back(
          DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
      hasStructRetAttr = true;
      // sret only allowed on first argument
      assert(Outs[realArgIdx].OrigArgIndex == 0);
      SRetArgSize =
          DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
      continue;
    }

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

      if (VA.isMemLoc()) {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // if it is double-word aligned, just store.
        if (Offset % 8 == 0) {
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
          continue;
        }
      }

      if (VA.getLocVT() == MVT::f64) {
        // Move the float value from float registers into the
        // integer registers.
        if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
          Arg = bitcastConstantFPToInt(C, dl, DAG);
        else
          Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
      }

      SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  Arg,
                                  DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  Arg,
                                  DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));

      if (VA.isRegLoc()) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
        assert(i+1 != e);
        CCValAssign &NextVA = ArgLocs[++i];
        if (NextVA.isRegLoc()) {
          RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
        } else {
          // Store the second part in stack.
          unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
        }
      } else {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // Store the first part.
        SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
        SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
        // Store the second part.
        PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
      }
      continue;
    }

    // Arguments that can be passed on register must be kept at
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      if (VA.getLocVT() != MVT::f32) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
        continue;
      }
      Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
                                           dl);
    PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before any copies into physregs.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Register Reg = toCallerWindow(RegsToPass[i].first);
    Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30
                                        : SparcMCExpr::VK_Sparc_WDISP30;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);

  // Returns a chain & a flag for retval copy to use
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  if (hasStructRetAttr)
    Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first),
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice)
           ? TRI->getRTCallPreservedMask(CallConv)
           : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  InFlag = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    if (RVLocs[i].getLocVT() == MVT::v2i32) {
      SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
      SDValue Lo = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
                        DAG.getConstant(0, dl, MVT::i32));
      SDValue Hi = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
                        DAG.getConstant(1, dl, MVT::i32));
      InVals.push_back(Vec);
    } else {
      Chain =
          DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
                             RVLocs[i].getValVT(), InFlag)
              .getValue(1);
      InFlag = Chain.getValue(2);
      InVals.push_back(Chain.getValue(0));
    }
  }

  return Chain;
}
// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register SparcTargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                                const MachineFunction &MF) const {
  Register Reg = StringSwitch<Register>(RegName)
    .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
    .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
    .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
    .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
    .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
    .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
    .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
    .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
    .Default(0);

  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}
// Fixup floating point arguments in the ... part of a varargs call.
//
// The SPARC v9 ABI requires that floating point arguments are treated the same
// as integers when calling a varargs function. This does not apply to the
// fixed arguments that are part of the function's prototype.
//
// This function post-processes a CCValAssign array created by
// AnalyzeCallOperands().
static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
                                   ArrayRef<ISD::OutputArg> Outs) {
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    MVT ValTy = VA.getLocVT();
    // FIXME: What about f32 arguments? C promotes them to f64 when calling
    // varargs functions.
    if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
      continue;
    // The fixed arguments to a varargs function still go in FP registers.
    if (Outs[VA.getValNo()].IsFixed)
      continue;

    // This floating point argument should be reassigned.
    CCValAssign NewVA;

    // Determine the offset into the argument array.
    Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
    unsigned argSize  = (ValTy == MVT::f64) ? 8 : 16;
    unsigned Offset = argSize * (VA.getLocReg() - firstReg);
    assert(Offset < 16*8 && "Offset out of range, bad register enum?");

    if (Offset < 6*8) {
      // This argument should go in %i0-%i5.
      unsigned IReg = SP::I0 + Offset/8;
      if (ValTy == MVT::f64)
        // Full register, just bitconvert into i64.
        NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                    IReg, MVT::i64, CCValAssign::BCvt);
      else {
        assert(ValTy == MVT::f128 && "Unexpected type!");
        // Full register, just bitconvert into i128 -- We will lower this into
        // two i64s in LowerCall_64.
        NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
                                          IReg, MVT::i128, CCValAssign::BCvt);
      }
    } else {
      // This needs to go to memory, we're out of integer registers.
      NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                                  Offset, VA.getLocVT(), VA.getLocInfo());
    }
    ArgLocs[i] = NewVA;
  }
}
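// Worked example (illustrative, not from the original source): in a call like
// printf("%f\n", d), the f64 'd' is first assigned to a float register by
// AnalyzeCallOperands(); because it is not a fixed argument, the code above
// reassigns it to the matching %i register (or a stack slot) with a BCvt to
// i64, as the V9 varargs convention requires.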
// Lower a call for the 64-bit ABI.
SDValue
SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Sparc target does not yet support tail call optimization.
  CLI.IsTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);

  // Get the size of the outgoing arguments stack space requirement.
  // The stack offset computed by CC_Sparc64 includes all arguments.
  // Called functions expect 6 argument words to exist in the stack frame, used
  // or not.
  unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());

  // Keep stack frames 16-byte aligned.
  ArgsSize = alignTo(ArgsSize, 16);

  // Varargs calls require special treatment.
  if (CLI.IsVarArg)
    fixupVariableFloatArgs(ArgLocs, CLI.Outs);

  // Adjust the stack pointer to make room for the arguments.
  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
  // with more than 6 arguments.
  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);

  // Collect the set of registers to pass to the function and their values.
  // This will be emitted as a sequence of CopyToReg nodes glued to the call
  // instruction.
  SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;

  // Collect chains from all the memory operations that copy arguments to the
  // stack. They must follow the stack pointer adjustment above and precede the
  // call instruction itself.
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    SDValue Arg = CLI.OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown location info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
      // SPARC does not support i128 natively. Lower it into two i64, see below.
      if (!VA.needsCustom() || VA.getValVT() != MVT::f128
          || VA.getLocVT() != MVT::i128)
        Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom() && VA.getValVT() == MVT::f128
          && VA.getLocVT() == MVT::i128) {
        // Store and reload into the integer register reg and reg+1.
        unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
        unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
        SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
        SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
        HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
        SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
        LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);

        // Store to %sp+BIAS+128+Offset
        SDValue Store =
            DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
        // Load into Reg and Reg+1
        SDValue Hi64 =
            DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
        SDValue Lo64 =
            DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
                                            Hi64));
        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
                                            Lo64));
        continue;
      }

      // The custom bit on an i32 return value indicates that it should be
      // passed in the high bits of the register.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
        Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
                          DAG.getConstant(32, DL, MVT::i32));

        // The next value may go in the low bits of the same register.
        // Handle both at once.
        if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
            ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
          SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
                                   CLI.OutVals[i+1]);
          Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
          // Skip the next value, it's already done.
          ++i;
        }
      }

      RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
    // The argument area starts at %fp+BIAS+128 in the callee frame,
    // %sp+BIAS+128 in ours.
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
                                           Subtarget->getStackPointerBias() +
                                           128, DL);
    PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before the call.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of CopyToReg nodes glued together with token chain and
  // glue operands which copy the outgoing args into registers. The InGlue is
  // necessary since all emitted instructions must be stuck together in order
  // to pass the live physical registers.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, DL,
                             RegsToPass[i].first, RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  SDValue Callee = CLI.Callee;
  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
  unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30
                                        : SparcMCExpr::VK_Sparc_WDISP30;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);

  // Build the operands for the call instruction itself.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
                         : TRI->getCallPreservedMask(DAG.getMachineFunction(),
                                                     CLI.CallConv));
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Make sure the CopyToReg nodes are glued to the call instruction which
  // consumes the registers.
  if (InGlue.getNode())
    Ops.push_back(InGlue);

  // Now the call itself.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  // Revert the stack pointer immediately after the call.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
                             DAG.getIntPtrConstant(0, DL, true), InGlue, DL);
  InGlue = Chain.getValue(1);

  // Now extract the return values. This is more or less the same as
  // LowerFormalArguments_64.

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Set inreg flag manually for codegen generated library calls that
  // return float.
  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
    CLI.Ins[0].Flags.setInReg();

  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    unsigned Reg = toCallerWindow(VA.getLocReg());

    // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
    // reside in the same register in the high and low bits. Reuse the
    // CopyFromReg previous node to avoid duplicate copies.
    SDValue RV;
    if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
      if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
        RV = Chain.getValue(0);

    // But usually we'll create a new CopyFromReg for a different register.
    if (!RV.getNode()) {
      RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
      Chain = RV.getValue(1);
      InGlue = Chain.getValue(2);
    }

    // Get the high bits for i32 struct elements.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom())
      RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
                       DAG.getConstant(32, DL, MVT::i32));

    // The callee promoted the return value, so insert an Assert?ext SDNode so
    // we won't promote the value again in this function.
    switch (VA.getLocInfo()) {
    case CCValAssign::SExt:
      RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    case CCValAssign::ZExt:
      RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    default:
      break;
    }

    // Truncate the register down to the return value type.
    if (VA.isExtInLoc())
      RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);

    InVals.push_back(RV);
  }

  return Chain;
}
//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//

TargetLowering::AtomicExpansionKind
SparcTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  if (AI->getOperation() == AtomicRMWInst::Xchg &&
      AI->getType()->getPrimitiveSizeInBits() == 32)
    return AtomicExpansionKind::None; // Uses xchg instruction

  return AtomicExpansionKind::CmpXChg;
}
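// Illustrative consequence (not in the original source): a 32-bit
// 'atomicrmw xchg' is selected to the native swap instruction, while other
// i32 RMW operations are rewritten by the generic AtomicExpand pass into
// compare-and-swap loops.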
/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
/// condition.
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown integer condition code!");
  case ISD::SETEQ:  return SPCC::ICC_E;
  case ISD::SETNE:  return SPCC::ICC_NE;
  case ISD::SETLT:  return SPCC::ICC_L;
  case ISD::SETGT:  return SPCC::ICC_G;
  case ISD::SETLE:  return SPCC::ICC_LE;
  case ISD::SETGE:  return SPCC::ICC_GE;
  case ISD::SETULT: return SPCC::ICC_CS;
  case ISD::SETULE: return SPCC::ICC_LEU;
  case ISD::SETUGT: return SPCC::ICC_GU;
  case ISD::SETUGE: return SPCC::ICC_CC;
  }
}
/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
/// FCC condition.
static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  case ISD::SETEQ:
  case ISD::SETOEQ: return SPCC::FCC_E;
  case ISD::SETNE:
  case ISD::SETUNE: return SPCC::FCC_NE;
  case ISD::SETLT:
  case ISD::SETOLT: return SPCC::FCC_L;
  case ISD::SETGT:
  case ISD::SETOGT: return SPCC::FCC_G;
  case ISD::SETLE:
  case ISD::SETOLE: return SPCC::FCC_LE;
  case ISD::SETGE:
  case ISD::SETOGE: return SPCC::FCC_GE;
  case ISD::SETULT: return SPCC::FCC_UL;
  case ISD::SETULE: return SPCC::FCC_ULE;
  case ISD::SETUGT: return SPCC::FCC_UG;
  case ISD::SETUGE: return SPCC::FCC_UGE;
  case ISD::SETUO:  return SPCC::FCC_U;
  case ISD::SETO:   return SPCC::FCC_O;
  case ISD::SETONE: return SPCC::FCC_LG;
  case ISD::SETUEQ: return SPCC::FCC_UE;
  }
}
1407 SparcTargetLowering::SparcTargetLowering(const TargetMachine
&TM
,
1408 const SparcSubtarget
&STI
)
1409 : TargetLowering(TM
), Subtarget(&STI
) {
1410 MVT PtrVT
= MVT::getIntegerVT(8 * TM
.getPointerSize(0));
1412 // Instructions which use registers as conditionals examine all the
1413 // bits (as does the pseudo SELECT_CC expansion). I don't think it
1414 // matters much whether it's ZeroOrOneBooleanContent, or
1415 // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
1417 setBooleanContents(ZeroOrOneBooleanContent
);
1418 setBooleanVectorContents(ZeroOrOneBooleanContent
);
1420 // Set up the register classes.
1421 addRegisterClass(MVT::i32
, &SP::IntRegsRegClass
);
1422 if (!Subtarget
->useSoftFloat()) {
1423 addRegisterClass(MVT::f32
, &SP::FPRegsRegClass
);
1424 addRegisterClass(MVT::f64
, &SP::DFPRegsRegClass
);
1425 addRegisterClass(MVT::f128
, &SP::QFPRegsRegClass
);
1427 if (Subtarget
->is64Bit()) {
1428 addRegisterClass(MVT::i64
, &SP::I64RegsRegClass
);
1430 // On 32bit sparc, we define a double-register 32bit register
1431 // class, as well. This is modeled in LLVM as a 2-vector of i32.
1432 addRegisterClass(MVT::v2i32
, &SP::IntPairRegClass
);
1434 // ...but almost all operations must be expanded, so set that as
1436 for (unsigned Op
= 0; Op
< ISD::BUILTIN_OP_END
; ++Op
) {
1437 setOperationAction(Op
, MVT::v2i32
, Expand
);
1439 // Truncating/extending stores/loads are also not supported.
1440 for (MVT VT
: MVT::integer_fixedlen_vector_valuetypes()) {
1441 setLoadExtAction(ISD::SEXTLOAD
, VT
, MVT::v2i32
, Expand
);
1442 setLoadExtAction(ISD::ZEXTLOAD
, VT
, MVT::v2i32
, Expand
);
1443 setLoadExtAction(ISD::EXTLOAD
, VT
, MVT::v2i32
, Expand
);
1445 setLoadExtAction(ISD::SEXTLOAD
, MVT::v2i32
, VT
, Expand
);
1446 setLoadExtAction(ISD::ZEXTLOAD
, MVT::v2i32
, VT
, Expand
);
1447 setLoadExtAction(ISD::EXTLOAD
, MVT::v2i32
, VT
, Expand
);
1449 setTruncStoreAction(VT
, MVT::v2i32
, Expand
);
1450 setTruncStoreAction(MVT::v2i32
, VT
, Expand
);
1452 // However, load and store *are* legal.
1453 setOperationAction(ISD::LOAD
, MVT::v2i32
, Legal
);
1454 setOperationAction(ISD::STORE
, MVT::v2i32
, Legal
);
1455 setOperationAction(ISD::EXTRACT_VECTOR_ELT
, MVT::v2i32
, Legal
);
1456 setOperationAction(ISD::BUILD_VECTOR
, MVT::v2i32
, Legal
);
1458 // And we need to promote i64 loads/stores into vector load/store
1459 setOperationAction(ISD::LOAD
, MVT::i64
, Custom
);
1460 setOperationAction(ISD::STORE
, MVT::i64
, Custom
);
1462 // Sadly, this doesn't work:
1463 // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
1464 // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
1467 // Turn FP extload into load/fpextend
1468 for (MVT VT
: MVT::fp_valuetypes()) {
1469 setLoadExtAction(ISD::EXTLOAD
, VT
, MVT::f16
, Expand
);
1470 setLoadExtAction(ISD::EXTLOAD
, VT
, MVT::f32
, Expand
);
1471 setLoadExtAction(ISD::EXTLOAD
, VT
, MVT::f64
, Expand
);
1474 // Sparc doesn't have i1 sign extending load
1475 for (MVT VT
: MVT::integer_valuetypes())
1476 setLoadExtAction(ISD::SEXTLOAD
, VT
, MVT::i1
, Promote
);
1478 // Turn FP truncstore into trunc + store.
1479 setTruncStoreAction(MVT::f32
, MVT::f16
, Expand
);
1480 setTruncStoreAction(MVT::f64
, MVT::f16
, Expand
);
1481 setTruncStoreAction(MVT::f64
, MVT::f32
, Expand
);
1482 setTruncStoreAction(MVT::f128
, MVT::f16
, Expand
);
1483 setTruncStoreAction(MVT::f128
, MVT::f32
, Expand
);
1484 setTruncStoreAction(MVT::f128
, MVT::f64
, Expand
);
1486 // Custom legalize GlobalAddress nodes into LO/HI parts.
1487 setOperationAction(ISD::GlobalAddress
, PtrVT
, Custom
);
1488 setOperationAction(ISD::GlobalTLSAddress
, PtrVT
, Custom
);
1489 setOperationAction(ISD::ConstantPool
, PtrVT
, Custom
);
1490 setOperationAction(ISD::BlockAddress
, PtrVT
, Custom
);
1492 // Sparc doesn't have sext_inreg, replace them with shl/sra
1493 setOperationAction(ISD::SIGN_EXTEND_INREG
, MVT::i16
, Expand
);
1494 setOperationAction(ISD::SIGN_EXTEND_INREG
, MVT::i8
, Expand
);
1495 setOperationAction(ISD::SIGN_EXTEND_INREG
, MVT::i1
, Expand
);
1497 // Sparc has no REM or DIVREM operations.
1498 setOperationAction(ISD::UREM
, MVT::i32
, Expand
);
1499 setOperationAction(ISD::SREM
, MVT::i32
, Expand
);
1500 setOperationAction(ISD::SDIVREM
, MVT::i32
, Expand
);
1501 setOperationAction(ISD::UDIVREM
, MVT::i32
, Expand
);
1503 // ... nor does SparcV9.
1504 if (Subtarget
->is64Bit()) {
1505 setOperationAction(ISD::UREM
, MVT::i64
, Expand
);
1506 setOperationAction(ISD::SREM
, MVT::i64
, Expand
);
1507 setOperationAction(ISD::SDIVREM
, MVT::i64
, Expand
);
1508 setOperationAction(ISD::UDIVREM
, MVT::i64
, Expand
);
1511 // Custom expand fp<->sint
1512 setOperationAction(ISD::FP_TO_SINT
, MVT::i32
, Custom
);
1513 setOperationAction(ISD::SINT_TO_FP
, MVT::i32
, Custom
);
1514 setOperationAction(ISD::FP_TO_SINT
, MVT::i64
, Custom
);
1515 setOperationAction(ISD::SINT_TO_FP
, MVT::i64
, Custom
);
1517 // Custom Expand fp<->uint
1518 setOperationAction(ISD::FP_TO_UINT
, MVT::i32
, Custom
);
1519 setOperationAction(ISD::UINT_TO_FP
, MVT::i32
, Custom
);
1520 setOperationAction(ISD::FP_TO_UINT
, MVT::i64
, Custom
);
1521 setOperationAction(ISD::UINT_TO_FP
, MVT::i64
, Custom
);
  // Lower f16 conversion operations into library calls
  setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);

  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);

  // Sparc has no select or setcc: expand to SELECT_CC.
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::f128, Expand);

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SETCC, MVT::f128, Expand);

  // Sparc doesn't have BRCOND either, it has BR_CC.
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_CC, MVT::f128, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);

  setOperationAction(ISD::ADDC, MVT::i32, Custom);
  setOperationAction(ISD::ADDE, MVT::i32, Custom);
  setOperationAction(ISD::SUBC, MVT::i32, Custom);
  setOperationAction(ISD::SUBE, MVT::i32, Custom);
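
  // The carry-using ops are custom lowered (see LowerADDC_ADDE_SUBC_SUBE
  // below): the i32 forms mostly pass through, while i64 on V9 is split
  // into two 32-bit halves because the carry-consuming add/sub
  // instructions read the 32-bit icc carry bit.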

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ADDC, MVT::i64, Custom);
    setOperationAction(ISD::ADDE, MVT::i64, Custom);
    setOperationAction(ISD::SUBC, MVT::i64, Custom);
    setOperationAction(ISD::SUBE, MVT::i64, Custom);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SETCC, MVT::i64, Expand);
    setOperationAction(ISD::BR_CC, MVT::i64, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);

    setOperationAction(ISD::CTPOP, MVT::i64,
                       Subtarget->usePopc() ? Legal : Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
    setOperationAction(ISD::CTLZ, MVT::i64, Expand);
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);
    setOperationAction(ISD::ROTL, MVT::i64, Expand);
    setOperationAction(ISD::ROTR, MVT::i64, Expand);
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  }
  // Atomics are supported on SparcV9. 32-bit atomics are also
  // supported by some Leon SparcV8 variants. Otherwise, atomics
  // are unsupported.
  if (Subtarget->isV9())
    setMaxAtomicSizeInBitsSupported(64);
  else if (Subtarget->hasLeonCasa())
    setMaxAtomicSizeInBitsSupported(32);
  else
    setMaxAtomicSizeInBitsSupported(0);

  setMinCmpXchgSizeInBits(32);
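
  // Atomic operations wider than the supported size are converted into
  // __atomic_* library calls by the AtomicExpand pass, which is what
  // setMaxAtomicSizeInBitsSupported(0) requests for plain V8 subtargets.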

  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);

  // Custom Lower Atomic LOAD/STORE
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
  }

  if (!Subtarget->is64Bit()) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::MULO_I64, nullptr);
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setLibcallName(RTLIB::MULO_I128, nullptr);

  if (!Subtarget->isV9()) {
    // SparcV8 does not have FNEGD and FABSD.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f64, Custom);
  }

  setOperationAction(ISD::FSIN, MVT::f128, Expand);
  setOperationAction(ISD::FCOS, MVT::f128, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FMA, MVT::f128, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Expands to [SU]MUL_LOHI.
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MUL, MVT::i32, Expand);

  if (Subtarget->useSoftMulDiv()) {
    // .umul works for both signed and unsigned
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setLibcallName(RTLIB::MUL_I32, ".umul");

    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setLibcallName(RTLIB::SDIV_I32, ".div");

    setOperationAction(ISD::UDIV, MVT::i32, Expand);
    setLibcallName(RTLIB::UDIV_I32, ".udiv");

    setLibcallName(RTLIB::SREM_I32, ".rem");
    setLibcallName(RTLIB::UREM_I32, ".urem");
  }

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i64, Expand);
    setOperationAction(ISD::MULHS, MVT::i64, Expand);

    setOperationAction(ISD::UMULO, MVT::i64, Custom);
    setOperationAction(ISD::SMULO, MVT::i64, Custom);

    setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  }
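
  // i64 UMULO/SMULO are custom lowered (LowerUMULO_SMULO below) into a
  // 128-bit multiply via the __multi3 libcall plus an explicit overflow
  // check on the high half.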

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  // VAARG needs to be lowered to not do unaligned accesses for doubles.
  setOperationAction(ISD::VAARG, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);

  setStackPointerRegisterToSaveRestore(SP::O6);

  setOperationAction(ISD::CTPOP, MVT::i32,
                     Subtarget->usePopc() ? Legal : Expand);

  if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
    setOperationAction(ISD::LOAD, MVT::f128, Legal);
    setOperationAction(ISD::STORE, MVT::f128, Legal);
  } else {
    setOperationAction(ISD::LOAD, MVT::f128, Custom);
    setOperationAction(ISD::STORE, MVT::f128, Custom);
  }

  if (Subtarget->hasHardQuad()) {
    setOperationAction(ISD::FADD, MVT::f128, Legal);
    setOperationAction(ISD::FSUB, MVT::f128, Legal);
    setOperationAction(ISD::FMUL, MVT::f128, Legal);
    setOperationAction(ISD::FDIV, MVT::f128, Legal);
    setOperationAction(ISD::FSQRT, MVT::f128, Legal);
    setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
    if (Subtarget->isV9()) {
      setOperationAction(ISD::FNEG, MVT::f128, Legal);
      setOperationAction(ISD::FABS, MVT::f128, Legal);
    } else {
      setOperationAction(ISD::FNEG, MVT::f128, Custom);
      setOperationAction(ISD::FABS, MVT::f128, Custom);
    }

    if (!Subtarget->is64Bit()) {
      setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
      setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
      setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
    }

  } else {
    // Custom legalize f128 operations.

    setOperationAction(ISD::FADD, MVT::f128, Custom);
    setOperationAction(ISD::FSUB, MVT::f128, Custom);
    setOperationAction(ISD::FMUL, MVT::f128, Custom);
    setOperationAction(ISD::FDIV, MVT::f128, Custom);
    setOperationAction(ISD::FSQRT, MVT::f128, Custom);
    setOperationAction(ISD::FNEG, MVT::f128, Custom);
    setOperationAction(ISD::FABS, MVT::f128, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
  }

  // Setup Runtime library names.
  if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
    setLibcallName(RTLIB::ADD_F128, "_Qp_add");
    setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
    setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
    setLibcallName(RTLIB::DIV_F128, "_Qp_div");
    setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
    setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
    setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
    setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
    setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
    setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
    setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
    setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
    setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
    setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
    setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
    setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
    setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
  } else if (!Subtarget->useSoftFloat()) {
    setLibcallName(RTLIB::ADD_F128, "_Q_add");
    setLibcallName(RTLIB::SUB_F128, "_Q_sub");
    setLibcallName(RTLIB::MUL_F128, "_Q_mul");
    setLibcallName(RTLIB::DIV_F128, "_Q_div");
    setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
    setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
    setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
    setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
    setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
    setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
    setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
    setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
    setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
    setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
    setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
    setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
    setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
  }

  if (Subtarget->fixAllFDIVSQRT()) {
    // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
    // the former instructions generate errata on LEON processors.
    setOperationAction(ISD::FDIV, MVT::f32, Promote);
    setOperationAction(ISD::FSQRT, MVT::f32, Promote);
  }

  if (Subtarget->hasNoFMULS()) {
    setOperationAction(ISD::FMUL, MVT::f32, Promote);
  }

  // Custom combine bitcast between f64 and v2i32
  if (!Subtarget->is64Bit())
    setTargetDAGCombine(ISD::BITCAST);

  if (Subtarget->hasLeonCycleCounter())
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  setMinFunctionAlignment(Align(4));

  computeRegisterProperties(Subtarget->getRegisterInfo());
}

bool SparcTargetLowering::useSoftFloat() const {
  return Subtarget->useSoftFloat();
}

const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((SPISD::NodeType)Opcode) {
  case SPISD::FIRST_NUMBER:    break;
  case SPISD::CMPICC:          return "SPISD::CMPICC";
  case SPISD::CMPFCC:          return "SPISD::CMPFCC";
  case SPISD::BRICC:           return "SPISD::BRICC";
  case SPISD::BRXCC:           return "SPISD::BRXCC";
  case SPISD::BRFCC:           return "SPISD::BRFCC";
  case SPISD::SELECT_ICC:      return "SPISD::SELECT_ICC";
  case SPISD::SELECT_XCC:      return "SPISD::SELECT_XCC";
  case SPISD::SELECT_FCC:      return "SPISD::SELECT_FCC";
  case SPISD::Hi:              return "SPISD::Hi";
  case SPISD::Lo:              return "SPISD::Lo";
  case SPISD::FTOI:            return "SPISD::FTOI";
  case SPISD::ITOF:            return "SPISD::ITOF";
  case SPISD::FTOX:            return "SPISD::FTOX";
  case SPISD::XTOF:            return "SPISD::XTOF";
  case SPISD::CALL:            return "SPISD::CALL";
  case SPISD::RET_FLAG:        return "SPISD::RET_FLAG";
  case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
  case SPISD::FLUSHW:          return "SPISD::FLUSHW";
  case SPISD::TLS_ADD:         return "SPISD::TLS_ADD";
  case SPISD::TLS_LD:          return "SPISD::TLS_LD";
  case SPISD::TLS_CALL:        return "SPISD::TLS_CALL";
  }
  return nullptr;
}

EVT SparcTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
                                            EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

/// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
/// be zero. Op is expected to be a target specific node. Used by DAG
/// combiner.
void SparcTargetLowering::computeKnownBitsForTargetNode
                                (const SDValue Op,
                                 KnownBits &Known,
                                 const APInt &DemandedElts,
                                 const SelectionDAG &DAG,
                                 unsigned Depth) const {
  KnownBits Known2;
  Known.resetAll();

  switch (Op.getOpcode()) {
  default: break;
  case SPISD::SELECT_ICC:
  case SPISD::SELECT_XCC:
  case SPISD::SELECT_FCC:
    Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
    Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);
    break;
  }
}

// Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
// set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
                             ISD::CondCode CC, unsigned &SPCC) {
  if (isNullConstant(RHS) &&
      CC == ISD::SETNE &&
      (((LHS.getOpcode() == SPISD::SELECT_ICC ||
         LHS.getOpcode() == SPISD::SELECT_XCC) &&
        LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
       (LHS.getOpcode() == SPISD::SELECT_FCC &&
        LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
      isOneConstant(LHS.getOperand(0)) &&
      isNullConstant(LHS.getOperand(1))) {
    SDValue CMPCC = LHS.getOperand(3);
    SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
    LHS = CMPCC.getOperand(0);
    RHS = CMPCC.getOperand(1);
  }
}
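
// Example of the pattern LookThroughSetCC rewrites: a DAG like
//   (br_cc setne, (select_icc 1, 0, cc, (cmpicc %a, %b)), 0)
// is changed so the branch tests (cmpicc %a, %b) with cc directly, instead
// of materializing a boolean and re-comparing it against zero.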

// Convert to a target node and set target flags.
SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
                                             SelectionDAG &DAG) const {
  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
    return DAG.getTargetGlobalAddress(GA->getGlobal(),
                                      SDLoc(GA),
                                      GA->getValueType(0),
                                      GA->getOffset(), TF);

  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
    return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
                                     CP->getAlign(), CP->getOffset(), TF);

  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
    return DAG.getTargetBlockAddress(BA->getBlockAddress(),
                                     Op.getValueType(),
                                     0,
                                     TF);

  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
    return DAG.getTargetExternalSymbol(ES->getSymbol(),
                                       ES->getValueType(0), TF);

  llvm_unreachable("Unhandled address SDNode");
}

// Split Op into high and low parts according to HiTF and LoTF.
// Return an ADD node combining the parts.
SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
                                          unsigned HiTF, unsigned LoTF,
                                          SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
}
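
// For the absolute small code model this pair selects to the classic
// two-instruction SPARC sequence
//   sethi %hi(sym), %reg
//   or    %reg, %lo(sym), %reg
// where %hi() is the upper 22 bits of the address and %lo() the lower 10.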

// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
// or ExternalSymbol SDNode.
SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = getPointerTy(DAG.getDataLayout());

  // Handle PIC mode first. SPARC needs a got load for every variable!
  if (isPositionIndependent()) {
    const Module *M = DAG.getMachineFunction().getFunction().getParent();
    PICLevel::Level picLevel = M->getPICLevel();
    SDValue Idx;

    if (picLevel == PICLevel::SmallPIC) {
      // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
      Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
                        withTargetFlags(Op, SparcMCExpr::VK_Sparc_GOT13, DAG));
    } else {
      // This is the pic32 code model, the GOT is known to be smaller than 4GB.
      Idx = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_GOT22,
                         SparcMCExpr::VK_Sparc_GOT10, DAG);
    }

    SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
    SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
    // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
    // function has calls.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    MFI.setHasCalls(true);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
                       MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  }

  // This is one of the absolute code models.
  switch(getTargetMachine().getCodeModel()) {
  default:
    llvm_unreachable("Unsupported absolute code model");
  case CodeModel::Small:
    // abs32.
    return makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
                        SparcMCExpr::VK_Sparc_LO, DAG);
  case CodeModel::Medium: {
    // abs44.
    SDValue H44 = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_H44,
                               SparcMCExpr::VK_Sparc_M44, DAG);
    H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
    SDValue L44 = withTargetFlags(Op, SparcMCExpr::VK_Sparc_L44, DAG);
    L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
    return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
  }
  case CodeModel::Large: {
    // abs64.
    SDValue Hi = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HH,
                              SparcMCExpr::VK_Sparc_HM, DAG);
    Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
    SDValue Lo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
                              SparcMCExpr::VK_Sparc_LO, DAG);
    return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
  }
  }
}

SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
                                               SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}
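
// TLS lowering below handles all four standard models: General and Local
// Dynamic go through a __tls_get_addr call, Initial Exec loads the thread
// offset from the GOT, and Local Exec folds the offset directly against
// the thread pointer in %g7.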

SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  SDLoc DL(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  TLSModel::Model model = getTargetMachine().getTLSModel(GV);

  if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
    unsigned HiTF = ((model == TLSModel::GeneralDynamic)
                         ? SparcMCExpr::VK_Sparc_TLS_GD_HI22
                         : SparcMCExpr::VK_Sparc_TLS_LDM_HI22);
    unsigned LoTF = ((model == TLSModel::GeneralDynamic)
                         ? SparcMCExpr::VK_Sparc_TLS_GD_LO10
                         : SparcMCExpr::VK_Sparc_TLS_LDM_LO10);
    unsigned addTF = ((model == TLSModel::GeneralDynamic)
                          ? SparcMCExpr::VK_Sparc_TLS_GD_ADD
                          : SparcMCExpr::VK_Sparc_TLS_LDM_ADD);
    unsigned callTF = ((model == TLSModel::GeneralDynamic)
                           ? SparcMCExpr::VK_Sparc_TLS_GD_CALL
                           : SparcMCExpr::VK_Sparc_TLS_LDM_CALL);

    SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
    SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
    SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
                                   withTargetFlags(Op, addTF, DAG));

    SDValue Chain = DAG.getEntryNode();
    SDValue InFlag;

    Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
    Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag);
    InFlag = Chain.getValue(1);
    SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
    SDValue Symbol = withTargetFlags(Op, callTF, DAG);

    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
        DAG.getMachineFunction(), CallingConv::C);
    assert(Mask && "Missing call preserved mask for calling convention");
    SDValue Ops[] = {Chain,
                     Callee,
                     Symbol,
                     DAG.getRegister(SP::O0, PtrVT),
                     DAG.getRegisterMask(Mask),
                     InFlag};
    Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
    InFlag = Chain.getValue(1);
    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, DL, true),
                               DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
    InFlag = Chain.getValue(1);
    SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag);

    if (model != TLSModel::LocalDynamic)
      return Ret;

    SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
                 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_HIX22, DAG));
    SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
                 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_LOX10, DAG));
    HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
    return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
                   withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_ADD, DAG));
  }

  if (model == TLSModel::InitialExec) {
    unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
                                        : SparcMCExpr::VK_Sparc_TLS_IE_LD);

    SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);

    // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
    // function has calls.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    MFI.setHasCalls(true);

    SDValue TGA = makeHiLoPair(Op,
                               SparcMCExpr::VK_Sparc_TLS_IE_HI22,
                               SparcMCExpr::VK_Sparc_TLS_IE_LO10, DAG);
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
    SDValue Offset = DAG.getNode(SPISD::TLS_LD,
                                 DL, PtrVT, Ptr,
                                 withTargetFlags(Op, ldTF, DAG));
    return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
                       DAG.getRegister(SP::G7, PtrVT), Offset,
                       withTargetFlags(Op,
                                       SparcMCExpr::VK_Sparc_TLS_IE_ADD, DAG));
  }

  assert(model == TLSModel::LocalExec);
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
                  withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_HIX22, DAG));
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
                  withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_LOX10, DAG));
  SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);

  return DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getRegister(SP::G7, PtrVT), Offset);
}

SDValue SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain,
                                                  ArgListTy &Args, SDValue Arg,
                                                  const SDLoc &DL,
                                                  SelectionDAG &DAG) const {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  EVT ArgVT = Arg.getValueType();
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());

  ArgListEntry Entry;
  Entry.Node = Arg;
  Entry.Ty = ArgTy;

  if (ArgTy->isFP128Ty()) {
    // Create a stack object and pass the pointer to the library function.
    int FI = MFI.CreateStackObject(16, Align(8), false);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
                         Align(8));

    Entry.Node = FIPtr;
    Entry.Ty = PointerType::getUnqual(ArgTy);
  }
  Args.push_back(Entry);
  return Chain;
}

SDValue
SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
                                 const char *LibFuncName,
                                 unsigned numArgs) const {

  ArgListTy Args;

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
  Type *RetTyABI = RetTy;
  SDValue Chain = DAG.getEntryNode();
  SDValue RetPtr;

  if (RetTy->isFP128Ty()) {
    // Create a Stack Object to receive the return value of type f128.
    ArgListEntry Entry;
    int RetFI = MFI.CreateStackObject(16, Align(8), false);
    RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
    Entry.Node = RetPtr;
    Entry.Ty = PointerType::getUnqual(RetTy);
    if (!Subtarget->is64Bit()) {
      Entry.IsSRet = true;
      Entry.IndirectType = RetTy;
    }
    Entry.IsReturned = false;
    Args.push_back(Entry);
    RetTyABI = Type::getVoidTy(*DAG.getContext());
  }

  assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
  for (unsigned i = 0, e = numArgs; i != e; ++i) {
    Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
  }
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
    .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  // chain is in second result.
  if (RetTyABI == RetTy)
    return CallInfo.first;

  assert (RetTy->isFP128Ty() && "Unexpected return type!");

  Chain = CallInfo.second;

  // Load RetPtr to get the return value.
  return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
                     MachinePointerInfo(), Align(8));
}
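
// The _Q_cmp/_Qp_cmp soft-float comparison routines used below return a
// small integer rather than setting condition codes: 0 for equal, 1 for
// less, 2 for greater, and 3 for unordered (this matches the checks in
// LowerF128Compare; the contract comes from the SPARC software
// floating-point ABI).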

SDValue SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
                                              unsigned &SPCC, const SDLoc &DL,
                                              SelectionDAG &DAG) const {

  const char *LibCall = nullptr;
  bool is64Bit = Subtarget->is64Bit();
  switch(SPCC) {
  default: llvm_unreachable("Unhandled conditional code!");
  case SPCC::FCC_E  : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
  case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
  case SPCC::FCC_L  : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
  case SPCC::FCC_G  : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
  case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
  case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
  case SPCC::FCC_UL :
  case SPCC::FCC_ULE:
  case SPCC::FCC_UG :
  case SPCC::FCC_UGE:
  case SPCC::FCC_U  :
  case SPCC::FCC_O  :
  case SPCC::FCC_LG :
  case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
  }

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
  Type *RetTy = Type::getInt32Ty(*DAG.getContext());
  ArgListTy Args;
  SDValue Chain = DAG.getEntryNode();
  Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
  Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL).setChain(Chain)
    .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  // result is in first, and chain is in second result.
  SDValue Result = CallInfo.first;

  switch(SPCC) {
  default: {
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UL : {
    SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_ULE: {
    SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UG : {
    SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
    SPCC = SPCC::ICC_G;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UGE: {
    SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }

  case SPCC::FCC_U : {
    SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_O : {
    SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_LG : {
    SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UE : {
    SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  }
}

static SDValue
LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
                   const SparcTargetLowering &TLI) {

  if (Op.getOperand(0).getValueType() == MVT::f64)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);

  if (Op.getOperand(0).getValueType() == MVT::f32)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);

  llvm_unreachable("fpextend with non-float operand!");
  return SDValue();
}

static SDValue
LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
                  const SparcTargetLowering &TLI) {
  // FP_ROUND on f64 and f32 are legal.
  if (Op.getOperand(0).getValueType() != MVT::f128)
    return Op;

  if (Op.getValueType() == MVT::f64)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
  if (Op.getValueType() == MVT::f32)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);

  llvm_unreachable("fpround to non-float!");
  return SDValue();
}

static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  assert(VT == MVT::i32 || VT == MVT::i64);

  // Expand f128 operations to fp128 abi calls.
  if (Op.getOperand(0).getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
    const char *libName = TLI.getLibcallName(VT == MVT::i32
                                             ? RTLIB::FPTOSINT_F128_I32
                                             : RTLIB::FPTOSINT_F128_I64);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the resulting type is illegal.
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  // Otherwise, Convert the fp value to integer in an FP register.
  if (VT == MVT::i32)
    Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
  else
    Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));

  return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}

static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || (OpVT == MVT::i64));

  EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;

  // Expand f128 operations to fp128 ABI calls.
  if (Op.getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
    const char *libName = TLI.getLibcallName(OpVT == MVT::i32
                                             ? RTLIB::SINTTOFP_I32_F128
                                             : RTLIB::SINTTOFP_I64_F128);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the operand type is illegal.
  if (!TLI.isTypeLegal(OpVT))
    return SDValue();

  // Otherwise, Convert the int value to FP in an FP register.
  SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
  unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
  return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
}

static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  // Expand if it does not involve f128 or the target has support for
  // quad floating point instructions and the resulting type is legal.
  if (Op.getOperand(0).getValueType() != MVT::f128 ||
      (hasHardQuad && TLI.isTypeLegal(VT)))
    return SDValue();

  assert(VT == MVT::i32 || VT == MVT::i64);

  return TLI.LowerF128Op(Op, DAG,
                         TLI.getLibcallName(VT == MVT::i32
                                            ? RTLIB::FPTOUINT_F128_I32
                                            : RTLIB::FPTOUINT_F128_I64),
                         1);
}

static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || OpVT == MVT::i64);

  // Expand if it does not involve f128 or the target has support for
  // quad floating point instructions and the operand type is legal.
  if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
    return SDValue();

  return TLI.LowerF128Op(Op, DAG,
                         TLI.getLibcallName(OpVT == MVT::i32
                                            ? RTLIB::UINTTOFP_I32_F128
                                            : RTLIB::UINTTOFP_I64_F128),
                         1);
}

static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
                          const SparcTargetLowering &TLI,
                          bool hasHardQuad) {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;

  // If this is a br_cc of a "setcc", and if the setcc got lowered into
  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);

  // Get the condition flag.
  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
    // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
    Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
  } else {
    if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
      Opc = SPISD::BRICC;
    } else {
      CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      Opc = SPISD::BRFCC;
    }
  }
  return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
                     DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}

static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
                              const SparcTargetLowering &TLI,
                              bool hasHardQuad) {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;

  // If this is a select_cc of a "setcc", and if the setcc got lowered into
  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);

  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    Opc = LHS.getValueType() == MVT::i32 ?
          SPISD::SELECT_ICC : SPISD::SELECT_XCC;
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
  } else {
    if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
      Opc = SPISD::SELECT_ICC;
    } else {
      CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
      Opc = SPISD::SELECT_FCC;
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
    }
  }
  return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
                     DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}

static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
                            const SparcTargetLowering &TLI) {
  MachineFunction &MF = DAG.getMachineFunction();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());

  // Need frame address to find the address of VarArgsFrameIndex.
  MF.getFrameInfo().setFrameAddressIsTaken(true);

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc DL(Op);
  SDValue Offset =
      DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
                  DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc DL(Node);
  SDValue VAList =
      DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
  // Increment the pointer, VAList, to the next vaarg.
  SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits()/8,
                                                      DL));
  // Store the incremented VAList to the legalized pointer.
  InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
                         MachinePointerInfo(SV));
  // Load the actual argument out of the pointer VAList.
  // We can't count on greater alignment than the word size.
  return DAG.getLoad(
      VT, DL, InChain, VAList, MachinePointerInfo(),
      std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8);
}
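
// Note the final load's alignment: on 32-bit SPARC a va_arg of f64 is
// loaded with only 4-byte alignment (the minimum of the pointer width and
// the value width), since va_list slots are merely word aligned. This is
// what the "VAARG needs to be lowered" comment in the constructor refers
// to.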

static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
                                       const SparcSubtarget *Subtarget) {
  SDValue Chain = Op.getOperand(0);  // Legalize the chain.
  SDValue Size  = Op.getOperand(1);  // Legalize the size.
  MaybeAlign Alignment =
      cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
  Align StackAlign = Subtarget->getFrameLowering()->getStackAlign();
  EVT VT = Size->getValueType(0);
  SDLoc dl(Op);

  // TODO: implement over-aligned alloca. (Note: also implies
  // supporting support for overaligned function frames + dynamic
  // allocations, at all, which currently isn't supported)
  if (Alignment && *Alignment > StackAlign) {
    const MachineFunction &MF = DAG.getMachineFunction();
    report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
                       "over-aligned dynamic alloca not supported.");
  }

  // The resultant pointer needs to be above the register spill area
  // at the bottom of the stack.
  unsigned regSpillArea;
  if (Subtarget->is64Bit()) {
    regSpillArea = 128;
  } else {
    // On Sparc32, the size of the spill area is 92. Unfortunately,
    // that's only 4-byte aligned, not 8-byte aligned (the stack
    // pointer is 8-byte aligned). So, if the user asked for an 8-byte
    // aligned dynamic allocation, we actually need to add 96 to the
    // bottom of the stack, instead of 92, to ensure 8-byte alignment.

    // That also means adding 4 to the size of the allocation --
    // before applying the 8-byte rounding. Unfortunately, the
    // value we get here has already had rounding applied. So, we need
    // to add 8, instead, wasting a bit more memory.

    // Further, this only actually needs to be done if the required
    // alignment is > 4, but, we've lost that info by this point, too,
    // so we always apply it.

    // (An alternative approach would be to always reserve 96 bytes
    // instead of the required 92, but then we'd waste 4 extra bytes
    // in every frame, not just those with dynamic stack allocations)

    // TODO: modify code in SelectionDAGBuilder to make this less sad.

    Size = DAG.getNode(ISD::ADD, dl, VT, Size,
                       DAG.getConstant(8, dl, VT));
    regSpillArea = 96;
  }

  unsigned SPReg = SP::O6;
  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
  SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
  Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain

  regSpillArea += Subtarget->getStackPointerBias();

  SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
                               DAG.getConstant(regSpillArea, dl, VT));
  SDValue Ops[2] = { NewVal, Chain };
  return DAG.getMergeValues(Ops, dl);
}
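
// Example (32-bit): a dynamic alloca of N bytes becomes roughly
//   sub %sp, N+8, %sp
//   add %sp, 96, %result
// so the returned pointer sits above the 92-byte register save area and
// remains 8-byte aligned.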

static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  SDValue Chain = DAG.getNode(SPISD::FLUSHW,
                              dl, MVT::Other, DAG.getEntryNode());
  return Chain;
}

static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
                            const SparcSubtarget *Subtarget,
                            bool AlwaysFlush = false) {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned FrameReg = SP::I6;
  unsigned stackBias = Subtarget->getStackPointerBias();

  SDValue FrameAddr;
  SDValue Chain;

  // flush first to make sure the windowed registers' values are in stack
  Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();

  FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);

  unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;

  while (depth--) {
    SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, dl));
    FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
  }
  if (Subtarget->is64Bit())
    FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                            DAG.getIntPtrConstant(stackBias, dl));
  return FrameAddr;
}
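
// The flush matters because SPARC keeps the most recent stack frames in
// register windows: until those windows are spilled, the saved %fp chain
// walked here is not in memory. SPISD::FLUSHW selects to the V9 flushw
// instruction (pre-V9 subtargets typically use the "ta 3" flush-windows
// trap instead).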

static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
                              const SparcSubtarget *Subtarget) {

  uint64_t depth = Op.getConstantOperandVal(0);

  return getFRAMEADDR(depth, Op, DAG, Subtarget);
}

static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               const SparcSubtarget *Subtarget) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  uint64_t depth = Op.getConstantOperandVal(0);

  SDValue RetAddr;
  if (depth == 0) {
    auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
    RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
    return RetAddr;
  }

  // Need frame address to find return address of the caller.
  SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);

  unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
  SDValue Ptr = DAG.getNode(ISD::ADD,
                            dl, VT,
                            FrameAddr,
                            DAG.getIntPtrConstant(Offset, dl));
  RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());

  return RetAddr;
}

static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
                          unsigned opcode) {
  assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
  assert(opcode == ISD::FNEG || opcode == ISD::FABS);

  // Lower fneg/fabs on f64 to fneg/fabs on f32.
  // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
  // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.

  // Note: in little-endian, the floating-point value is stored in the
  // registers in the opposite order, so the subreg with the sign bit is
  // the highest-numbered (odd), rather than the lowest-numbered (even).

  SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
                                            SrcReg64);
  SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
                                            SrcReg64);

  if (DAG.getDataLayout().isLittleEndian())
    Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
  else
    Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);

  SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                dl, MVT::f64), 0);
  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
                                       DstReg64, Hi32);
  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
                                       DstReg64, Lo32);
  return DstReg64;
}

// Lower a f128 load into two f64 loads.
static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
{
  SDLoc dl(Op);
  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
  assert(LdNode->getOffset().isUndef() && "Unexpected node type");

  Align Alignment = commonAlignment(LdNode->getOriginalAlign(), 8);

  SDValue Hi64 =
      DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
                  LdNode->getPointerInfo(), Alignment);
  EVT addrVT = LdNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              LdNode->getBasePtr(),
                              DAG.getConstant(8, dl, addrVT));
  SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
                             LdNode->getPointerInfo().getWithOffset(8),
                             Alignment);

  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);

  SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                       dl, MVT::f128);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Hi64,
                               SubRegEven);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Lo64,
                               SubRegOdd);
  SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
                           SDValue(Lo64.getNode(), 1) };
  SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
  return DAG.getMergeValues(Ops, dl);
}

static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
{
  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());

  EVT MemVT = LdNode->getMemoryVT();
  if (MemVT == MVT::f128)
    return LowerF128Load(Op, DAG);

  return Op;
}

// Lower a f128 store into two f64 stores.
static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
  assert(StNode->getOffset().isUndef() && "Unexpected node type");

  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);

  SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegEven);
  SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegOdd);

  Align Alignment = commonAlignment(StNode->getOriginalAlign(), 8);

  SDValue OutChains[2];
  OutChains[0] =
      DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
                   StNode->getBasePtr(), StNode->getPointerInfo(),
                   Alignment);
  EVT addrVT = StNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              StNode->getBasePtr(),
                              DAG.getConstant(8, dl, addrVT));
  OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
                              StNode->getPointerInfo().getWithOffset(8),
                              Alignment);
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
{
  SDLoc dl(Op);
  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());

  EVT MemVT = St->getMemoryVT();
  if (MemVT == MVT::f128)
    return LowerF128Store(Op, DAG);

  if (MemVT == MVT::i64) {
    // Custom handling for i64 stores: turn it into a bitcast and a
    // v2i32 store.
    SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
    SDValue Chain = DAG.getStore(
        St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
        St->getOriginalAlign(), St->getMemOperand()->getFlags(),
        St->getAAInfo());
    return Chain;
  }

  return SDValue();
}
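
// Bitcasting the i64 value to v2i32 (which is a legal type on 32-bit
// subtargets, via the IntPair register class) lets the normal v2i32 store
// patterns handle it without ever needing an illegal i64 register; the
// commented-out AddPromotedToType calls at the top of the constructor
// record the alternative approach.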

static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
  assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
         && "invalid opcode");

  SDLoc dl(Op);

  if (Op.getValueType() == MVT::f64)
    return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
  if (Op.getValueType() != MVT::f128)
    return Op;

  // Lower fabs/fneg on f128 to fabs/fneg on f64
  // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
  // (As with LowerF64Op, on little-endian, we need to negate the odd
  // subreg instead.)

  SDValue SrcReg128 = Op.getOperand(0);
  SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
                                            SrcReg128);
  SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
                                            SrcReg128);

  if (DAG.getDataLayout().isLittleEndian()) {
    if (isV9)
      Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
    else
      Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
  } else {
    if (isV9)
      Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
    else
      Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
  }

  SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, MVT::f128), 0);
  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
                                        DstReg128, Hi64);
  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
                                        DstReg128, Lo64);
  return DstReg128;
}

static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {

  if (Op.getValueType() != MVT::i64)
    return Op;

  SDLoc dl(Op);
  SDValue Src1 = Op.getOperand(0);
  SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
  SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
                               DAG.getConstant(32, dl, MVT::i64));
  Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);

  SDValue Src2 = Op.getOperand(1);
  SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
  SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
                               DAG.getConstant(32, dl, MVT::i64));
  Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);

  bool hasChain = false;
  unsigned hiOpc = Op.getOpcode();
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Invalid opcode");
  case ISD::ADDC: hiOpc = ISD::ADDE; break;
  case ISD::ADDE: hasChain = true; break;
  case ISD::SUBC: hiOpc = ISD::SUBE; break;
  case ISD::SUBE: hasChain = true; break;
  }
  SDValue Lo;
  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
  if (hasChain) {
    Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
                     Op.getOperand(2));
  } else {
    Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
  }
  SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
  SDValue Carry = Hi.getValue(1);

  Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
  Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
  Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
                   DAG.getConstant(32, dl, MVT::i64));

  SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
  SDValue Ops[2] = { Dst, Carry };
  return DAG.getMergeValues(Ops, dl);
}
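
// For example, an i64 ADDC on V9 ends up as a 32-bit ADDC (producing the
// icc carry) feeding a 32-bit ADDE for the high half, because the
// carry-consuming add/sub instruction forms read the 32-bit icc carry
// bit; the two halves are then reassembled with the zext/shl/or above.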

// Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
// in LegalizeDAG.cpp except the order of arguments to the library function.
static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
                                const SparcTargetLowering &TLI)
{
  unsigned opcode = Op.getOpcode();
  assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");

  bool isSigned = (opcode == ISD::SMULO);
  EVT VT = MVT::i64;
  EVT WideVT = MVT::i128;
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);

  if (LHS.getValueType() != VT)
    return Op;

  SDValue ShiftAmt = DAG.getConstant(63, dl, VT);

  SDValue RHS = Op.getOperand(1);
  SDValue HiLHS, HiRHS;
  if (isSigned) {
    HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
    HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
  } else {
    HiLHS = DAG.getConstant(0, dl, VT);
    HiRHS = DAG.getConstant(0, dl, MVT::i64);
  }

  SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };

  TargetLowering::MakeLibCallOptions CallOptions;
  CallOptions.setSExt(isSigned);
  SDValue MulResult = TLI.makeLibCall(DAG,
                                      RTLIB::MUL_I128, WideVT,
                                      Args, CallOptions, dl).first;
  SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
                                   MulResult, DAG.getIntPtrConstant(0, dl));
  SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
                                MulResult, DAG.getIntPtrConstant(1, dl));
  if (isSigned) {
    SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
    TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
  } else {
    TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
                           ISD::SETNE);
  }
  // MulResult is a node with an illegal type. Because such things are not
  // generally permitted during this phase of legalization, ensure that
  // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have
  // been folded.
  assert(MulResult->use_empty() && "Illegally typed node still in use!");

  SDValue Ops[2] = { BottomHalf, TopHalf } ;
  return DAG.getMergeValues(Ops, dl);
}
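
// The argument order (HiLHS, LHS, HiRHS, RHS) passes each 128-bit operand
// as a big-endian (high word first) register pair, which is how the SPARC
// ABI lays out __multi3's i128 arguments; that ordering is the only
// difference from the generic expansion mentioned above. Overflow is then
// detected by checking that the top half is not just the sign/zero
// extension of the bottom half.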

static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
    // Expand with a fence.
    return SDValue();
  }

  // Monotonic load/stores are legal.
  return Op;
}

SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getRegister(SP::G7, PtrVT);
  }
  }
}
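
// %g7 is the ABI-reserved thread pointer on SPARC, so llvm.thread.pointer
// simply reads that register.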

SDValue SparcTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {

  bool hasHardQuad = Subtarget->hasHardQuad();
  bool isV9 = Subtarget->isV9();

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Should not custom lower this!");

  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG, *this,
                                                       Subtarget);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG,
                                                      Subtarget);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::FP_TO_UINT:         return LowerFP_TO_UINT(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::UINT_TO_FP:         return LowerUINT_TO_FP(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::BR_CC:              return LowerBR_CC(Op, DAG, *this,
                                                  hasHardQuad);
  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG, *this,
                                                      hasHardQuad);
  case ISD::VASTART:            return LowerVASTART(Op, DAG, *this);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
                                                               Subtarget);

  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::FADD:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::ADD_F128), 2);
  case ISD::FSUB:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::SUB_F128), 2);
  case ISD::FMUL:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::MUL_F128), 2);
  case ISD::FDIV:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::DIV_F128), 2);
  case ISD::FSQRT:              return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::SQRT_F128),1);
  case ISD::FABS:
  case ISD::FNEG:               return LowerFNEGorFABS(Op, DAG, isV9);
  case ISD::FP_EXTEND:          return LowerF128_FPEXTEND(Op, DAG, *this);
  case ISD::FP_ROUND:           return LowerF128_FPROUND(Op, DAG, *this);
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE:               return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
  case ISD::UMULO:
  case ISD::SMULO:              return LowerUMULO_SMULO(Op, DAG, *this);
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:       return LowerATOMIC_LOAD_STORE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  }
}

SDValue SparcTargetLowering::bitcastConstantFPToInt(ConstantFPSDNode *C,
                                                    const SDLoc &DL,
                                                    SelectionDAG &DAG) const {
  APInt V = C->getValueAPF().bitcastToAPInt();
  SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
  SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
  if (DAG.getDataLayout().isLittleEndian())
    std::swap(Lo, Hi);
  return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
}
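
// Worked example (illustrative): the f64 constant 1.0 has bit pattern
// 0x3FF0000000000000, so Hi = 0x3FF00000 and Lo = 0x00000000, giving the
// v2i32 build_vector { 0x3FF00000, 0x00000000 } for a big-endian layout.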

SDValue SparcTargetLowering::PerformBITCASTCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
  SDLoc dl(N);
  SDValue Src = N->getOperand(0);

  if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
      Src.getSimpleValueType() == MVT::f64)
    return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);

  return SDValue();
}

SDValue SparcTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BITCAST:
    return PerformBITCASTCombine(N, DCI);
  }
  return SDValue();
}

MachineBasicBlock *
SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default: llvm_unreachable("Unknown SELECT_CC!");
  case SP::SELECT_CC_Int_ICC:
  case SP::SELECT_CC_FP_ICC:
  case SP::SELECT_CC_DFP_ICC:
  case SP::SELECT_CC_QFP_ICC:
    return expandSelectCC(MI, BB, SP::BCOND);
  case SP::SELECT_CC_Int_FCC:
  case SP::SELECT_CC_FP_FCC:
  case SP::SELECT_CC_DFP_FCC:
  case SP::SELECT_CC_QFP_FCC:
    return expandSelectCC(MI, BB, SP::FBCOND);
  }
}

MachineBasicBlock *
SparcTargetLowering::expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned BROpcode) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // triangle control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and the condition code for the branch.
  //
  // We produce the following control flow:
  //     ThisMBB
  //     |  \
  //     |  IfFalseMBB
  //     | /
  //    SinkMBB
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  MachineBasicBlock *ThisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, IfFalseMBB);
  F->insert(It, SinkMBB);

  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
                  std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);

  // Set the new successors for ThisMBB.
  ThisMBB->addSuccessor(IfFalseMBB);
  ThisMBB->addSuccessor(SinkMBB);

  BuildMI(ThisMBB, dl, TII.get(BROpcode))
      .addMBB(SinkMBB)
      .addImm(CC);

  // IfFalseMBB just falls through to SinkMBB.
  IfFalseMBB->addSuccessor(SinkMBB);

  // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
  BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
          MI.getOperand(0).getReg())
      .addReg(MI.getOperand(1).getReg())
      .addMBB(ThisMBB)
      .addReg(MI.getOperand(2).getReg())
      .addMBB(IfFalseMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return SinkMBB;
}
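
// Illustrative expansion sketch (not in the original source): for a pseudo
//   %res = SELECT_CC_Int_ICC %tval, %fval, cond
// the blocks built above correspond roughly to
//   ThisMBB:    b<cond> SinkMBB       ! BROpcode branch; taken => %tval
//   IfFalseMBB:                       ! fallthrough block  => %fval
//   SinkMBB:    %res = phi [%tval, ThisMBB], [%fval, IfFalseMBB]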

//===----------------------------------------------------------------------===//
// Sparc Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
SparcTargetLowering::ConstraintType
SparcTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    case 'r':
    case 'f':
    case 'e':
      return C_RegisterClass;
    case 'I': // SIMM13
      return C_Immediate;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}
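
// Usage note (illustrative): in GCC-style inline asm for SPARC, "r", "f" and
// "e" name integer / float / extended-float register classes, while "I"
// accepts a 13-bit signed immediate, e.g.
//   asm volatile("add %1, %2, %0" : "=r"(res) : "r"(a), "I"(42));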

TargetLowering::ConstraintWeight SparcTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;

  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'I': // SIMM13
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (isInt<13>(C->getSExtValue()))
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}
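
// Worked note (illustrative): isInt<13>(x) accepts the simm13 range
// [-4096, 4095], so a constant operand such as 42 matches 'I' at CW_Constant
// weight, while 4096 does not match and the operand falls back to other
// constraints.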

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void SparcTargetLowering::
LowerAsmOperandForConstraint(SDValue Op,
                             std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<13>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
      return;
    }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass *>
SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      if (VT == MVT::v2i32)
        return std::make_pair(0U, &SP::IntPairRegClass);
      else if (Subtarget->is64Bit())
        return std::make_pair(0U, &SP::I64RegsRegClass);
      else
        return std::make_pair(0U, &SP::IntRegsRegClass);
    case 'f':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &SP::FPRegsRegClass);
      else if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &SP::LowDFPRegsRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SP::LowQFPRegsRegClass);
      // This will generate an error message
      return std::make_pair(0U, nullptr);
    case 'e':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &SP::FPRegsRegClass);
      else if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &SP::DFPRegsRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SP::QFPRegsRegClass);
      // This will generate an error message
      return std::make_pair(0U, nullptr);
    }
  } else if (!Constraint.empty() && Constraint.size() <= 5
              && Constraint[0] == '{' && *(Constraint.end()-1) == '}') {
    // constraint = '{r<d>}'
    // Remove the braces from around the name.
    StringRef name(Constraint.data()+1, Constraint.size()-2);
    // Handle register aliases:
    //       r0-r7   -> g0-g7
    //       r8-r15  -> o0-o7
    //       r16-r23 -> l0-l7
    //       r24-r31 -> i0-i7
    uint64_t intVal = 0;
    if (name.substr(0, 1).equals("r")
        && !name.substr(1).getAsInteger(10, intVal) && intVal <= 31) {
      const char regTypes[] = { 'g', 'o', 'l', 'i' };
      char regType = regTypes[intVal/8];
      char regIdx = '0' + (intVal % 8);
      char tmp[] = { '{', regType, regIdx, '}', 0 };
      std::string newConstraint = std::string(tmp);
      return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
                                                          VT);
    }
    if (name.substr(0, 1).equals("f") &&
        !name.substr(1).getAsInteger(10, intVal) && intVal <= 63) {
      std::string newConstraint;

      if (VT == MVT::f32 || VT == MVT::Other) {
        newConstraint = "{f" + utostr(intVal) + "}";
      } else if (VT == MVT::f64 && (intVal % 2 == 0)) {
        newConstraint = "{d" + utostr(intVal / 2) + "}";
      } else if (VT == MVT::f128 && (intVal % 4 == 0)) {
        newConstraint = "{q" + utostr(intVal / 4) + "}";
      } else {
        return std::make_pair(0U, nullptr);
      }
      return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
                                                          VT);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
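
// Worked example (illustrative): the constraint "{r10}" has intVal = 10, so
// regTypes[10/8] = 'o' and '0' + (10 % 8) = '2', and it is rewritten to
// "{o2}". Likewise "{f2}" with VT == MVT::f64 becomes "{d1}", since each
// double-precision register aliases an even/odd pair of single-precision
// registers.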

bool
SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The Sparc target isn't yet aware of offsets.
  return false;
}

void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>& Results,
                                             SelectionDAG &DAG) const {

  SDLoc dl(N);

  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");

  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // Custom lower only if it involves f128 or i64.
    if (N->getOperand(0).getValueType() != MVT::f128
        || N->getValueType(0) != MVT::i64)
      return;
    libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
               ? RTLIB::FPTOSINT_F128_I64
               : RTLIB::FPTOUINT_F128_I64);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::READCYCLECOUNTER: {
    assert(Subtarget->hasLeonCycleCounter());
    SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
    SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
    SDValue Ops[] = { Lo, Hi };
    SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
    Results.push_back(Pair);
    Results.push_back(N->getOperand(0));
    return;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    // Custom lower only if it involves f128 or i64.
    if (N->getValueType(0) != MVT::f128
        || N->getOperand(0).getValueType() != MVT::i64)
      return;

    libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
               ? RTLIB::SINTTOFP_I64_F128
               : RTLIB::UINTTOFP_I64_F128);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::LOAD: {
    LoadSDNode *Ld = cast<LoadSDNode>(N);
    // Custom handling only for i64: turn i64 load into a v2i32 load,
    // followed by a bitcast.
    if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
      return;

    SDValue LoadRes = DAG.getExtLoad(
        Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
        Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32,
        Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags(),
        Ld->getAAInfo());

    SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
    Results.push_back(Res);
    Results.push_back(LoadRes.getValue(1));
    return;
  }
  }
}
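
// Illustrative effect (not in the original source): after type legalization,
//   %v = load i64, ptr %p
// on 32-bit SPARC becomes a single v2i32 load followed by a bitcast back to
// i64, with the load's chain preserved as the second result.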

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool SparcTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void SparcTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}
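
// Background note (illustrative, an assumption rather than from this source):
// on glibc-based SPARC/Linux the stack-protector canary lives in the thread
// control block reached through %g7 rather than in the __stack_chk_guard
// global, which is why LOAD_STACK_GUARD is preferred there and no SSP
// declarations are inserted into the module.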