//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Sparc uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "SparcISelLowering.h"
#include "MCTargetDesc/SparcMCExpr.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcRegisterInfo.h"
#include "SparcTargetMachine.h"
#include "SparcTargetObjectFile.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;
//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
                                 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  assert(ArgFlags.isSRet());

  // Assign SRet argument.
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                         0,
                                         LocVT, LocInfo));
  return true;
}
static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
                                     MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                     ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };
  // Try to get first reg.
  if (Register Reg = State.AllocateReg(RegList)) {
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  } else {
    // Assign whole thing in stack.
    State.addLoc(CCValAssign::getCustomMem(
        ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
    return true;
  }

  // Try to get second reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getCustomMem(
        ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
  return true;
}
static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
                                         MVT &LocVT,
                                         CCValAssign::LocInfo &LocInfo,
                                         ISD::ArgFlagsTy &ArgFlags,
                                         CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };

  // Try to get first reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  // Try to get second reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  return true;
}
// Allocate a full-sized argument for the 64-bit ABI.
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
                            MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert((LocVT == MVT::f32 || LocVT == MVT::f128
          || LocVT.getSizeInBits() == 64) &&
         "Can't handle non-64 bits locations");

  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
  unsigned size = (LocVT == MVT::f128) ? 16 : 8;
  Align alignment = (LocVT == MVT::f128) ? Align(16) : Align(8);
  unsigned Offset = State.AllocateStack(size, alignment);
  unsigned Reg = 0;

  if (LocVT == MVT::i64 && Offset < 6*8)
    // Promote integers to %i0-%i5.
    Reg = SP::I0 + Offset/8;
  else if (LocVT == MVT::f64 && Offset < 16*8)
    // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
    Reg = SP::D0 + Offset/8;
  else if (LocVT == MVT::f32 && Offset < 16*8)
    // Promote floats to %f1, %f3, ...
    Reg = SP::F1 + Offset/4;
  else if (LocVT == MVT::f128 && Offset < 16*8)
    // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
    Reg = SP::Q0 + Offset/16;

  // Promote to register when possible, otherwise use the stack slot.
  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // This argument goes on the stack in an 8-byte slot.
  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
  if (LocVT == MVT::f32)
    Offset += 4;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}
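// For example, the third i64 argument is allocated Offset = 16 and is
// promoted to SP::I0 + 16/8 = %i2, while a seventh i64 (Offset = 48) fails
// the Offset < 6*8 test and stays in its stack slot at [%fp+BIAS+128+48].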
// Allocate a half-sized argument for the 64-bit ABI.
//
// This is used when passing { float, int } structs by value in registers.
static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
                            MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
  unsigned Offset = State.AllocateStack(4, Align(4));

  if (LocVT == MVT::f32 && Offset < 16*8) {
    // Promote floats to %f0-%f31.
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
                                     LocVT, LocInfo));
    return true;
  }

  if (LocVT == MVT::i32 && Offset < 6*8) {
    // Promote integers to %i0-%i5, using half the register.
    unsigned Reg = SP::I0 + Offset/8;
    LocVT = MVT::i64;
    LocInfo = CCValAssign::AExt;

    // Set the Custom bit if this i32 goes in the high bits of a register.
    if (Offset % 8 == 0)
      State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
                                             LocVT, LocInfo));
    else
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}
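// For example, in a { float, int } struct passed in one register argument
// slot, the f32 at offset 0 is promoted to %f0 and the i32 at offset 4 uses
// the low half of %i0; an i32 at an 8-byte-aligned offset would instead get
// the Custom bit and occupy the high half of its register.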
#include "SparcGenCallingConv.inc"
// The calling conventions in SparcCallingConv.td are described in terms of the
// callee's register window. This function translates registers to the
// corresponding caller window %o register.
static unsigned toCallerWindow(unsigned Reg) {
  static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
                "Unexpected enum");
  if (Reg >= SP::I0 && Reg <= SP::I7)
    return Reg - SP::I0 + SP::O0;
  return Reg;
}
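// For example, toCallerWindow(SP::I1) returns SP::O1; registers outside the
// %i0-%i7 range (such as FP registers) are returned unchanged.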
SDValue
SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  if (Subtarget->is64Bit())
    return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
}
SDValue
SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Make room for the return address offset.
  RetOps.push_back(SDValue());

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::v2i32);
      // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
      // happen by default if this wasn't a legal type)

      SDValue Part0 =
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Arg,
                      DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 =
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Arg,
                      DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));

      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Flag);
      Flag = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
                               Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
  // If the function returns a struct, copy the SRetReturnReg to I0
  if (MF.getFunction().hasStructRetAttr()) {
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    Register Reg = SFI->getSRetReturnReg();
    if (!Reg)
      llvm_unreachable("sret virtual register not created in the entry block");
    auto PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
    Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
    RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
  }

  RetOps[0] = Chain;  // Update chain.
  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
}
// Lower return values for the 64-bit ABI.
// Return values are passed exactly the same way as function arguments.
SDValue
SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // The second operand on the return instruction is the return address offset.
  // The return address is always %i7+8 with the 64-bit ABI.
  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue OutVal = OutVals[i];

    // Integer return values must be sign or zero extended by the callee.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::ZExt:
      OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::AExt:
      OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    // The custom bit on an i32 return value indicates that it should be passed
    // in the high bits of the register.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
      OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
                           DAG.getConstant(32, DL, MVT::i32));

      // The next value may go in the low bits of the same register.
      // Handle both at once.
      if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
        SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
        OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
        // Skip the next value, it's already done.
        ++i;
      }
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
}
SDValue SparcTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
                                   DL, DAG, InVals);
  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
                                 DL, DAG, InVals);
}
/// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
/// passed in either one or two GPRs, including FP values. TODO: we should
/// pass FP values in FP registers for fastcc functions.
SDValue SparcTargetLowering::LowerFormalArguments_32(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);

  // On the 32-bit ABI, [%fp+64] holds the sret slot, the six register
  // argument words live at [%fp+68]..[%fp+88], and further stack arguments
  // start at [%fp+92].
  const unsigned StackOffset = 92;
  bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();

  unsigned InIdx = 0;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
    CCValAssign &VA = ArgLocs[i];

    if (Ins[InIdx].Flags.isSRet()) {
      if (InIdx != 0)
        report_fatal_error("sparc only supports sret on the first parameter");
      // Get SRet from [%fp+64].
      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
      SDValue Arg =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
      InVals.push_back(Arg);
      continue;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom()) {
        assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

        Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
        MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
        SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);

        assert(i+1 < e);
        CCValAssign &NextVA = ArgLocs[++i];

        SDValue LoVal;
        if (NextVA.isMemLoc()) {
          int FrameIdx = MF.getFrameInfo().
            CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(), true);
          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
          LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
        } else {
          Register loReg = MF.addLiveIn(NextVA.getLocReg(),
                                        &SP::IntRegsRegClass);
          LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
        }

        if (IsLittleEndian)
          std::swap(LoVal, HiVal);

        SDValue WholeValue =
            DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
        WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
        InVals.push_back(WholeValue);
        continue;
      }

      Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
      SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      if (VA.getLocVT() == MVT::f32)
        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
      else if (VA.getLocVT() != MVT::i32) {
        Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
                          DAG.getValueType(VA.getLocVT()));
        Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
      }
      InVals.push_back(Arg);
      continue;
    }

    assert(VA.isMemLoc());

    unsigned Offset = VA.getLocMemOffset() + StackOffset;
    auto PtrVT = getPointerTy(DAG.getDataLayout());

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
      // If it is double-word aligned, just load.
      if (Offset % 8 == 0) {
        int FI = MF.getFrameInfo().CreateFixedObject(8, Offset, true);
        SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
        SDValue Load =
            DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
        InVals.push_back(Load);
        continue;
      }

      int FI = MF.getFrameInfo().CreateFixedObject(4, Offset, true);
      SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
      SDValue HiVal =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
      int FI2 = MF.getFrameInfo().CreateFixedObject(4, Offset+4, true);
      SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);

      SDValue LoVal =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());

      if (IsLittleEndian)
        std::swap(LoVal, HiVal);

      SDValue WholeValue =
          DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
      WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
      InVals.push_back(WholeValue);
      continue;
    }

    int FI = MF.getFrameInfo().CreateFixedObject(4, Offset, true);
    SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
    SDValue Load;
    if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
      Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
    } else if (VA.getValVT() == MVT::f128) {
      report_fatal_error("SPARCv8 does not handle f128 in calls; "
                         "pass indirectly");
    } else {
      // We shouldn't see any other value types here.
      llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
    }
    InVals.push_back(Load);
  }

  if (MF.getFunction().hasStructRetAttr()) {
    // Copy the SRet Argument to SRetReturnReg.
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    Register Reg = SFI->getSRetReturnReg();
    if (!Reg) {
      Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
      SFI->setSRetReturnReg(Reg);
    }
    SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
  }

  // Store remaining ArgRegs to the stack if this is a varargs function.
  if (isVarArg) {
    static const MCPhysReg ArgRegs[] = {
      SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
    };
    unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
    const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
    unsigned ArgOffset = CCInfo.getNextStackOffset();
    if (NumAllocated == 6)
      ArgOffset += StackOffset;
    else {
      assert(!ArgOffset);
      ArgOffset = 68+4*NumAllocated;
    }

    // Remember the vararg offset for the va_start implementation.
    FuncInfo->setVarArgsFrameOffset(ArgOffset);

    std::vector<SDValue> OutChains;

    for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
      Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
      SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);

      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
                                                         true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);

      OutChains.push_back(
          DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
      ArgOffset += 4;
    }

    if (!OutChains.empty()) {
      OutChains.push_back(Chain);
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
    }
  }

  return Chain;
}

// Lower formal arguments for the 64-bit ABI.
SDValue SparcTargetLowering::LowerFormalArguments_64(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze arguments according to CC_Sparc64.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);

  // The argument array begins at %fp+BIAS+128, after the register save area.
  const unsigned ArgArea = 128;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (VA.isRegLoc()) {
      // This argument is passed in a register.
      // All integer register arguments are promoted by the caller to i64.

      // Create a virtual register for the promoted live-in value.
      Register VReg = MF.addLiveIn(VA.getLocReg(),
                                   getRegClassFor(VA.getLocVT()));
      SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());

      // Get the high bits for i32 struct elements.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom())
        Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
                          DAG.getConstant(32, DL, MVT::i32));

      // The caller promoted the argument, so insert an Assert?ext SDNode so we
      // won't promote the value again in this function.
      switch (VA.getLocInfo()) {
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      default:
        break;
      }

      // Truncate the register down to the argument type.
      if (VA.isExtInLoc())
        Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);

      InVals.push_back(Arg);
      continue;
    }

    // The registers are exhausted. This argument was passed on the stack.
    assert(VA.isMemLoc());
    // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
    // beginning of the arguments area at %fp+BIAS+128.
    unsigned Offset = VA.getLocMemOffset() + ArgArea;
    unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
    // Adjust offset for extended arguments, SPARC is big-endian.
    // The caller will have written the full slot with extended bytes, but we
    // prefer our own extending loads.
    if (VA.isExtInLoc())
      Offset += 8 - ValSize;
    int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
    InVals.push_back(
        DAG.getLoad(VA.getValVT(), DL, Chain,
                    DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
                    MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!IsVarArg)
    return Chain;

  // This function takes variable arguments, some of which may have been passed
  // in registers %i0-%i5. Variable floating point arguments are never passed
  // in floating point registers. They go on %i0-%i5 or on the stack like
  // integer arguments.
  //
  // The va_start intrinsic needs to know the offset to the first variable
  // argument.
  unsigned ArgOffset = CCInfo.getNextStackOffset();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  // Skip the 128 bytes of register save area.
  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
                                  Subtarget->getStackPointerBias());

  // Save the variable arguments that were passed in registers.
  // The caller is required to reserve stack space for 6 arguments regardless
  // of how many arguments were actually passed.
  SmallVector<SDValue, 8> OutChains;
  for (; ArgOffset < 6*8; ArgOffset += 8) {
    Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
    SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
    int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
    auto PtrVT = getPointerTy(MF.getDataLayout());
    OutChains.push_back(
        DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
                     MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);

  return Chain;
}
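// Note that on SPARCv9 getStackPointerBias() returns 2047, so the vararg
// frame offset computed above effectively points past [%fp+2047+128].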
SDValue
SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerCall_64(CLI, InVals);
  return LowerCall_32(CLI, InVals);
}
static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
                                const CallBase *Call) {
  if (Call)
    return Call->hasFnAttr(Attribute::ReturnsTwice);

  const Function *CalleeFn = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    CalleeFn = dyn_cast<Function>(G->getGlobal());
  } else if (ExternalSymbolSDNode *E =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const Function &Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn.getParent();
    const char *CalleeName = E->getSymbol();
    CalleeFn = M->getFunction(CalleeName);
  }

  if (!CalleeFn)
    return false;
  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
}

// Lower a call for the 32-bit ABI.
SDValue
SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  // Sparc target does not yet support tail call optimization.
  isTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);

  // Get the size of the outgoing arguments stack space requirement.
  unsigned ArgsSize = CCInfo.getNextStackOffset();

  // Keep stack frames 8-byte aligned.
  ArgsSize = (ArgsSize+7) & ~7;

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  // Create local copies for byval args.
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    Align Alignment = Flags.getNonZeroByValAlign();

    if (Size > 0U) {
      int FI = MFI.CreateStackObject(Size, Alignment, false);
      SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);

      Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
                            false,        // isVolatile,
                            (Size <= 32), // AlwaysInline if size <= 32,
                            false,        // isTailCall
                            MachinePointerInfo(), MachinePointerInfo());
      ByValArgs.push_back(FIPtr);
    } else {
      SDValue nullVal;
      ByValArgs.push_back(nullVal);
    }
  }

  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  const unsigned StackOffset = 92;
  bool hasStructRetAttr = false;
  unsigned SRetArgSize = 0;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];

    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Use local copy if it is a byval arg.
    if (Flags.isByVal()) {
      Arg = ByValArgs[byvalArgIdx++];
      if (!Arg)
        continue;
    }

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (Flags.isSRet()) {
      assert(VA.needsCustom());
      // Store SRet argument in %sp+64.
      SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
      SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
      PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
      MemOpChains.push_back(
          DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
      hasStructRetAttr = true;
      // sret only allowed on first argument
      assert(Outs[realArgIdx].OrigArgIndex == 0);
      PointerType *Ty = cast<PointerType>(CLI.getArgs()[0].Ty);
      Type *ElementTy = Ty->getElementType();
      SRetArgSize = DAG.getDataLayout().getTypeAllocSize(ElementTy);
      continue;
    }

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

      if (VA.isMemLoc()) {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // If it is double-word aligned, just store.
        if (Offset % 8 == 0) {
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
          continue;
        }
      }

      if (VA.getLocVT() == MVT::f64) {
        // Move the float value from the float registers into the
        // integer registers.
        if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
          Arg = bitcastConstantFPToInt(C, dl, DAG);
        else
          Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
      }

      SDValue Part0 =
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Arg,
                      DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 =
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Arg,
                      DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));

      if (VA.isRegLoc()) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
        assert(i+1 != e);
        CCValAssign &NextVA = ArgLocs[++i];
        if (NextVA.isRegLoc()) {
          RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
        } else {
          // Store the second part in stack.
          unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
        }
      } else {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // Store the first part.
        SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
        SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
        // Store the second part.
        PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
      }
      continue;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      if (VA.getLocVT() != MVT::f32) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
        continue;
      }
      Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
                                           dl);
    PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before any copies into physregs.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Register Reg = toCallerWindow(RegsToPass[i].first);
    Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30
                                        : SparcMCExpr::VK_Sparc_WDISP30;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  if (hasStructRetAttr)
    Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first),
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice)
           ? TRI->getRTCallPreservedMask(CallConv)
           : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  InFlag = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    if (RVLocs[i].getLocVT() == MVT::v2i32) {
      SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
      SDValue Lo = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
                        DAG.getConstant(0, dl, MVT::i32));
      SDValue Hi = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
                        DAG.getConstant(1, dl, MVT::i32));
      InVals.push_back(Vec);
    } else {
      Chain =
          DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
                             RVLocs[i].getValVT(), InFlag)
              .getValue(1);
      InFlag = Chain.getValue(2);
      InVals.push_back(Chain.getValue(0));
    }
  }

  return Chain;
}
// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register SparcTargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                                const MachineFunction &MF) const {
  Register Reg = StringSwitch<Register>(RegName)
    .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
    .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
    .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
    .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
    .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
    .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
    .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
    .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
    .Default(0);

  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}
// Fixup floating point arguments in the ... part of a varargs call.
//
// The SPARC v9 ABI requires that floating point arguments are treated the same
// as integers when calling a varargs function. This does not apply to the
// fixed arguments that are part of the function's prototype.
//
// This function post-processes a CCValAssign array created by
// AnalyzeCallOperands().
static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
                                   ArrayRef<ISD::OutputArg> Outs) {
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    MVT ValTy = VA.getLocVT();
    // FIXME: What about f32 arguments? C promotes them to f64 when calling
    // varargs functions.
    if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
      continue;
    // The fixed arguments to a varargs function still go in FP registers.
    if (Outs[VA.getValNo()].IsFixed)
      continue;

    // This floating point argument should be reassigned.
    CCValAssign NewVA;

    // Determine the offset into the argument array.
    Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
    unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
    unsigned Offset = argSize * (VA.getLocReg() - firstReg);
    assert(Offset < 16*8 && "Offset out of range, bad register enum?");

    if (Offset < 6*8) {
      // This argument should go in %i0-%i5.
      unsigned IReg = SP::I0 + Offset/8;
      if (ValTy == MVT::f64)
        // Full register, just bitconvert into i64.
        NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                    IReg, MVT::i64, CCValAssign::BCvt);
      else {
        assert(ValTy == MVT::f128 && "Unexpected type!");
        // Full register, just bitconvert into i128 -- We will lower this into
        // two i64s in LowerCall_64.
        NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
                                          IReg, MVT::i128, CCValAssign::BCvt);
      }
    } else {
      // This needs to go to memory, we're out of integer registers.
      NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                                  Offset, VA.getLocVT(), VA.getLocInfo());
    }
    ArgLocs[i] = NewVA;
  }
}
// Lower a call for the 64-bit ABI.
SDValue
SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Sparc target does not yet support tail call optimization.
  CLI.IsTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);

  // Get the size of the outgoing arguments stack space requirement.
  // The stack offset computed by CC_Sparc64 includes all arguments.
  // Called functions expect 6 argument words to exist in the stack frame, used
  // or not.
  unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());

  // Keep stack frames 16-byte aligned.
  ArgsSize = alignTo(ArgsSize, 16);

  // Varargs calls require special treatment.
  if (CLI.IsVarArg)
    fixupVariableFloatArgs(ArgLocs, CLI.Outs);

  // Adjust the stack pointer to make room for the arguments.
  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
  // with more than 6 arguments.
  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);

  // Collect the set of registers to pass to the function and their values.
  // This will be emitted as a sequence of CopyToReg nodes glued to the call
  // instruction.
  SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;

  // Collect chains from all the memory operations that copy arguments to the
  // stack. They must follow the stack pointer adjustment above and precede the
  // call instruction itself.
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    SDValue Arg = CLI.OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown location info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
      // SPARC does not support i128 natively. Lower it into two i64, see below.
      if (!VA.needsCustom() || VA.getValVT() != MVT::f128
          || VA.getLocVT() != MVT::i128)
        Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom() && VA.getValVT() == MVT::f128
          && VA.getLocVT() == MVT::i128) {
        // Store and reload into the integer register reg and reg+1.
        unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
        unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
        SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
        SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
        HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
        SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
        LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);

        // Store to %sp+BIAS+128+Offset
        SDValue Store =
            DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());

        // Load into Reg and Reg+1
        SDValue Hi64 =
            DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
        SDValue Lo64 =
            DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
                                            Hi64));
        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
                                            Lo64));
        continue;
      }

      // The custom bit on an i32 return value indicates that it should be
      // passed in the high bits of the register.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
        Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
                          DAG.getConstant(32, DL, MVT::i32));

        // The next value may go in the low bits of the same register.
        // Handle both at once.
        if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
            ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
          SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
                                   CLI.OutVals[i+1]);
          Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
          // Skip the next value, it's already done.
          ++i;
        }
      }

      RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
    // The argument area starts at %fp+BIAS+128 in the callee frame,
    // %sp+BIAS+128 in ours.
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
                                           Subtarget->getStackPointerBias() +
                                           128, DL);
    PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before the call.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of CopyToReg nodes glued together with token chain and
  // glue operands which copy the outgoing args into registers. The InGlue is
  // necessary since all emitted instructions must be stuck together in order
  // to pass the live physical registers.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, DL,
                             RegsToPass[i].first, RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  SDValue Callee = CLI.Callee;
  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
  unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30
                                        : SparcMCExpr::VK_Sparc_WDISP30;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);

  // Build the operands for the call instruction itself.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
                         : TRI->getCallPreservedMask(DAG.getMachineFunction(),
                                                     CLI.CallConv));
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Make sure the CopyToReg nodes are glued to the call instruction which
  // consumes the registers.
  if (InGlue.getNode())
    Ops.push_back(InGlue);

  // Now the call itself.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  // Revert the stack pointer immediately after the call.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
                             DAG.getIntPtrConstant(0, DL, true), InGlue, DL);
  InGlue = Chain.getValue(1);

  // Now extract the return values. This is more or less the same as
  // LowerFormalArguments_64.

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Set inreg flag manually for codegen generated library calls that
  // return float.
  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
    CLI.Ins[0].Flags.setInReg();

  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    unsigned Reg = toCallerWindow(VA.getLocReg());

    // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
    // reside in the same register in the high and low bits. Reuse the
    // CopyFromReg previous node to avoid duplicate copies.
    SDValue RV;
    if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
      if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
        RV = Chain.getValue(0);

    // But usually we'll create a new CopyFromReg for a different register.
    if (!RV.getNode()) {
      RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
      Chain = RV.getValue(1);
      InGlue = Chain.getValue(2);
    }

    // Get the high bits for i32 struct elements.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom())
      RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
                       DAG.getConstant(32, DL, MVT::i32));

    // The callee promoted the return value, so insert an Assert?ext SDNode so
    // we won't promote the value again in this function.
    switch (VA.getLocInfo()) {
    case CCValAssign::SExt:
      RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    case CCValAssign::ZExt:
      RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    default:
      break;
    }

    // Truncate the register down to the return value type.
    if (VA.isExtInLoc())
      RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);

    InVals.push_back(RV);
  }

  return Chain;
}
//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//
TargetLowering::AtomicExpansionKind
SparcTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  if (AI->getOperation() == AtomicRMWInst::Xchg &&
      AI->getType()->getPrimitiveSizeInBits() == 32)
    return AtomicExpansionKind::None; // Uses xchg instruction

  return AtomicExpansionKind::CmpXChg;
}
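// As a result, a 32-bit "atomicrmw xchg" selects SPARC's native swap
// instruction, while all other RMW operations are expanded by
// AtomicExpandPass into compare-and-swap loops.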
/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
/// condition.
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown integer condition code!");
  case ISD::SETEQ:  return SPCC::ICC_E;
  case ISD::SETNE:  return SPCC::ICC_NE;
  case ISD::SETLT:  return SPCC::ICC_L;
  case ISD::SETGT:  return SPCC::ICC_G;
  case ISD::SETLE:  return SPCC::ICC_LE;
  case ISD::SETGE:  return SPCC::ICC_GE;
  case ISD::SETULT: return SPCC::ICC_CS;
  case ISD::SETULE: return SPCC::ICC_LEU;
  case ISD::SETUGT: return SPCC::ICC_GU;
  case ISD::SETUGE: return SPCC::ICC_CC;
  }
}
/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
/// FCC condition.
static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  case ISD::SETEQ:
  case ISD::SETOEQ: return SPCC::FCC_E;
  case ISD::SETNE:
  case ISD::SETUNE: return SPCC::FCC_NE;
  case ISD::SETLT:
  case ISD::SETOLT: return SPCC::FCC_L;
  case ISD::SETGT:
  case ISD::SETOGT: return SPCC::FCC_G;
  case ISD::SETLE:
  case ISD::SETOLE: return SPCC::FCC_LE;
  case ISD::SETGE:
  case ISD::SETOGE: return SPCC::FCC_GE;
  case ISD::SETULT: return SPCC::FCC_UL;
  case ISD::SETULE: return SPCC::FCC_ULE;
  case ISD::SETUGT: return SPCC::FCC_UG;
  case ISD::SETUGE: return SPCC::FCC_UGE;
  case ISD::SETUO:  return SPCC::FCC_U;
  case ISD::SETO:   return SPCC::FCC_O;
  case ISD::SETONE: return SPCC::FCC_LG;
  case ISD::SETUEQ: return SPCC::FCC_UE;
  }
}
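// Note the ordered/unordered pairing above: SETONE maps to FCC_LG (less or
// greater, which excludes NaNs) while SETUEQ maps to FCC_UE (unordered or
// equal), mirroring the four-state (E, L, G, U) fcc encoding of SPARC's
// floating point compare results.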
1408 SparcTargetLowering::SparcTargetLowering(const TargetMachine
&TM
,
1409 const SparcSubtarget
&STI
)
1410 : TargetLowering(TM
), Subtarget(&STI
) {
1411 MVT PtrVT
= MVT::getIntegerVT(8 * TM
.getPointerSize(0));
1413 // Instructions which use registers as conditionals examine all the
1414 // bits (as does the pseudo SELECT_CC expansion). I don't think it
1415 // matters much whether it's ZeroOrOneBooleanContent, or
1416 // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
1418 setBooleanContents(ZeroOrOneBooleanContent
);
1419 setBooleanVectorContents(ZeroOrOneBooleanContent
);
1421 // Set up the register classes.
1422 addRegisterClass(MVT::i32
, &SP::IntRegsRegClass
);
1423 if (!Subtarget
->useSoftFloat()) {
1424 addRegisterClass(MVT::f32
, &SP::FPRegsRegClass
);
1425 addRegisterClass(MVT::f64
, &SP::DFPRegsRegClass
);
1426 addRegisterClass(MVT::f128
, &SP::QFPRegsRegClass
);
1428 if (Subtarget
->is64Bit()) {
1429 addRegisterClass(MVT::i64
, &SP::I64RegsRegClass
);
1431 // On 32bit sparc, we define a double-register 32bit register
1432 // class, as well. This is modeled in LLVM as a 2-vector of i32.
1433 addRegisterClass(MVT::v2i32
, &SP::IntPairRegClass
);
1435 // ...but almost all operations must be expanded, so set that as
1437 for (unsigned Op
= 0; Op
< ISD::BUILTIN_OP_END
; ++Op
) {
1438 setOperationAction(Op
, MVT::v2i32
, Expand
);
1440 // Truncating/extending stores/loads are also not supported.
1441 for (MVT VT
: MVT::integer_fixedlen_vector_valuetypes()) {
1442 setLoadExtAction(ISD::SEXTLOAD
, VT
, MVT::v2i32
, Expand
);
1443 setLoadExtAction(ISD::ZEXTLOAD
, VT
, MVT::v2i32
, Expand
);
1444 setLoadExtAction(ISD::EXTLOAD
, VT
, MVT::v2i32
, Expand
);
1446 setLoadExtAction(ISD::SEXTLOAD
, MVT::v2i32
, VT
, Expand
);
1447 setLoadExtAction(ISD::ZEXTLOAD
, MVT::v2i32
, VT
, Expand
);
1448 setLoadExtAction(ISD::EXTLOAD
, MVT::v2i32
, VT
, Expand
);
1450 setTruncStoreAction(VT
, MVT::v2i32
, Expand
);
1451 setTruncStoreAction(MVT::v2i32
, VT
, Expand
);
1453 // However, load and store *are* legal.
1454 setOperationAction(ISD::LOAD
, MVT::v2i32
, Legal
);
1455 setOperationAction(ISD::STORE
, MVT::v2i32
, Legal
);
1456 setOperationAction(ISD::EXTRACT_VECTOR_ELT
, MVT::v2i32
, Legal
);
1457 setOperationAction(ISD::BUILD_VECTOR
, MVT::v2i32
, Legal
);
1459 // And we need to promote i64 loads/stores into vector load/store
1460 setOperationAction(ISD::LOAD
, MVT::i64
, Custom
);
1461 setOperationAction(ISD::STORE
, MVT::i64
, Custom
);
1463 // Sadly, this doesn't work:
1464 // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
1465 // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
1468 // Turn FP extload into load/fpextend
1469 for (MVT VT
: MVT::fp_valuetypes()) {
1470 setLoadExtAction(ISD::EXTLOAD
, VT
, MVT::f16
, Expand
);
1471 setLoadExtAction(ISD::EXTLOAD
, VT
, MVT::f32
, Expand
);
1472 setLoadExtAction(ISD::EXTLOAD
, VT
, MVT::f64
, Expand
);
1475 // Sparc doesn't have i1 sign extending load
1476 for (MVT VT
: MVT::integer_valuetypes())
1477 setLoadExtAction(ISD::SEXTLOAD
, VT
, MVT::i1
, Promote
);
1479 // Turn FP truncstore into trunc + store.
1480 setTruncStoreAction(MVT::f32
, MVT::f16
, Expand
);
1481 setTruncStoreAction(MVT::f64
, MVT::f16
, Expand
);
1482 setTruncStoreAction(MVT::f64
, MVT::f32
, Expand
);
1483 setTruncStoreAction(MVT::f128
, MVT::f16
, Expand
);
1484 setTruncStoreAction(MVT::f128
, MVT::f32
, Expand
);
1485 setTruncStoreAction(MVT::f128
, MVT::f64
, Expand
);
1487 // Custom legalize GlobalAddress nodes into LO/HI parts.
1488 setOperationAction(ISD::GlobalAddress
, PtrVT
, Custom
);
1489 setOperationAction(ISD::GlobalTLSAddress
, PtrVT
, Custom
);
1490 setOperationAction(ISD::ConstantPool
, PtrVT
, Custom
);
1491 setOperationAction(ISD::BlockAddress
, PtrVT
, Custom
);
1493 // Sparc doesn't have sext_inreg, replace them with shl/sra
1494 setOperationAction(ISD::SIGN_EXTEND_INREG
, MVT::i16
, Expand
);
1495 setOperationAction(ISD::SIGN_EXTEND_INREG
, MVT::i8
, Expand
);
1496 setOperationAction(ISD::SIGN_EXTEND_INREG
, MVT::i1
, Expand
);
1498 // Sparc has no REM or DIVREM operations.
1499 setOperationAction(ISD::UREM
, MVT::i32
, Expand
);
1500 setOperationAction(ISD::SREM
, MVT::i32
, Expand
);
1501 setOperationAction(ISD::SDIVREM
, MVT::i32
, Expand
);
1502 setOperationAction(ISD::UDIVREM
, MVT::i32
, Expand
);
1504 // ... nor does SparcV9.
1505 if (Subtarget
->is64Bit()) {
1506 setOperationAction(ISD::UREM
, MVT::i64
, Expand
);
1507 setOperationAction(ISD::SREM
, MVT::i64
, Expand
);
1508 setOperationAction(ISD::SDIVREM
, MVT::i64
, Expand
);
1509 setOperationAction(ISD::UDIVREM
, MVT::i64
, Expand
);
1512 // Custom expand fp<->sint
1513 setOperationAction(ISD::FP_TO_SINT
, MVT::i32
, Custom
);
1514 setOperationAction(ISD::SINT_TO_FP
, MVT::i32
, Custom
);
1515 setOperationAction(ISD::FP_TO_SINT
, MVT::i64
, Custom
);
1516 setOperationAction(ISD::SINT_TO_FP
, MVT::i64
, Custom
);
1518 // Custom Expand fp<->uint
1519 setOperationAction(ISD::FP_TO_UINT
, MVT::i32
, Custom
);
1520 setOperationAction(ISD::UINT_TO_FP
, MVT::i32
, Custom
);
1521 setOperationAction(ISD::FP_TO_UINT
, MVT::i64
, Custom
);
1522 setOperationAction(ISD::UINT_TO_FP
, MVT::i64
, Custom
);
1524 // Lower f16 conversion operations into library calls
1525 setOperationAction(ISD::FP16_TO_FP
, MVT::f32
, Expand
);
1526 setOperationAction(ISD::FP_TO_FP16
, MVT::f32
, Expand
);
1527 setOperationAction(ISD::FP16_TO_FP
, MVT::f64
, Expand
);
1528 setOperationAction(ISD::FP_TO_FP16
, MVT::f64
, Expand
);
1529 setOperationAction(ISD::FP16_TO_FP
, MVT::f128
, Expand
);
1530 setOperationAction(ISD::FP_TO_FP16
, MVT::f128
, Expand
);
  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);

  // Sparc has no select or setcc: expand to SELECT_CC.
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::f128, Expand);

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SETCC, MVT::f128, Expand);
  // Sparc doesn't have BRCOND either, it has BR_CC.
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_CC, MVT::f128, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
  setOperationAction(ISD::ADDC, MVT::i32, Custom);
  setOperationAction(ISD::ADDE, MVT::i32, Custom);
  setOperationAction(ISD::SUBC, MVT::i32, Custom);
  setOperationAction(ISD::SUBE, MVT::i32, Custom);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ADDC, MVT::i64, Custom);
    setOperationAction(ISD::ADDE, MVT::i64, Custom);
    setOperationAction(ISD::SUBC, MVT::i64, Custom);
    setOperationAction(ISD::SUBE, MVT::i64, Custom);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SETCC, MVT::i64, Expand);
    setOperationAction(ISD::BR_CC, MVT::i64, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);

    setOperationAction(ISD::CTPOP, MVT::i64,
                       Subtarget->usePopc() ? Legal : Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
    setOperationAction(ISD::CTLZ, MVT::i64, Expand);
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);
    setOperationAction(ISD::ROTL, MVT::i64, Expand);
    setOperationAction(ISD::ROTR, MVT::i64, Expand);
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  }
  // ATOMICs.
  // Atomics are supported on SparcV9. 32-bit atomics are also
  // supported by some Leon SparcV8 variants. Otherwise, atomics
  // are unsupported.
  if (Subtarget->isV9())
    setMaxAtomicSizeInBitsSupported(64);
  else if (Subtarget->hasLeonCasa())
    setMaxAtomicSizeInBitsSupported(32);
  else
    setMaxAtomicSizeInBitsSupported(0);

  setMinCmpXchgSizeInBits(32);

  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);

  // Custom Lower Atomic LOAD/STORE
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
  }
  if (!Subtarget->is64Bit()) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }
  if (!Subtarget->isV9()) {
    // SparcV8 does not have FNEGD and FABSD.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f64, Custom);
  }
  setOperationAction(ISD::FSIN, MVT::f128, Expand);
  setOperationAction(ISD::FCOS, MVT::f128, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FMA, MVT::f128, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Expands to [SU]MUL_LOHI.
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MUL, MVT::i32, Expand);
  if (Subtarget->useSoftMulDiv()) {
    // .umul works for both signed and unsigned
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setLibcallName(RTLIB::MUL_I32, ".umul");

    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setLibcallName(RTLIB::SDIV_I32, ".div");

    setOperationAction(ISD::UDIV, MVT::i32, Expand);
    setLibcallName(RTLIB::UDIV_I32, ".udiv");

    setLibcallName(RTLIB::SREM_I32, ".rem");
    setLibcallName(RTLIB::UREM_I32, ".urem");
  }
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i64, Expand);
    setOperationAction(ISD::MULHS, MVT::i64, Expand);

    setOperationAction(ISD::UMULO, MVT::i64, Custom);
    setOperationAction(ISD::SMULO, MVT::i64, Custom);

    setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  }
  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  // VAARG needs to be lowered to not do unaligned accesses for doubles.
  setOperationAction(ISD::VAARG, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);

  setStackPointerRegisterToSaveRestore(SP::O6);

  setOperationAction(ISD::CTPOP, MVT::i32,
                     Subtarget->usePopc() ? Legal : Expand);
  if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
    setOperationAction(ISD::LOAD, MVT::f128, Legal);
    setOperationAction(ISD::STORE, MVT::f128, Legal);
  } else {
    setOperationAction(ISD::LOAD, MVT::f128, Custom);
    setOperationAction(ISD::STORE, MVT::f128, Custom);
  }
  if (Subtarget->hasHardQuad()) {
    setOperationAction(ISD::FADD, MVT::f128, Legal);
    setOperationAction(ISD::FSUB, MVT::f128, Legal);
    setOperationAction(ISD::FMUL, MVT::f128, Legal);
    setOperationAction(ISD::FDIV, MVT::f128, Legal);
    setOperationAction(ISD::FSQRT, MVT::f128, Legal);
    setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
    if (Subtarget->isV9()) {
      setOperationAction(ISD::FNEG, MVT::f128, Legal);
      setOperationAction(ISD::FABS, MVT::f128, Legal);
    } else {
      setOperationAction(ISD::FNEG, MVT::f128, Custom);
      setOperationAction(ISD::FABS, MVT::f128, Custom);
    }

    if (!Subtarget->is64Bit()) {
      setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
      setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
      setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
    }

  } else {
    // Custom legalize f128 operations.

    setOperationAction(ISD::FADD, MVT::f128, Custom);
    setOperationAction(ISD::FSUB, MVT::f128, Custom);
    setOperationAction(ISD::FMUL, MVT::f128, Custom);
    setOperationAction(ISD::FDIV, MVT::f128, Custom);
    setOperationAction(ISD::FSQRT, MVT::f128, Custom);
    setOperationAction(ISD::FNEG, MVT::f128, Custom);
    setOperationAction(ISD::FABS, MVT::f128, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
  }
  // Setup Runtime library names.
  if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
    setLibcallName(RTLIB::ADD_F128,  "_Qp_add");
    setLibcallName(RTLIB::SUB_F128,  "_Qp_sub");
    setLibcallName(RTLIB::MUL_F128,  "_Qp_mul");
    setLibcallName(RTLIB::DIV_F128,  "_Qp_div");
    setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
    setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
    setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
    setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
    setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
    setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
    setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
    setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
    setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
    setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
    setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
    setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
    setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
  } else if (!Subtarget->useSoftFloat()) {
    setLibcallName(RTLIB::ADD_F128,  "_Q_add");
    setLibcallName(RTLIB::SUB_F128,  "_Q_sub");
    setLibcallName(RTLIB::MUL_F128,  "_Q_mul");
    setLibcallName(RTLIB::DIV_F128,  "_Q_div");
    setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
    setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
    setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
    setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
    setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
    setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
    setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
    setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
    setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
    setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
    setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
    setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
    setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
  }
  if (Subtarget->fixAllFDIVSQRT()) {
    // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
    // the former instructions generate errata on LEON processors.
    setOperationAction(ISD::FDIV, MVT::f32, Promote);
    setOperationAction(ISD::FSQRT, MVT::f32, Promote);
  }

  if (Subtarget->hasNoFMULS()) {
    setOperationAction(ISD::FMUL, MVT::f32, Promote);
  }

  // Custom combine bitcast between f64 and v2i32
  if (!Subtarget->is64Bit())
    setTargetDAGCombine(ISD::BITCAST);

  if (Subtarget->hasLeonCycleCounter())
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  setMinFunctionAlignment(Align(4));

  computeRegisterProperties(Subtarget->getRegisterInfo());
}
bool SparcTargetLowering::useSoftFloat() const {
  return Subtarget->useSoftFloat();
}
const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((SPISD::NodeType)Opcode) {
  case SPISD::FIRST_NUMBER:    break;
  case SPISD::CMPICC:          return "SPISD::CMPICC";
  case SPISD::CMPFCC:          return "SPISD::CMPFCC";
  case SPISD::BRICC:           return "SPISD::BRICC";
  case SPISD::BRXCC:           return "SPISD::BRXCC";
  case SPISD::BRFCC:           return "SPISD::BRFCC";
  case SPISD::SELECT_ICC:      return "SPISD::SELECT_ICC";
  case SPISD::SELECT_XCC:      return "SPISD::SELECT_XCC";
  case SPISD::SELECT_FCC:      return "SPISD::SELECT_FCC";
  case SPISD::Hi:              return "SPISD::Hi";
  case SPISD::Lo:              return "SPISD::Lo";
  case SPISD::FTOI:            return "SPISD::FTOI";
  case SPISD::ITOF:            return "SPISD::ITOF";
  case SPISD::FTOX:            return "SPISD::FTOX";
  case SPISD::XTOF:            return "SPISD::XTOF";
  case SPISD::CALL:            return "SPISD::CALL";
  case SPISD::RET_FLAG:        return "SPISD::RET_FLAG";
  case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
  case SPISD::FLUSHW:          return "SPISD::FLUSHW";
  case SPISD::TLS_ADD:         return "SPISD::TLS_ADD";
  case SPISD::TLS_LD:          return "SPISD::TLS_LD";
  case SPISD::TLS_CALL:        return "SPISD::TLS_CALL";
  }
  return nullptr;
}
EVT SparcTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
                                            EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}
/// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
/// be zero. Op is expected to be a target specific node. Used by DAG
/// combiner.
void SparcTargetLowering::computeKnownBitsForTargetNode
                                (const SDValue Op,
                                 KnownBits &Known,
                                 const APInt &DemandedElts,
                                 const SelectionDAG &DAG,
                                 unsigned Depth) const {
  KnownBits Known2;
  Known.resetAll();

  switch (Op.getOpcode()) {
  default: break;
  case SPISD::SELECT_ICC:
  case SPISD::SELECT_XCC:
  case SPISD::SELECT_FCC:
    Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
    Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);
    break;
  }
}
// Look at LHS/RHS/CC and see if they are a lowered setcc instruction.  If so
// set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
                             ISD::CondCode CC, unsigned &SPCC) {
  if (isNullConstant(RHS) &&
      CC == ISD::SETNE &&
      (((LHS.getOpcode() == SPISD::SELECT_ICC ||
         LHS.getOpcode() == SPISD::SELECT_XCC) &&
        LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
       (LHS.getOpcode() == SPISD::SELECT_FCC &&
        LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
      isOneConstant(LHS.getOperand(0)) &&
      isNullConstant(LHS.getOperand(1))) {
    SDValue CMPCC = LHS.getOperand(3);
    SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
    LHS = CMPCC.getOperand(0);
    RHS = CMPCC.getOperand(1);
  }
}
// Convert to a target node and set target flags.
SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
                                             SelectionDAG &DAG) const {
  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
    return DAG.getTargetGlobalAddress(GA->getGlobal(),
                                      SDLoc(GA),
                                      GA->getValueType(0),
                                      GA->getOffset(), TF);

  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
    return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
                                     CP->getAlign(), CP->getOffset(), TF);

  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
    return DAG.getTargetBlockAddress(BA->getBlockAddress(),
                                     Op.getValueType(),
                                     0,
                                     TF);

  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
    return DAG.getTargetExternalSymbol(ES->getSymbol(),
                                       ES->getValueType(0), TF);

  llvm_unreachable("Unhandled address SDNode");
}
// Split Op into high and low parts according to HiTF and LoTF.
// Return an ADD node combining the parts.
SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
                                          unsigned HiTF, unsigned LoTF,
                                          SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
}
// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
// or ExternalSymbol SDNode.
SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = getPointerTy(DAG.getDataLayout());

  // Handle PIC mode first. SPARC needs a got load for every variable!
  if (isPositionIndependent()) {
    const Module *M = DAG.getMachineFunction().getFunction().getParent();
    PICLevel::Level picLevel = M->getPICLevel();
    SDValue Idx;

    if (picLevel == PICLevel::SmallPIC) {
      // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
      Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
                        withTargetFlags(Op, SparcMCExpr::VK_Sparc_GOT13, DAG));
    } else {
      // This is the pic32 code model, the GOT is known to be smaller than 4GB.
      Idx = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_GOT22,
                         SparcMCExpr::VK_Sparc_GOT10, DAG);
    }

    SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
    SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
    // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
    // function has calls.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    MFI.setHasCalls(true);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
                       MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  }

  // This is one of the absolute code models.
  switch(getTargetMachine().getCodeModel()) {
  default:
    llvm_unreachable("Unsupported absolute code model");
  case CodeModel::Small:
    // abs32.
    return makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
                        SparcMCExpr::VK_Sparc_LO, DAG);
  case CodeModel::Medium: {
    // abs44.
    SDValue H44 = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_H44,
                               SparcMCExpr::VK_Sparc_M44, DAG);
    H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
    SDValue L44 = withTargetFlags(Op, SparcMCExpr::VK_Sparc_L44, DAG);
    L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
    return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
  }
  case CodeModel::Large: {
    // abs64.
    SDValue Hi = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HH,
                              SparcMCExpr::VK_Sparc_HM, DAG);
    Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
    SDValue Lo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
                              SparcMCExpr::VK_Sparc_LO, DAG);
    return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
  }
  }
}
SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
                                               SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}
SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  SDLoc DL(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  TLSModel::Model model = getTargetMachine().getTLSModel(GV);

  if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
    unsigned HiTF = ((model == TLSModel::GeneralDynamic)
                         ? SparcMCExpr::VK_Sparc_TLS_GD_HI22
                         : SparcMCExpr::VK_Sparc_TLS_LDM_HI22);
    unsigned LoTF = ((model == TLSModel::GeneralDynamic)
                         ? SparcMCExpr::VK_Sparc_TLS_GD_LO10
                         : SparcMCExpr::VK_Sparc_TLS_LDM_LO10);
    unsigned addTF = ((model == TLSModel::GeneralDynamic)
                          ? SparcMCExpr::VK_Sparc_TLS_GD_ADD
                          : SparcMCExpr::VK_Sparc_TLS_LDM_ADD);
    unsigned callTF = ((model == TLSModel::GeneralDynamic)
                           ? SparcMCExpr::VK_Sparc_TLS_GD_CALL
                           : SparcMCExpr::VK_Sparc_TLS_LDM_CALL);

    SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
    SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
    SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
                                   withTargetFlags(Op, addTF, DAG));

    SDValue Chain = DAG.getEntryNode();
    SDValue InFlag;

    Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
    Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag);
    InFlag = Chain.getValue(1);
    SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
    SDValue Symbol = withTargetFlags(Op, callTF, DAG);

    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
        DAG.getMachineFunction(), CallingConv::C);
    assert(Mask && "Missing call preserved mask for calling convention");
    SDValue Ops[] = {Chain,
                     Callee,
                     Symbol,
                     DAG.getRegister(SP::O0, PtrVT),
                     DAG.getRegisterMask(Mask),
                     InFlag};
    Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
    InFlag = Chain.getValue(1);
    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, DL, true),
                               DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
    InFlag = Chain.getValue(1);
    SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag);

    if (model != TLSModel::LocalDynamic)
      return Ret;

    SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
                 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_HIX22, DAG));
    SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
                 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_LOX10, DAG));
    HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
    return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
                   withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_ADD, DAG));
  }

  if (model == TLSModel::InitialExec) {
    unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
                                        : SparcMCExpr::VK_Sparc_TLS_IE_LD);

    SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);

    // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
    // function has calls.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    MFI.setHasCalls(true);

    SDValue TGA = makeHiLoPair(Op,
                               SparcMCExpr::VK_Sparc_TLS_IE_HI22,
                               SparcMCExpr::VK_Sparc_TLS_IE_LO10, DAG);
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
    SDValue Offset = DAG.getNode(SPISD::TLS_LD,
                                 DL, PtrVT, Ptr,
                                 withTargetFlags(Op, ldTF, DAG));
    return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
                       DAG.getRegister(SP::G7, PtrVT), Offset,
                       withTargetFlags(Op,
                                       SparcMCExpr::VK_Sparc_TLS_IE_ADD, DAG));
  }

  assert(model == TLSModel::LocalExec);
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
                 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_HIX22, DAG));
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
                 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_LOX10, DAG));
  SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);

  return DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getRegister(SP::G7, PtrVT), Offset);
}
SDValue SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain,
                                                  ArgListTy &Args, SDValue Arg,
                                                  const SDLoc &DL,
                                                  SelectionDAG &DAG) const {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  EVT ArgVT = Arg.getValueType();
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());

  ArgListEntry Entry;
  Entry.Node = Arg;
  Entry.Ty   = ArgTy;

  if (ArgTy->isFP128Ty()) {
    // Create a stack object and pass the pointer to the library function.
    int FI = MFI.CreateStackObject(16, Align(8), false);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
                         Align(8));

    Entry.Node = FIPtr;
    Entry.Ty   = PointerType::getUnqual(ArgTy);
  }
  Args.push_back(Entry);
  return Chain;
}
SDValue
SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
                                 const char *LibFuncName,
                                 unsigned numArgs) const {

  ArgListTy Args;

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
  Type *RetTyABI = RetTy;
  SDValue Chain = DAG.getEntryNode();
  SDValue RetPtr;

  if (RetTy->isFP128Ty()) {
    // Create a Stack Object to receive the return value of type f128.
    ArgListEntry Entry;
    int RetFI = MFI.CreateStackObject(16, Align(8), false);
    RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
    Entry.Node = RetPtr;
    Entry.Ty   = PointerType::getUnqual(RetTy);
    if (!Subtarget->is64Bit())
      Entry.IsSRet = true;
    Entry.IsReturned = false;
    Args.push_back(Entry);
    RetTyABI = Type::getVoidTy(*DAG.getContext());
  }

  assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
  for (unsigned i = 0, e = numArgs; i != e; ++i) {
    Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
  }
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
    .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  // chain is in second result.
  if (RetTyABI == RetTy)
    return CallInfo.first;

  assert (RetTy->isFP128Ty() && "Unexpected return type!");

  Chain = CallInfo.second;

  // Load RetPtr to get the return value.
  return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
                     MachinePointerInfo(), Align(8));
}
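// The indirection above reflects the f128 soft-float ABI used by the
// _Q_*/_Qp_* helpers: a long double argument is passed as a pointer to a
// 16-byte stack temporary, and a long double result is likewise returned
// through a pointer (sret-style on 32-bit), which is why both helpers create
// stack objects rather than passing the value in registers.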
SDValue SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
                                              unsigned &SPCC, const SDLoc &DL,
                                              SelectionDAG &DAG) const {

  const char *LibCall = nullptr;
  bool is64Bit = Subtarget->is64Bit();
  switch(SPCC) {
  default: llvm_unreachable("Unhandled conditional code!");
  case SPCC::FCC_E  : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
  case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
  case SPCC::FCC_L  : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
  case SPCC::FCC_G  : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
  case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
  case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
  case SPCC::FCC_UL :
  case SPCC::FCC_ULE:
  case SPCC::FCC_UG :
  case SPCC::FCC_UGE:
  case SPCC::FCC_U  :
  case SPCC::FCC_O  :
  case SPCC::FCC_LG :
  case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
  }

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
  Type *RetTy = Type::getInt32Ty(*DAG.getContext());
  ArgListTy Args;
  SDValue Chain = DAG.getEntryNode();
  Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
  Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL).setChain(Chain)
    .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  // result is in first, and chain is in second result.
  SDValue Result = CallInfo.first;

  switch(SPCC) {
  default: {
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UL : {
    SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_ULE: {
    SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UG : {
    SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
    SPCC = SPCC::ICC_G;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UGE: {
    SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }

  case SPCC::FCC_U : {
    SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_O : {
    SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_LG : {
    SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UE : {
    SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  }
}
static SDValue
LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
                   const SparcTargetLowering &TLI) {

  if (Op.getOperand(0).getValueType() == MVT::f64)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);

  if (Op.getOperand(0).getValueType() == MVT::f32)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);

  llvm_unreachable("fpextend with non-float operand!");
  return SDValue();
}
static SDValue
LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
                  const SparcTargetLowering &TLI) {
  // FP_ROUND on f64 and f32 are legal.
  if (Op.getOperand(0).getValueType() != MVT::f128)
    return Op;

  if (Op.getValueType() == MVT::f64)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
  if (Op.getValueType() == MVT::f32)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);

  llvm_unreachable("fpround to non-float!");
  return SDValue();
}
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  assert(VT == MVT::i32 || VT == MVT::i64);

  // Expand f128 operations to fp128 abi calls.
  if (Op.getOperand(0).getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
    const char *libName = TLI.getLibcallName(VT == MVT::i32
                                             ? RTLIB::FPTOSINT_F128_I32
                                             : RTLIB::FPTOSINT_F128_I64);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the resulting type is illegal.
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  // Otherwise, Convert the fp value to integer in an FP register.
  if (VT == MVT::i32)
    Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
  else
    Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));

  return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}
static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || (OpVT == MVT::i64));

  EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;

  // Expand f128 operations to fp128 ABI calls.
  if (Op.getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
    const char *libName = TLI.getLibcallName(OpVT == MVT::i32
                                             ? RTLIB::SINTTOFP_I32_F128
                                             : RTLIB::SINTTOFP_I64_F128);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the operand type is illegal.
  if (!TLI.isTypeLegal(OpVT))
    return SDValue();

  // Otherwise, Convert the int value to FP in an FP register.
  SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
  unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
  return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
}
static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  // Expand if it does not involve f128 or the target has support for
  // quad floating point instructions and the resulting type is legal.
  if (Op.getOperand(0).getValueType() != MVT::f128 ||
      (hasHardQuad && TLI.isTypeLegal(VT)))
    return SDValue();

  assert(VT == MVT::i32 || VT == MVT::i64);

  return TLI.LowerF128Op(Op, DAG,
                         TLI.getLibcallName(VT == MVT::i32
                                            ? RTLIB::FPTOUINT_F128_I32
                                            : RTLIB::FPTOUINT_F128_I64),
                         1);
}
static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || OpVT == MVT::i64);

  // Expand if it does not involve f128 or the target has support for
  // quad floating point instructions and the operand type is legal.
  if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
    return SDValue();

  return TLI.LowerF128Op(Op, DAG,
                         TLI.getLibcallName(OpVT == MVT::i32
                                            ? RTLIB::UINTTOFP_I32_F128
                                            : RTLIB::UINTTOFP_I64_F128),
                         1);
}
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
                          const SparcTargetLowering &TLI,
                          bool hasHardQuad) {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;

  // If this is a br_cc of a "setcc", and if the setcc got lowered into
  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);

  // Get the condition flag.
  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
    // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
    Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
  } else {
    if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
      Opc = SPISD::BRICC;
    } else {
      CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      Opc = SPISD::BRFCC;
    }
  }
  return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
                     DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
                              const SparcTargetLowering &TLI,
                              bool hasHardQuad) {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;

  // If this is a select_cc of a "setcc", and if the setcc got lowered into
  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);

  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    Opc = LHS.getValueType() == MVT::i32 ?
          SPISD::SELECT_ICC : SPISD::SELECT_XCC;
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
  } else {
    if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
      Opc = SPISD::SELECT_ICC;
    } else {
      CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
      Opc = SPISD::SELECT_FCC;
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
    }
  }
  return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
                     DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
                            const SparcTargetLowering &TLI) {
  MachineFunction &MF = DAG.getMachineFunction();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());

  // Need frame address to find the address of VarArgsFrameIndex.
  MF.getFrameInfo().setFrameAddressIsTaken(true);

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc DL(Op);
  SDValue Offset =
      DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
                  DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
                      MachinePointerInfo(SV));
}
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc DL(Node);
  SDValue VAList =
      DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
  // Increment the pointer, VAList, to the next vaarg.
  SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits()/8,
                                                      DL));
  // Store the incremented VAList to the legalized pointer.
  InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
                         MachinePointerInfo(SV));
  // Load the actual argument out of the pointer VAList.
  // We can't count on greater alignment than the word size.
  return DAG.getLoad(
      VT, DL, InChain, VAList, MachinePointerInfo(),
      std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8);
}
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
                                       const SparcSubtarget *Subtarget) {
  SDValue Chain = Op.getOperand(0);  // Legalize the chain.
  SDValue Size  = Op.getOperand(1);  // Legalize the size.
  MaybeAlign Alignment =
      cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
  Align StackAlign = Subtarget->getFrameLowering()->getStackAlign();
  EVT VT = Size->getValueType(0);
  SDLoc dl(Op);

  // TODO: implement over-aligned alloca. (Note: this also implies supporting
  // over-aligned function frames + dynamic allocations at all, which
  // currently isn't supported)
  if (Alignment && *Alignment > StackAlign) {
    const MachineFunction &MF = DAG.getMachineFunction();
    report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
                       "over-aligned dynamic alloca not supported.");
  }

  // The resultant pointer needs to be above the register spill area
  // at the bottom of the stack.
  unsigned regSpillArea;
  if (Subtarget->is64Bit()) {
    regSpillArea = 128;
  } else {
    // On Sparc32, the size of the spill area is 92. Unfortunately,
    // that's only 4-byte aligned, not 8-byte aligned (the stack
    // pointer is 8-byte aligned). So, if the user asked for an 8-byte
    // aligned dynamic allocation, we actually need to add 96 to the
    // bottom of the stack, instead of 92, to ensure 8-byte alignment.

    // That also means adding 4 to the size of the allocation --
    // before applying the 8-byte rounding. Unfortunately, the value
    // we get here has already had rounding applied. So, we need
    // to add 8, instead, wasting a bit more memory.

    // Further, this only actually needs to be done if the required
    // alignment is > 4, but, we've lost that info by this point, too,
    // so we always apply it.

    // (An alternative approach would be to always reserve 96 bytes
    // instead of the required 92, but then we'd waste 4 extra bytes
    // in every frame, not just those with dynamic stack allocations)

    // TODO: modify code in SelectionDAGBuilder to make this less sad.

    regSpillArea = 96;
    Size = DAG.getNode(ISD::ADD, dl, VT, Size,
                       DAG.getConstant(8, dl, VT));
  }

  unsigned SPReg = SP::O6;
  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
  SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
  Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain

  regSpillArea += Subtarget->getStackPointerBias();

  SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
                               DAG.getConstant(regSpillArea, dl, VT));
  SDValue Ops[2] = { NewVal, Chain };
  return DAG.getMergeValues(Ops, dl);
}
static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  SDValue Chain = DAG.getNode(SPISD::FLUSHW,
                              dl, MVT::Other, DAG.getEntryNode());
  return Chain;
}
static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
                            const SparcSubtarget *Subtarget,
                            bool AlwaysFlush = false) {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned FrameReg = SP::I6;
  unsigned stackBias = Subtarget->getStackPointerBias();

  SDValue FrameAddr;
  SDValue Chain;

  // flush first to make sure the windowed registers' values are in stack
  Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();

  FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);

  unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;

  while (depth--) {
    SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, dl));
    FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
  }
  if (Subtarget->is64Bit())
    FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                            DAG.getIntPtrConstant(stackBias, dl));
  return FrameAddr;
}
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
                              const SparcSubtarget *Subtarget) {

  uint64_t depth = Op.getConstantOperandVal(0);

  return getFRAMEADDR(depth, Op, DAG, Subtarget);
}
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               const SparcSubtarget *Subtarget) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  uint64_t depth = Op.getConstantOperandVal(0);

  SDValue RetAddr;
  if (depth == 0) {
    auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    unsigned RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
    RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
    return RetAddr;
  }

  // Need frame address to find return address of the caller.
  SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);

  unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
  SDValue Ptr = DAG.getNode(ISD::ADD,
                            dl, VT,
                            FrameAddr,
                            DAG.getIntPtrConstant(Offset, dl));
  RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());

  return RetAddr;
}
static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
                          unsigned opcode) {
  assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
  assert(opcode == ISD::FNEG || opcode == ISD::FABS);

  // Lower fneg/fabs on f64 to fneg/fabs on f32.
  // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
  // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.

  // Note: in little-endian, the halves of the floating-point value are
  // stored in the registers in the opposite order, so the subreg with the
  // sign bit is the highest-numbered (odd), rather than the
  // lowest-numbered (even).

  SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
                                            SrcReg64);
  SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
                                            SrcReg64);

  if (DAG.getDataLayout().isLittleEndian())
    Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
  else
    Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);

  SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                dl, MVT::f64), 0);
  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
                                       DstReg64, Hi32);
  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
                                       DstReg64, Lo32);
  return DstReg64;
}
// Lower a f128 load into two f64 loads.
static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
{
  SDLoc dl(Op);
  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
  assert(LdNode->getOffset().isUndef() && "Unexpected node type");

  Align Alignment = commonAlignment(LdNode->getOriginalAlign(), 8);

  SDValue Hi64 =
      DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
                  LdNode->getPointerInfo(), Alignment);
  EVT addrVT = LdNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              LdNode->getBasePtr(),
                              DAG.getConstant(8, dl, addrVT));
  SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
                             LdNode->getPointerInfo().getWithOffset(8),
                             Alignment);

  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);

  SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                       dl, MVT::f128);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Hi64,
                               SubRegEven);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Lo64,
                               SubRegOdd);
  SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
                           SDValue(Lo64.getNode(), 1) };
  SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
  return DAG.getMergeValues(Ops, dl);
}
static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
{
  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());

  EVT MemVT = LdNode->getMemoryVT();
  if (MemVT == MVT::f128)
    return LowerF128Load(Op, DAG);

  return Op;
}
// Lower a f128 store into two f64 stores.
static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
  assert(StNode->getOffset().isUndef() && "Unexpected node type");

  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);

  SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegEven);
  SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegOdd);

  Align Alignment = commonAlignment(StNode->getOriginalAlign(), 8);

  SDValue OutChains[2];
  OutChains[0] =
      DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
                   StNode->getBasePtr(), StNode->getPointerInfo(),
                   Alignment);
  EVT addrVT = StNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              StNode->getBasePtr(),
                              DAG.getConstant(8, dl, addrVT));
  OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
                              StNode->getPointerInfo().getWithOffset(8),
                              Alignment);
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
{
  SDLoc dl(Op);
  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());

  EVT MemVT = St->getMemoryVT();
  if (MemVT == MVT::f128)
    return LowerF128Store(Op, DAG);

  if (MemVT == MVT::i64) {
    // Custom handling for i64 stores: turn it into a bitcast and a
    // v2i32 store.
    SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
    SDValue Chain = DAG.getStore(
        St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
        St->getOriginalAlign(), St->getMemOperand()->getFlags(),
        St->getAAInfo());
    return Chain;
  }

  return SDValue();
}
static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
  assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
         && "invalid opcode");

  SDLoc dl(Op);

  if (Op.getValueType() == MVT::f64)
    return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
  if (Op.getValueType() != MVT::f128)
    return Op;

  // Lower fabs/fneg on f128 to fabs/fneg on f64
  // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
  // (As with LowerF64Op, on little-endian, we need to negate the odd
  // subreg instead)

  SDValue SrcReg128 = Op.getOperand(0);
  SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
                                            SrcReg128);
  SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
                                            SrcReg128);

  if (DAG.getDataLayout().isLittleEndian()) {
    if (isV9)
      Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
    else
      Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
  } else {
    if (isV9)
      Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
    else
      Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
  }

  SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, MVT::f128), 0);
  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
                                        DstReg128, Hi64);
  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
                                        DstReg128, Lo64);
  return DstReg128;
}
static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {

  if (Op.getValueType() != MVT::i64)
    return Op;

  SDLoc dl(Op);
  SDValue Src1 = Op.getOperand(0);
  SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
  SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
                               DAG.getConstant(32, dl, MVT::i64));
  Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);

  SDValue Src2 = Op.getOperand(1);
  SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
  SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
                               DAG.getConstant(32, dl, MVT::i64));
  Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);

  bool hasChain = false;
  unsigned hiOpc = Op.getOpcode();
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Invalid opcode");
  case ISD::ADDC: hiOpc = ISD::ADDE; break;
  case ISD::ADDE: hasChain = true; break;
  case ISD::SUBC: hiOpc = ISD::SUBE; break;
  case ISD::SUBE: hasChain = true; break;
  }
  SDValue Lo;
  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
  if (hasChain) {
    Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
                     Op.getOperand(2));
  } else {
    Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
  }
  SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
  SDValue Carry = Hi.getValue(1);

  Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
  Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
  Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
                   DAG.getConstant(32, dl, MVT::i64));

  SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
  SDValue Ops[2] = { Dst, Carry };
  return DAG.getMergeValues(Ops, dl);
}
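// In other words, a 64-bit carry-setting add/sub on 32-bit SPARC is done in
// two 32-bit halves: the low halves use the original carry-producing opcode
// and the high halves use the carry-consuming ADDE/SUBE glued to the low
// result.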
// Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
// in LegalizeDAG.cpp except the order of arguments to the library function.
static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
                                const SparcTargetLowering &TLI)
{
  unsigned opcode = Op.getOpcode();
  assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");

  bool isSigned = (opcode == ISD::SMULO);
  EVT VT = MVT::i64;
  EVT WideVT = MVT::i128;
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);

  if (LHS.getValueType() != VT)
    return Op;

  SDValue ShiftAmt = DAG.getConstant(63, dl, VT);

  SDValue RHS = Op.getOperand(1);
  SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
  SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
  SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };

  TargetLowering::MakeLibCallOptions CallOptions;
  CallOptions.setSExt(isSigned);
  SDValue MulResult = TLI.makeLibCall(DAG,
                                      RTLIB::MUL_I128, WideVT,
                                      Args, CallOptions, dl).first;
  SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
                                   MulResult, DAG.getIntPtrConstant(0, dl));
  SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
                                MulResult, DAG.getIntPtrConstant(1, dl));
  if (isSigned) {
    SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
    TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
  } else {
    TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
                           ISD::SETNE);
  }
  // MulResult is a node with an illegal type. Because such things are not
  // generally permitted during this phase of legalization, ensure that
  // nothing is left using the node. The above EXTRACT_ELEMENT nodes should
  // have been folded.
  assert(MulResult->use_empty() && "Illegally typed node still in use!");

  SDValue Ops[2] = { BottomHalf, TopHalf } ;
  return DAG.getMergeValues(Ops, dl);
}
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
    // Expand with a fence.
    return SDValue();
  }

  // Monotonic load/stores are legal.
  return Op;
}
SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getRegister(SP::G7, PtrVT);
  }
  }
}
SDValue SparcTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {

  bool hasHardQuad = Subtarget->hasHardQuad();
  bool isV9        = Subtarget->isV9();

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Should not custom lower this!");

  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG, *this,
                                                       Subtarget);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG,
                                                      Subtarget);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::FP_TO_UINT:         return LowerFP_TO_UINT(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::UINT_TO_FP:         return LowerUINT_TO_FP(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::BR_CC:              return LowerBR_CC(Op, DAG, *this,
                                                  hasHardQuad);
  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG, *this,
                                                      hasHardQuad);
  case ISD::VASTART:            return LowerVASTART(Op, DAG, *this);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
                                                               Subtarget);

  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::FADD:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::ADD_F128), 2);
  case ISD::FSUB:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::SUB_F128), 2);
  case ISD::FMUL:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::MUL_F128), 2);
  case ISD::FDIV:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::DIV_F128), 2);
  case ISD::FSQRT:              return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::SQRT_F128),1);
  case ISD::FABS:
  case ISD::FNEG:               return LowerFNEGorFABS(Op, DAG, isV9);
  case ISD::FP_EXTEND:          return LowerF128_FPEXTEND(Op, DAG, *this);
  case ISD::FP_ROUND:           return LowerF128_FPROUND(Op, DAG, *this);
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE:               return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
  case ISD::UMULO:
  case ISD::SMULO:              return LowerUMULO_SMULO(Op, DAG, *this);
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:       return LowerATOMIC_LOAD_STORE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  }
}
SDValue SparcTargetLowering::bitcastConstantFPToInt(ConstantFPSDNode *C,
                                                    const SDLoc &DL,
                                                    SelectionDAG &DAG) const {
  APInt V = C->getValueAPF().bitcastToAPInt();
  SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
  SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
  if (DAG.getDataLayout().isLittleEndian())
    std::swap(Lo, Hi);
  return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
}
SDValue SparcTargetLowering::PerformBITCASTCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
  SDLoc dl(N);
  SDValue Src = N->getOperand(0);

  if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
      Src.getSimpleValueType() == MVT::f64)
    return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);

  return SDValue();
}
SDValue SparcTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BITCAST:
    return PerformBITCASTCombine(N, DCI);
  }
  return SDValue();
}
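// Illustrative note (not from the original source): this combine fires on
// a DAG node like
//   v2i32 = bitcast (f64 ConstantFP<1.0>)
// replacing a would-be constant-pool load of the double with an inline
// v2i32 build_vector of its two halves.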
MachineBasicBlock *
SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default: llvm_unreachable("Unknown SELECT_CC!");
  case SP::SELECT_CC_Int_ICC:
  case SP::SELECT_CC_FP_ICC:
  case SP::SELECT_CC_DFP_ICC:
  case SP::SELECT_CC_QFP_ICC:
    return expandSelectCC(MI, BB, SP::BCOND);
  case SP::SELECT_CC_Int_FCC:
  case SP::SELECT_CC_FP_FCC:
  case SP::SELECT_CC_DFP_FCC:
  case SP::SELECT_CC_QFP_FCC:
    return expandSelectCC(MI, BB, SP::FBCOND);
  }
}
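// Illustrative note: all eight pseudo-instructions expand into the same
// triangle below; the ICC variants branch on the integer condition codes
// (SP::BCOND) and the FCC variants on the floating-point ones (SP::FBCOND).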
MachineBasicBlock *
SparcTargetLowering::expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned BROpcode) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // triangle control-flow pattern.  The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and the condition code for the
  // branch.
  //
  // We produce the following control flow:
  //     ThisMBB
  //     |  \
  //     |  IfFalseMBB
  //     |  /
  //    SinkMBB
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  MachineBasicBlock *ThisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, IfFalseMBB);
  F->insert(It, SinkMBB);

  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
                  std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);

  // Set the new successors for ThisMBB.
  ThisMBB->addSuccessor(IfFalseMBB);
  ThisMBB->addSuccessor(SinkMBB);

  BuildMI(ThisMBB, dl, TII.get(BROpcode))
    .addMBB(SinkMBB)
    .addImm(CC);

  // IfFalseMBB just falls through to SinkMBB.
  IfFalseMBB->addSuccessor(SinkMBB);

  // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
  BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
          MI.getOperand(0).getReg())
      .addReg(MI.getOperand(1).getReg())
      .addMBB(ThisMBB)
      .addReg(MI.getOperand(2).getReg())
      .addMBB(IfFalseMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return SinkMBB;
}
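// Illustrative sketch (not from the original source) of the expansion for
//   %dst = SELECT_CC_Int_ICC %tval, %fval, <cc>
//
//   ThisMBB:    b<cc> SinkMBB        ; conditional branch, opcode BROpcode
//   IfFalseMBB:                      ; falls through
//   SinkMBB:    %dst = PHI [%tval, ThisMBB], [%fval, IfFalseMBB]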
//===----------------------------------------------------------------------===//
//                         Sparc Inline Assembly Support
//===----------------------------------------------------------------------===//
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
SparcTargetLowering::ConstraintType
SparcTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    case 'r':
    case 'f':
    case 'e':
      return C_RegisterClass;
    case 'I': // SIMM13
      return C_Immediate;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}
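// Illustrative example (not from the original source): in
//   asm("add %1, %2, %0" : "=r"(res) : "r"(a), "I"(42));
// the "r" operands classify as C_RegisterClass here, while "I" (a signed
// 13-bit immediate) is treated as an immediate constraint.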
TargetLowering::ConstraintWeight SparcTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;

  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'I': // SIMM13
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (isInt<13>(C->getSExtValue()))
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}
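// Illustrative example: for an "I" operand, an integer constant in
// [-4096, 4095] satisfies isInt<13> and is weighted CW_Constant; any other
// value keeps the default weighting from the base class.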
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void SparcTargetLowering::
LowerAsmOperandForConstraint(SDValue Op,
                             std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  SDValue Result(nullptr, 0);

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I': // SIMM13
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<13>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
      return;
    }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
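// Illustrative example (not from the original source): with the 'I'
// constraint, a constant 42 is rewritten into a target constant and later
// printed as an immediate (e.g. "add %o0, 42, %o1"), while 5000 exceeds
// simm13, so nothing is added to Ops and the operand is rejected.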
std::pair<unsigned, const TargetRegisterClass *>
SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      if (VT == MVT::v2i32)
        return std::make_pair(0U, &SP::IntPairRegClass);
      else if (Subtarget->is64Bit())
        return std::make_pair(0U, &SP::I64RegsRegClass);
      else
        return std::make_pair(0U, &SP::IntRegsRegClass);
    case 'f':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &SP::FPRegsRegClass);
      else if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &SP::LowDFPRegsRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SP::LowQFPRegsRegClass);
      // This will generate an error message
      return std::make_pair(0U, nullptr);
    case 'e':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &SP::FPRegsRegClass);
      else if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &SP::DFPRegsRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SP::QFPRegsRegClass);
      // This will generate an error message
      return std::make_pair(0U, nullptr);
    }
  } else if (!Constraint.empty() && Constraint.size() <= 5
             && Constraint[0] == '{' && *(Constraint.end()-1) == '}') {
    // constraint = '{r<d>}'
    // Remove the braces from around the name.
    StringRef name(Constraint.data()+1, Constraint.size()-2);
    // Handle register aliases:
    //       r0-r7   -> g0-g7
    //       r8-r15  -> o0-o7
    //       r16-r23 -> l0-l7
    //       r24-r31 -> i0-i7
    uint64_t intVal = 0;
    if (name.substr(0, 1).equals("r")
        && !name.substr(1).getAsInteger(10, intVal) && intVal <= 31) {
      const char regTypes[] = { 'g', 'o', 'l', 'i' };
      char regType = regTypes[intVal/8];
      char regIdx = '0' + (intVal % 8);
      char tmp[] = { '{', regType, regIdx, '}', 0 };
      std::string newConstraint = std::string(tmp);
      return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
                                                          VT);
    }
    if (name.substr(0, 1).equals("f") &&
        !name.substr(1).getAsInteger(10, intVal) && intVal <= 63) {
      std::string newConstraint;

      if (VT == MVT::f32 || VT == MVT::Other) {
        newConstraint = "{f" + utostr(intVal) + "}";
      } else if (VT == MVT::f64 && (intVal % 2 == 0)) {
        newConstraint = "{d" + utostr(intVal / 2) + "}";
      } else if (VT == MVT::f128 && (intVal % 4 == 0)) {
        newConstraint = "{q" + utostr(intVal / 4) + "}";
      } else {
        return std::make_pair(0U, nullptr);
      }
      return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
                                                          VT);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
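// Illustrative examples (not from the original source) of the alias
// rewriting above:
//   {r10}          -> intVal = 10, regTypes[1] = 'o' -> retried as {o2}
//   {f2} with f64  -> retried as {d1}, the double overlapping f2/f3
//   {f3} with f64  -> rejected: a double needs an even-numbered f-register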
bool
SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The Sparc target isn't yet aware of offsets.
  return false;
}
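// Illustrative consequence: an address such as @g + 8 keeps the +8 as a
// separate add after the lowered global address rather than folding the
// offset into the relocation pair itself.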
void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>& Results,
                                             SelectionDAG &DAG) const {

  SDLoc dl(N);

  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");

  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // Custom lower only if it involves f128 or i64.
    if (N->getOperand(0).getValueType() != MVT::f128
        || N->getValueType(0) != MVT::i64)
      return;
    libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
               ? RTLIB::FPTOSINT_F128_I64
               : RTLIB::FPTOUINT_F128_I64);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::READCYCLECOUNTER: {
    assert(Subtarget->hasLeonCycleCounter());
    SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
    SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
    SDValue Ops[] = { Lo, Hi };
    SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
    Results.push_back(Pair);
    Results.push_back(N->getOperand(0));
    return;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    // Custom lower only if it involves f128 or i64.
    if (N->getValueType(0) != MVT::f128
        || N->getOperand(0).getValueType() != MVT::i64)
      return;

    libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
               ? RTLIB::SINTTOFP_I64_F128
               : RTLIB::UINTTOFP_I64_F128);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::LOAD: {
    LoadSDNode *Ld = cast<LoadSDNode>(N);
    // Custom handling only for i64: turn i64 load into a v2i32 load,
    // and a bitcast.
    if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
      return;

    SDValue LoadRes = DAG.getExtLoad(
        Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
        Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32,
        Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags(),
        Ld->getAAInfo());

    SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
    Results.push_back(Res);
    Results.push_back(LoadRes.getValue(1));
    return;
  }
  }
}
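// Illustrative example: on 32-bit SPARC, type legalization of
//   %v = load i64, i64* %p
// takes the ISD::LOAD path above, producing one v2i32 load plus a bitcast
// back to i64, so the access can still be selected as a doubleword load
// rather than being split into two i32 loads.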
// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool SparcTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void SparcTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}
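// Illustrative note (assumption about the Linux runtime): glibc keeps the
// stack-protector canary in thread-local storage, so LOAD_STACK_GUARD can
// read it directly and no __stack_chk_guard global needs to be declared;
// both hooks therefore defer to the default TargetLowering behavior only
// on non-Linux targets.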