//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Sparc uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "SparcISelLowering.h"
#include "MCTargetDesc/SparcMCExpr.h"
#include "MCTargetDesc/SparcMCTargetDesc.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcRegisterInfo.h"
#include "SparcTargetMachine.h"
#include "SparcTargetObjectFile.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
                                 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  assert (ArgFlags.isSRet());

  // Assign SRet argument.
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                         0,
                                         LocVT, LocInfo));
  return true;
}
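
// Note that the memory offset assigned above is 0; on 32-bit SPARC the
// actual sret pointer travels through the reserved word at %sp+64 in the
// caller / %fp+64 in the callee (see LowerCall_32 and
// LowerFormalArguments_32 below).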
static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
                                     MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                     ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };
  // Try to get first reg.
  if (Register Reg = State.AllocateReg(RegList)) {
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  } else {
    // Assign whole thing in stack.
    State.addLoc(CCValAssign::getCustomMem(
        ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
    return true;
  }

  // Try to get second reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getCustomMem(
        ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
  return true;
}
static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
                                         MVT &LocVT,
                                         CCValAssign::LocInfo &LocInfo,
                                         ISD::ArgFlagsTy &ArgFlags,
                                         CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };
  // Try to get first reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  // Try to get second reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  return true;
}
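
// Unlike CC_Sparc_Assign_Split_64 above, there is no stack fallback here:
// if either half of a returned value fails to get a register, the calling
// convention check fails and the return value is passed indirectly instead.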
// Allocate a full-sized argument for the 64-bit ABI.
static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,
                                    MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert((LocVT == MVT::f32 || LocVT == MVT::f128
          || LocVT.getSizeInBits() == 64) &&
         "Can't handle non-64 bits locations");

  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
  unsigned size = (LocVT == MVT::f128) ? 16 : 8;
  Align alignment = (LocVT == MVT::f128) ? Align(16) : Align(8);
  unsigned Offset = State.AllocateStack(size, alignment);
  unsigned Reg = 0;

  if (LocVT == MVT::i64 && Offset < 6*8)
    // Promote integers to %i0-%i5.
    Reg = SP::I0 + Offset/8;
  else if (LocVT == MVT::f64 && Offset < 16*8)
    // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
    Reg = SP::D0 + Offset/8;
  else if (LocVT == MVT::f32 && Offset < 16*8)
    // Promote floats to %f1, %f3, ...
    Reg = SP::F1 + Offset/4;
  else if (LocVT == MVT::f128 && Offset < 16*8)
    // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
    Reg = SP::Q0 + Offset/16;

  // Promote to register when possible, otherwise use the stack slot.
  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // Bail out if this is a return CC and we run out of registers to place
  // values into.
  if (IsReturn)
    return false;

  // This argument goes on the stack in an 8-byte slot.
  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
  if (LocVT == MVT::f32)
    Offset += 4;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}
// Allocate a half-sized argument for the 64-bit ABI.
//
// This is used when passing { float, int } structs by value in registers.
static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,
                                    MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
  unsigned Offset = State.AllocateStack(4, Align(4));

  if (LocVT == MVT::f32 && Offset < 16*8) {
    // Promote floats to %f0-%f31.
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
                                     LocVT, LocInfo));
    return true;
  }

  if (LocVT == MVT::i32 && Offset < 6*8) {
    // Promote integers to %i0-%i5, using half the register.
    unsigned Reg = SP::I0 + Offset/8;
    LocVT = MVT::i64;
    LocInfo = CCValAssign::AExt;

    // Set the Custom bit if this i32 goes in the high bits of a register.
    if (Offset % 8 == 0)
      State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
                                             LocVT, LocInfo));
    else
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // Bail out if this is a return CC and we run out of registers to place
  // values into.
  if (IsReturn)
    return false;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                            CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                            CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                               CCValAssign::LocInfo &LocInfo,
                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                               CCValAssign::LocInfo &LocInfo,
                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}
#include "SparcGenCallingConv.inc"
// The calling conventions in SparcCallingConv.td are described in terms of the
// callee's register window. This function translates registers to the
// corresponding caller window %o register.
static unsigned toCallerWindow(unsigned Reg) {
  static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
                "Unexpected enum");
  if (Reg >= SP::I0 && Reg <= SP::I7)
    return Reg - SP::I0 + SP::O0;
  return Reg;
}
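
// For example, a value the callee receives in %i0 is set up by the caller
// in %o0, so toCallerWindow(SP::I0) yields SP::O0; registers outside
// %i0-%i7 (globals, locals, FP registers) pass through unchanged.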
bool SparcTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? RetCC_Sparc64
                                                       : RetCC_Sparc32);
}
SDValue
SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  if (Subtarget->is64Bit())
    return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
}
SDValue
SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Make room for the return address offset.
  RetOps.push_back(SDValue());

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::v2i32);
      // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
      // happen by default if this wasn't a legal type)
      SDValue Part0 =
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Arg,
                      DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 =
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Arg,
                      DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));

      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Glue);
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
                               Glue);
    } else
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
  // If the function returns a struct, copy the SRetReturnReg to I0
  if (MF.getFunction().hasStructRetAttr()) {
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    Register Reg = SFI->getSRetReturnReg();
    if (!Reg)
      llvm_unreachable("sret virtual register not created in the entry block");
    auto PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
    Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
    RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
  }

  RetOps[0] = Chain; // Update chain.
  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
}
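
// The extra 4 bytes in RetAddrOffset skip the "unimp <size>" marker word
// that SPARC V8 callers emit after a call returning a struct; the callee
// then returns past it with a jump to %i7+12 instead of %i7+8.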
// Lower return values for the 64-bit ABI.
// Return values are passed exactly the same way as function arguments.
SDValue
SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // The second operand on the return instruction is the return address offset.
  // The return address is always %i7+8 with the 64-bit ABI.
  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue OutVal = OutVals[i];

    // Integer return values must be sign or zero extended by the callee.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::ZExt:
      OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::AExt:
      OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    // The custom bit on an i32 return value indicates that it should be passed
    // in the high bits of the register.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
      OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
                           DAG.getConstant(32, DL, MVT::i32));

      // The next value may go in the low bits of the same register.
      // Handle both at once.
      if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
        SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
        OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
        // Skip the next value, it's already done.
        ++i;
      }
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
}
SDValue SparcTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
                                   DL, DAG, InVals);
  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
                                 DL, DAG, InVals);
}
/// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
/// passed in either one or two GPRs, including FP values. TODO: we should
/// pass FP values in FP registers for fastcc functions.
SDValue SparcTargetLowering::LowerFormalArguments_32(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);

  const unsigned StackOffset = 92;
  bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();

  unsigned InIdx = 0;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
    CCValAssign &VA = ArgLocs[i];

    if (Ins[InIdx].Flags.isSRet()) {
      if (InIdx != 0)
        report_fatal_error("sparc only supports sret on the first parameter");
      // Get SRet from [%fp+64].
      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
      SDValue Arg =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
      InVals.push_back(Arg);
      continue;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom()) {
        assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

        Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
        MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
        SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);

        assert(i+1 < e);
        CCValAssign &NextVA = ArgLocs[++i];

        SDValue LoVal;
        if (NextVA.isMemLoc()) {
          int FrameIdx = MF.getFrameInfo().
            CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
          LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
        } else {
          Register loReg = MF.addLiveIn(NextVA.getLocReg(),
                                        &SP::IntRegsRegClass);
          LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
        }

        if (IsLittleEndian)
          std::swap(LoVal, HiVal);

        SDValue WholeValue =
            DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
        WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
        InVals.push_back(WholeValue);
        continue;
      }

      Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
      SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      if (VA.getLocVT() == MVT::f32)
        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
      else if (VA.getLocVT() != MVT::i32) {
        Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
                          DAG.getValueType(VA.getLocVT()));
        Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
      }
      InVals.push_back(Arg);
      continue;
    }

    assert(VA.isMemLoc());

    unsigned Offset = VA.getLocMemOffset()+StackOffset;
    auto PtrVT = getPointerTy(DAG.getDataLayout());

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
      // If it is double-word aligned, just load.
      if (Offset % 8 == 0) {
        int FI = MF.getFrameInfo().CreateFixedObject(8, Offset, true);
        SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
        SDValue Load =
            DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
        InVals.push_back(Load);
        continue;
      }

      int FI = MF.getFrameInfo().CreateFixedObject(4, Offset, true);
      SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
      SDValue HiVal =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
      int FI2 = MF.getFrameInfo().CreateFixedObject(4, Offset+4, true);
      SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);

      SDValue LoVal =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());

      if (IsLittleEndian)
        std::swap(LoVal, HiVal);

      SDValue WholeValue =
          DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
      WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
      InVals.push_back(WholeValue);
      continue;
    }

    int FI = MF.getFrameInfo().CreateFixedObject(4, Offset, true);
    SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
    SDValue Load;
    if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
      Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
    } else if (VA.getValVT() == MVT::f128) {
      report_fatal_error("SPARCv8 does not handle f128 in calls; "
                         "pass indirectly");
    } else {
      // We shouldn't see any other value types here.
      llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
    }
    InVals.push_back(Load);
  }

  if (MF.getFunction().hasStructRetAttr()) {
    // Copy the SRet Argument to SRetReturnReg.
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    Register Reg = SFI->getSRetReturnReg();
    if (!Reg) {
      Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
      SFI->setSRetReturnReg(Reg);
    }
    SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
  }

  // Store remaining ArgRegs to the stack if this is a varargs function.
  if (isVarArg) {
    static const MCPhysReg ArgRegs[] = {
      SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
    };
    unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
    const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
    unsigned ArgOffset = CCInfo.getStackSize();
    if (NumAllocated == 6)
      ArgOffset += StackOffset;
    else {
      assert(!ArgOffset);
      ArgOffset = 68+4*NumAllocated;
    }

    // Remember the vararg offset for the va_start implementation.
    FuncInfo->setVarArgsFrameOffset(ArgOffset);

    std::vector<SDValue> OutChains;

    for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
      Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
      SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);

      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset, true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);

      OutChains.push_back(
          DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
      ArgOffset += 4;
    }

    if (!OutChains.empty()) {
      OutChains.push_back(Chain);
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
    }
  }

  return Chain;
}
// Lower formal arguments for the 64 bit ABI.
SDValue SparcTargetLowering::LowerFormalArguments_64(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze arguments according to CC_Sparc64.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);

  // The argument array begins at %fp+BIAS+128, after the register save area.
  const unsigned ArgArea = 128;

  for (const CCValAssign &VA : ArgLocs) {
    if (VA.isRegLoc()) {
      // This argument is passed in a register.
      // All integer register arguments are promoted by the caller to i64.

      // Create a virtual register for the promoted live-in value.
      Register VReg = MF.addLiveIn(VA.getLocReg(),
                                   getRegClassFor(VA.getLocVT()));
      SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());

      // Get the high bits for i32 struct elements.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom())
        Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
                          DAG.getConstant(32, DL, MVT::i32));

      // The caller promoted the argument, so insert an Assert?ext SDNode so we
      // won't promote the value again in this function.
      switch (VA.getLocInfo()) {
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      default:
        break;
      }

      // Truncate the register down to the argument type.
      if (VA.isExtInLoc())
        Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);

      InVals.push_back(Arg);
      continue;
    }

    // The registers are exhausted. This argument was passed on the stack.
    assert(VA.isMemLoc());
    // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
    // beginning of the arguments area at %fp+BIAS+128.
    unsigned Offset = VA.getLocMemOffset() + ArgArea;
    unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
    // Adjust offset for extended arguments, SPARC is big-endian.
    // The caller will have written the full slot with extended bytes, but we
    // prefer our own extending loads.
    if (VA.isExtInLoc())
      Offset += 8 - ValSize;
    int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
    InVals.push_back(
        DAG.getLoad(VA.getValVT(), DL, Chain,
                    DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
                    MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!IsVarArg)
    return Chain;

  // This function takes variable arguments, some of which may have been passed
  // in registers %i0-%i5. Variable floating point arguments are never passed
  // in floating point registers. They go on %i0-%i5 or on the stack like
  // integer arguments.
  //
  // The va_start intrinsic needs to know the offset to the first variable
  // argument.
  unsigned ArgOffset = CCInfo.getStackSize();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  // Skip the 128 bytes of register save area.
  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
                                  Subtarget->getStackPointerBias());

  // Save the variable arguments that were passed in registers.
  // The caller is required to reserve stack space for 6 arguments regardless
  // of how many arguments were actually passed.
  SmallVector<SDValue, 8> OutChains;
  for (; ArgOffset < 6*8; ArgOffset += 8) {
    Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
    SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
    int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
    auto PtrVT = getPointerTy(MF.getDataLayout());
    OutChains.push_back(
        DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
                     MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);

  return Chain;
}
// Check whether any of the argument registers are reserved
static bool isAnyArgRegReserved(const SparcRegisterInfo *TRI,
                                const MachineFunction &MF) {
  // The register window design means that outgoing parameters at O*
  // will appear in the callee as I*.
  // Be conservative and check both sides of the register names.
  bool Outgoing =
      llvm::any_of(SP::GPROutgoingArgRegClass, [TRI, &MF](MCPhysReg r) {
        return TRI->isReservedReg(MF, r);
      });
  bool Incoming =
      llvm::any_of(SP::GPRIncomingArgRegClass, [TRI, &MF](MCPhysReg r) {
        return TRI->isReservedReg(MF, r);
      });
  return Outgoing || Incoming;
}
static void emitReservedArgRegCallError(const MachineFunction &MF) {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{
      F, ("SPARC doesn't support"
          " function calls if any of the argument registers is reserved.")});
}
SDValue
SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerCall_64(CLI, InVals);
  return LowerCall_32(CLI, InVals);
}
static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
                                const CallBase *Call) {
  if (Call)
    return Call->hasFnAttr(Attribute::ReturnsTwice);

  const Function *CalleeFn = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    CalleeFn = dyn_cast<Function>(G->getGlobal());
  } else if (ExternalSymbolSDNode *E =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const Function &Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn.getParent();
    const char *CalleeName = E->getSymbol();
    CalleeFn = M->getFunction(CalleeName);
  }

  if (!CalleeFn)
    return false;
  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
}
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization.
bool SparcTargetLowering::IsEligibleForTailCallOptimization(
    CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const {

  auto &Outs = CLI.Outs;
  auto &Caller = MF.getFunction();

  // Do not tail call opt functions with "disable-tail-calls" attribute.
  if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
    return false;

  // Do not tail call opt if the stack is used to pass parameters.
  // 64-bit targets have a slightly higher limit since the ABI requires
  // to allocate some space even when all the parameters fit inside registers.
  unsigned StackSizeLimit = Subtarget->is64Bit() ? 48 : 0;
  if (CCInfo.getStackSize() > StackSizeLimit)
    return false;

  // Do not tail call opt if either the callee or caller returns
  // a struct and the other does not.
  if (!Outs.empty() && Caller.hasStructRetAttr() != Outs[0].Flags.isSRet())
    return false;

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call.
  for (auto &Arg : Outs)
    if (Arg.Flags.isByVal())
      return false;

  return true;
}
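
// The 48-byte limit used above for 64-bit targets matches the six 8-byte
// outgoing-argument words the ABI always reserves (see StackReserved in
// LowerCall_64), so calls whose arguments all fit in registers still qualify.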
// Lower a call for the 32-bit ABI.
SDValue
SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);

  isTailCall = isTailCall && IsEligibleForTailCallOptimization(
                                 CCInfo, CLI, DAG.getMachineFunction());

  // Get the size of the outgoing arguments stack space requirement.
  unsigned ArgsSize = CCInfo.getStackSize();

  // Keep stack frames 8-byte aligned.
  ArgsSize = (ArgsSize+7) & ~7;

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  // Create local copies for byval args.
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    Align Alignment = Flags.getNonZeroByValAlign();

    if (Size > 0U) {
      int FI = MFI.CreateStackObject(Size, Alignment, false);
      SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);

      Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
                            false,        // isVolatile,
                            (Size <= 32), // AlwaysInline if size <= 32,
                            /*CI=*/nullptr, std::nullopt, MachinePointerInfo(),
                            MachinePointerInfo());
      ByValArgs.push_back(FIPtr);
    } else {
      SDValue nullVal;
      ByValArgs.push_back(nullVal);
    }
  }

  assert(!isTailCall || ArgsSize == 0);

  if (!isTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  const unsigned StackOffset = 92;
  bool hasStructRetAttr = false;
  unsigned SRetArgSize = 0;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];

    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Use local copy if it is a byval arg.
    if (Flags.isByVal()) {
      Arg = ByValArgs[byvalArgIdx++];
      if (!Arg)
        continue;
    }

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (Flags.isSRet()) {
      assert(VA.needsCustom());

      if (isTailCall)
        continue;

      // store SRet argument in %sp+64
      SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
      SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
      PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
      MemOpChains.push_back(
          DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
      hasStructRetAttr = true;
      // sret only allowed on first argument
      assert(Outs[realArgIdx].OrigArgIndex == 0);
      SRetArgSize =
          DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
      continue;
    }

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

      if (VA.isMemLoc()) {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // if it is double-word aligned, just store.
        if (Offset % 8 == 0) {
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
          continue;
        }
      }

      if (VA.getLocVT() == MVT::f64) {
        // Move the float value from float registers into the
        // integer registers.
        if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
          Arg = bitcastConstantFPToInt(C, dl, DAG);
        else
          Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
      }

      SDValue Part0 =
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Arg,
                      DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 =
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Arg,
                      DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));

      if (VA.isRegLoc()) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
        assert(i+1 != e);
        CCValAssign &NextVA = ArgLocs[++i];
        if (NextVA.isRegLoc()) {
          RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
        } else {
          // Store the second part in stack.
          unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
        }
      } else {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // Store the first part.
        SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
        SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
        // Store the second part.
        PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
      }
      continue;
    }

    // Arguments that can be passed on register must be kept at
    // RegsToPass vector
    if (VA.isRegLoc()) {
      if (VA.getLocVT() != MVT::f32) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
        continue;
      }
      Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
                                           dl);
    PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before any copies into physregs.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InGlue is necessary since all emitted instructions must be
  // stuck together.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Register Reg = RegsToPass[i].first;
    if (!isTailCall)
      Reg = toCallerWindow(Reg);
    Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30
                                        : SparcMCExpr::VK_Sparc_WDISP30;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);

  // Returns a chain & a flag for retval copy to use
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  if (hasStructRetAttr)
    Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Register Reg = RegsToPass[i].first;
    if (!isTailCall)
      Reg = toCallerWindow(Reg);
    Ops.push_back(DAG.getRegister(Reg, RegsToPass[i].second.getValueType()));
  }

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice)
           ? TRI->getRTCallPreservedMask(CallConv)
           : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));

  if (isAnyArgRegReserved(TRI, MF))
    emitReservedArgRegCallError(MF);

  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InGlue.getNode())
    Ops.push_back(InGlue);

  if (isTailCall) {
    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(SPISD::TAIL_CALL, dl, MVT::Other, Ops);
  }

  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, dl);
  InGlue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    assert(RVLocs[i].isRegLoc() && "Can only return in registers!");
    if (RVLocs[i].getLocVT() == MVT::v2i32) {
      SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
      SDValue Lo = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InGlue);
      Chain = Lo.getValue(1);
      InGlue = Lo.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
                        DAG.getConstant(0, dl, MVT::i32));
      SDValue Hi = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InGlue);
      Chain = Hi.getValue(1);
      InGlue = Hi.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
                        DAG.getConstant(1, dl, MVT::i32));
      InVals.push_back(Vec);
    } else {
      Chain =
          DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
                             RVLocs[i].getValVT(), InGlue)
              .getValue(1);
      InGlue = Chain.getValue(2);
      InVals.push_back(Chain.getValue(0));
    }
  }

  return Chain;
}
// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register SparcTargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                                const MachineFunction &MF) const {
  Register Reg = StringSwitch<Register>(RegName)
    .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
    .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
    .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
    .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
    .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
    .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
    .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
    .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
    .Default(0);

  // If we're directly referencing register names
  // (e.g. in GCC C extension `register int r asm("g1");`),
  // make sure that said register is in the reserve list.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  if (!TRI->isReservedReg(MF, Reg))
    Reg = 0;

  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}
// Fixup floating point arguments in the ... part of a varargs call.
//
// The SPARC v9 ABI requires that floating point arguments are treated the same
// as integers when calling a varargs function. This does not apply to the
// fixed arguments that are part of the function's prototype.
//
// This function post-processes a CCValAssign array created by
// AnalyzeCallOperands().
static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
                                   ArrayRef<ISD::OutputArg> Outs) {
  for (CCValAssign &VA : ArgLocs) {
    MVT ValTy = VA.getLocVT();
    // FIXME: What about f32 arguments? C promotes them to f64 when calling
    // varargs functions.
    if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
      continue;
    // The fixed arguments to a varargs function still go in FP registers.
    if (Outs[VA.getValNo()].IsFixed)
      continue;

    // This floating point argument should be reassigned.
    // Determine the offset into the argument array.
    Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
    unsigned argSize  = (ValTy == MVT::f64) ? 8 : 16;
    unsigned Offset = argSize * (VA.getLocReg() - firstReg);
    assert(Offset < 16*8 && "Offset out of range, bad register enum?");

    if (Offset < 6*8) {
      // This argument should go in %i0-%i5.
      unsigned IReg = SP::I0 + Offset/8;
      if (ValTy == MVT::f64)
        // Full register, just bitconvert into i64.
        VA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), IReg, MVT::i64,
                                 CCValAssign::BCvt);
      else {
        assert(ValTy == MVT::f128 && "Unexpected type!");
        // Full register, just bitconvert into i128 -- We will lower this into
        // two i64s in LowerCall_64.
        VA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(), IReg,
                                       MVT::i128, CCValAssign::BCvt);
      }
    } else {
      // This needs to go to memory, we're out of integer registers.
      VA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(), Offset,
                               VA.getLocVT(), VA.getLocInfo());
    }
  }
}
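
// For example, a vararg double that CC_Sparc64 assigned to LLVM's D1 (%d2,
// argument-array offset 8) is reassigned here to %i1 as a bitcast i64, while
// one at offset 48 or beyond is moved to its stack slot instead.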
// Lower a call for the 64-bit ABI.
SDValue
SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);

  CLI.IsTailCall = CLI.IsTailCall && IsEligibleForTailCallOptimization(
                                         CCInfo, CLI, DAG.getMachineFunction());

  // Get the size of the outgoing arguments stack space requirement.
  // The stack offset computed by CC_Sparc64 includes all arguments.
  // Called functions expect 6 argument words to exist in the stack frame, used
  // or not.
  unsigned StackReserved = 6 * 8u;
  unsigned ArgsSize = std::max<unsigned>(StackReserved, CCInfo.getStackSize());

  // Keep stack frames 16-byte aligned.
  ArgsSize = alignTo(ArgsSize, 16);

  // Varargs calls require special treatment.
  if (CLI.IsVarArg)
    fixupVariableFloatArgs(ArgLocs, CLI.Outs);

  assert(!CLI.IsTailCall || ArgsSize == StackReserved);

  // Adjust the stack pointer to make room for the arguments.
  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
  // with more than 6 arguments.
  if (!CLI.IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);

  // Collect the set of registers to pass to the function and their values.
  // This will be emitted as a sequence of CopyToReg nodes glued to the call
  // instruction.
  SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;

  // Collect chains from all the memory operations that copy arguments to the
  // stack. They must follow the stack pointer adjustment above and precede the
  // call instruction itself.
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    SDValue Arg = CLI.OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown location info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
      // SPARC does not support i128 natively. Lower it into two i64, see below.
      if (!VA.needsCustom() || VA.getValVT() != MVT::f128
          || VA.getLocVT() != MVT::i128)
        Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom() && VA.getValVT() == MVT::f128
          && VA.getLocVT() == MVT::i128) {
        // Store and reload into the integer register reg and reg+1.
        unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
        unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
        SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
        SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
        HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
        SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
        LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);

        // Store to %sp+BIAS+128+Offset
        SDValue Store =
            DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
        // Load into Reg and Reg+1
        SDValue Hi64 =
            DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
        SDValue Lo64 =
            DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());

        Register HiReg = VA.getLocReg();
        Register LoReg = VA.getLocReg() + 1;
        if (!CLI.IsTailCall) {
          HiReg = toCallerWindow(HiReg);
          LoReg = toCallerWindow(LoReg);
        }

        RegsToPass.push_back(std::make_pair(HiReg, Hi64));
        RegsToPass.push_back(std::make_pair(LoReg, Lo64));
        continue;
      }

      // The custom bit on an i32 return value indicates that it should be
      // passed in the high bits of the register.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
        Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
                          DAG.getConstant(32, DL, MVT::i32));

        // The next value may go in the low bits of the same register.
        // Handle both at once.
        if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
            ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
          SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
                                   CLI.OutVals[i+1]);
          Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
          // Skip the next value, it's already done.
          ++i;
        }
      }

      Register Reg = VA.getLocReg();
      if (!CLI.IsTailCall)
        Reg = toCallerWindow(Reg);
      RegsToPass.push_back(std::make_pair(Reg, Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
    // The argument area starts at %fp+BIAS+128 in the callee frame,
    // %sp+BIAS+128 in ours.
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
                                           Subtarget->getStackPointerBias() +
                                           128, DL);
    PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before the call.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of CopyToReg nodes glued together with token chain and
  // glue operands which copy the outgoing args into registers. The InGlue is
  // necessary since all emitted instructions must be stuck together in order
  // to pass the live physical registers.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, DL,
                             RegsToPass[i].first, RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  SDValue Callee = CLI.Callee;
  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
  unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30
                                        : SparcMCExpr::VK_Sparc_WDISP30;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);

  // Build the operands for the call instruction itself.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
                         : TRI->getCallPreservedMask(DAG.getMachineFunction(),
                                                     CLI.CallConv));

  if (isAnyArgRegReserved(TRI, MF))
    emitReservedArgRegCallError(MF);

  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Make sure the CopyToReg nodes are glued to the call instruction which
  // consumes the registers.
  if (InGlue.getNode())
    Ops.push_back(InGlue);

  // Now the call itself.
  if (CLI.IsTailCall) {
    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(SPISD::TAIL_CALL, DL, MVT::Other, Ops);
  }
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  // Revert the stack pointer immediately after the call.
  Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, DL);
  InGlue = Chain.getValue(1);

  // Now extract the return values. This is more or less the same as
  // LowerFormalArguments_64.

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Set inreg flag manually for codegen generated library calls that
  // return float.
  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
    CLI.Ins[0].Flags.setInReg();

  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    unsigned Reg = toCallerWindow(VA.getLocReg());

    // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
    // reside in the same register in the high and low bits. Reuse the
    // CopyFromReg previous node to avoid duplicate copies.
    SDValue RV;
    if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
      if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
        RV = Chain.getValue(0);

    // But usually we'll create a new CopyFromReg for a different register.
    if (!RV.getNode()) {
      RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
      Chain = RV.getValue(1);
      InGlue = Chain.getValue(2);
    }

    // Get the high bits for i32 struct elements.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom())
      RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
                       DAG.getConstant(32, DL, MVT::i32));

    // The callee promoted the return value, so insert an Assert?ext SDNode so
    // we won't promote the value again in this function.
    switch (VA.getLocInfo()) {
    case CCValAssign::SExt:
      RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    case CCValAssign::ZExt:
      RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    default:
      break;
    }

    // Truncate the register down to the return value type.
    if (VA.isExtInLoc())
      RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);

    InVals.push_back(RV);
  }

  return Chain;
}
//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//
TargetLowering::AtomicExpansionKind
SparcTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  if (AI->getOperation() == AtomicRMWInst::Xchg &&
      AI->getType()->getPrimitiveSizeInBits() == 32)
    return AtomicExpansionKind::None; // Uses xchg instruction

  return AtomicExpansionKind::CmpXChg;
}
/// intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC
/// rcond condition.
static SPCC::CondCodes intCondCCodeToRcond(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unknown/unsigned integer condition code!");
  case ISD::SETEQ:
    return SPCC::REG_Z;
  case ISD::SETNE:
    return SPCC::REG_NZ;
  case ISD::SETLT:
    return SPCC::REG_LZ;
  case ISD::SETGT:
    return SPCC::REG_GZ;
  case ISD::SETLE:
    return SPCC::REG_LEZ;
  case ISD::SETGE:
    return SPCC::REG_GEZ;
  }
}
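
// These rcond codes map onto the V9 branch/move-on-integer-register-contents
// operations (e.g. REG_NZ corresponds to brnz/movrnz), which test a register
// value directly instead of the integer condition codes.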
/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
/// condition.
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown integer condition code!");
  case ISD::SETEQ:  return SPCC::ICC_E;
  case ISD::SETNE:  return SPCC::ICC_NE;
  case ISD::SETLT:  return SPCC::ICC_L;
  case ISD::SETGT:  return SPCC::ICC_G;
  case ISD::SETLE:  return SPCC::ICC_LE;
  case ISD::SETGE:  return SPCC::ICC_GE;
  case ISD::SETULT: return SPCC::ICC_CS;
  case ISD::SETULE: return SPCC::ICC_LEU;
  case ISD::SETUGT: return SPCC::ICC_GU;
  case ISD::SETUGE: return SPCC::ICC_CC;
  }
}
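
// Note how the unsigned comparisons use the carry bit: after a subtracting
// compare, an unsigned borrow sets carry, so SETULT maps to ICC_CS (carry
// set) and SETUGE to ICC_CC (carry clear).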
/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
/// FCC condition.
static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  case ISD::SETEQ:
  case ISD::SETOEQ: return SPCC::FCC_E;
  case ISD::SETNE:
  case ISD::SETUNE: return SPCC::FCC_NE;
  case ISD::SETLT:
  case ISD::SETOLT: return SPCC::FCC_L;
  case ISD::SETGT:
  case ISD::SETOGT: return SPCC::FCC_G;
  case ISD::SETLE:
  case ISD::SETOLE: return SPCC::FCC_LE;
  case ISD::SETGE:
  case ISD::SETOGE: return SPCC::FCC_GE;
  case ISD::SETULT: return SPCC::FCC_UL;
  case ISD::SETULE: return SPCC::FCC_ULE;
  case ISD::SETUGT: return SPCC::FCC_UG;
  case ISD::SETUGE: return SPCC::FCC_UGE;
  case ISD::SETUO:  return SPCC::FCC_U;
  case ISD::SETO:   return SPCC::FCC_O;
  case ISD::SETONE: return SPCC::FCC_LG;
  case ISD::SETUEQ: return SPCC::FCC_UE;
  }
}
SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
                                         const SparcSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));

  // Instructions which use registers as conditionals examine all the
  // bits (as does the pseudo SELECT_CC expansion). I don't think it
  // matters much whether it's ZeroOrOneBooleanContent, or
  // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
  // former.
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
  if (!Subtarget->useSoftFloat()) {
    addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
    addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
    addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
  }
  if (Subtarget->is64Bit()) {
    addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
  } else {
    // On 32bit sparc, we define a double-register 32bit register
    // class, as well. This is modeled in LLVM as a 2-vector of i32.
    addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);

    // ...but almost all operations must be expanded, so set that as
    // the default.
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      setOperationAction(Op, MVT::v2i32, Expand);
    }
    // Truncating/extending stores/loads are also not supported.
    for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);

      setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);

      setTruncStoreAction(VT, MVT::v2i32, Expand);
      setTruncStoreAction(MVT::v2i32, VT, Expand);
    }
    // However, load and store *are* legal.
    setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
    setOperationAction(ISD::STORE, MVT::v2i32, Legal);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Legal);

    // And we need to promote i64 loads/stores into vector load/store
    setOperationAction(ISD::LOAD, MVT::i64, Custom);
    setOperationAction(ISD::STORE, MVT::i64, Custom);

    // Sadly, this doesn't work:
    //    AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
    //    AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
  }
  // Turn FP extload into load/fpextend
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
  }

  // Sparc doesn't have i1 sign extending load
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // Turn FP truncstore into trunc + store.
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // Custom legalize GlobalAddress nodes into LO/HI parts.
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);

  // Sparc doesn't have sext_inreg, replace them with shl/sra
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  // Sparc has no REM or DIVREM operations.
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  // ... nor does SparcV9.
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UREM, MVT::i64, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
    setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  }

  // Custom expand fp<->sint
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Custom Expand fp<->uint
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  // Lower f16 conversion operations into library calls
  setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);

  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);

  // Sparc has no select or setcc: expand to SELECT_CC.
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::f128, Expand);

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SETCC, MVT::f128, Expand);

  // Sparc doesn't have BRCOND either, it has BR_CC.
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_CC, MVT::f128, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);

  setOperationAction(ISD::ADDC, MVT::i32, Legal);
  setOperationAction(ISD::ADDE, MVT::i32, Legal);
  setOperationAction(ISD::SUBC, MVT::i32, Legal);
  setOperationAction(ISD::SUBE, MVT::i32, Legal);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SETCC, MVT::i64, Expand);
    setOperationAction(ISD::BR_CC, MVT::i64, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);

    setOperationAction(ISD::CTPOP, MVT::i64,
                       Subtarget->usePopc() ? Legal : Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
    setOperationAction(ISD::CTLZ, MVT::i64, Expand);
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);
    setOperationAction(ISD::ROTL, MVT::i64, Expand);
    setOperationAction(ISD::ROTR, MVT::i64, Expand);
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  }
  // Atomics are supported on SparcV9. 32-bit atomics are also
  // supported by some Leon SparcV8 variants. Otherwise, atomics
  // are unsupported.
  if (Subtarget->isV9()) {
    // TODO: we _ought_ to be able to support 64-bit atomics on 32-bit sparcv9,
    // but it hasn't been implemented in the backend yet.
    if (Subtarget->is64Bit())
      setMaxAtomicSizeInBitsSupported(64);
    else
      setMaxAtomicSizeInBitsSupported(32);
  } else if (Subtarget->hasLeonCasa())
    setMaxAtomicSizeInBitsSupported(32);
  else
    setMaxAtomicSizeInBitsSupported(0);

  setMinCmpXchgSizeInBits(32);

  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);

  // Custom Lower Atomic LOAD/STORE
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
  }
  if (!Subtarget->isV9()) {
    // SparcV8 does not have FNEGD and FABSD.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f64, Custom);
  }

  setOperationAction(ISD::FSIN, MVT::f128, Expand);
  setOperationAction(ISD::FCOS, MVT::f128, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FMA, MVT::f128, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
  // Expands to [SU]MUL_LOHI.
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MUL, MVT::i32, Expand);

  if (Subtarget->useSoftMulDiv()) {
    // .umul works for both signed and unsigned
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setLibcallName(RTLIB::MUL_I32, ".umul");

    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setLibcallName(RTLIB::SDIV_I32, ".div");

    setOperationAction(ISD::UDIV, MVT::i32, Expand);
    setLibcallName(RTLIB::UDIV_I32, ".udiv");

    setLibcallName(RTLIB::SREM_I32, ".rem");
    setLibcallName(RTLIB::UREM_I32, ".urem");
  }
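
  // Background note: .umul, .div, .udiv, .rem and .urem are the classic
  // SPARC V8 ABI software integer support routines. .umul is usable for
  // MUL_I32 regardless of signedness because the low 32 bits of a 32x32
  // multiply are identical for signed and unsigned operands.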
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i64, Expand);
    setOperationAction(ISD::MULHS, MVT::i64, Expand);

    setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  }

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  // VAARG needs to be lowered to not do unaligned accesses for doubles.
  setOperationAction(ISD::VAARG, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);

  setStackPointerRegisterToSaveRestore(SP::O6);
  setOperationAction(ISD::CTPOP, MVT::i32,
                     Subtarget->usePopc() ? Legal : Expand);

  if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
    setOperationAction(ISD::LOAD, MVT::f128, Legal);
    setOperationAction(ISD::STORE, MVT::f128, Legal);
  } else {
    setOperationAction(ISD::LOAD, MVT::f128, Custom);
    setOperationAction(ISD::STORE, MVT::f128, Custom);
  }

  if (Subtarget->hasHardQuad()) {
    setOperationAction(ISD::FADD, MVT::f128, Legal);
    setOperationAction(ISD::FSUB, MVT::f128, Legal);
    setOperationAction(ISD::FMUL, MVT::f128, Legal);
    setOperationAction(ISD::FDIV, MVT::f128, Legal);
    setOperationAction(ISD::FSQRT, MVT::f128, Legal);
    setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
    if (Subtarget->isV9()) {
      setOperationAction(ISD::FNEG, MVT::f128, Legal);
      setOperationAction(ISD::FABS, MVT::f128, Legal);
    } else {
      setOperationAction(ISD::FNEG, MVT::f128, Custom);
      setOperationAction(ISD::FABS, MVT::f128, Custom);
    }

    if (!Subtarget->is64Bit()) {
      setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
      setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
      setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
    }

  } else {
    // Custom legalize f128 operations.

    setOperationAction(ISD::FADD, MVT::f128, Custom);
    setOperationAction(ISD::FSUB, MVT::f128, Custom);
    setOperationAction(ISD::FMUL, MVT::f128, Custom);
    setOperationAction(ISD::FDIV, MVT::f128, Custom);
    setOperationAction(ISD::FSQRT, MVT::f128, Custom);
    setOperationAction(ISD::FNEG, MVT::f128, Custom);
    setOperationAction(ISD::FABS, MVT::f128, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
    // Setup Runtime library names.
    if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
      setLibcallName(RTLIB::ADD_F128, "_Qp_add");
      setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
      setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
      setLibcallName(RTLIB::DIV_F128, "_Qp_div");
      setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
      setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
      setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
      setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
      setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
      setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
      setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
      setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
      setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
      setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
      setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
      setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
    } else if (!Subtarget->useSoftFloat()) {
      setLibcallName(RTLIB::ADD_F128, "_Q_add");
      setLibcallName(RTLIB::SUB_F128, "_Q_sub");
      setLibcallName(RTLIB::MUL_F128, "_Q_mul");
      setLibcallName(RTLIB::DIV_F128, "_Q_div");
      setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
      setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
      setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
      setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
      setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
      setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
      setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
      setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
      setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
      setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
      setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
      setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
    }
  }

  if (Subtarget->fixAllFDIVSQRT()) {
    // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
    // the former instructions generate errata on LEON processors.
    setOperationAction(ISD::FDIV, MVT::f32, Promote);
    setOperationAction(ISD::FSQRT, MVT::f32, Promote);
  }

  if (Subtarget->hasNoFMULS()) {
    setOperationAction(ISD::FMUL, MVT::f32, Promote);
  }

  // Custom combine bitcast between f64 and v2i32
  if (!Subtarget->is64Bit())
    setTargetDAGCombine(ISD::BITCAST);

  if (Subtarget->hasLeonCycleCounter())
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  setMinFunctionAlignment(Align(4));

  computeRegisterProperties(Subtarget->getRegisterInfo());
}
bool SparcTargetLowering::useSoftFloat() const {
  return Subtarget->useSoftFloat();
}

const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((SPISD::NodeType)Opcode) {
  case SPISD::FIRST_NUMBER:    break;
  case SPISD::CMPICC:          return "SPISD::CMPICC";
  case SPISD::CMPFCC:          return "SPISD::CMPFCC";
  case SPISD::CMPFCC_V9:       return "SPISD::CMPFCC_V9";
  case SPISD::BRICC:           return "SPISD::BRICC";
  case SPISD::BPICC:           return "SPISD::BPICC";
  case SPISD::BPXCC:           return "SPISD::BPXCC";
  case SPISD::BRFCC:           return "SPISD::BRFCC";
  case SPISD::BRFCC_V9:        return "SPISD::BRFCC_V9";
  case SPISD::BR_REG:          return "SPISD::BR_REG";
  case SPISD::SELECT_ICC:      return "SPISD::SELECT_ICC";
  case SPISD::SELECT_XCC:      return "SPISD::SELECT_XCC";
  case SPISD::SELECT_FCC:      return "SPISD::SELECT_FCC";
  case SPISD::SELECT_REG:      return "SPISD::SELECT_REG";
  case SPISD::Hi:              return "SPISD::Hi";
  case SPISD::Lo:              return "SPISD::Lo";
  case SPISD::FTOI:            return "SPISD::FTOI";
  case SPISD::ITOF:            return "SPISD::ITOF";
  case SPISD::FTOX:            return "SPISD::FTOX";
  case SPISD::XTOF:            return "SPISD::XTOF";
  case SPISD::CALL:            return "SPISD::CALL";
  case SPISD::RET_GLUE:        return "SPISD::RET_GLUE";
  case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
  case SPISD::FLUSHW:          return "SPISD::FLUSHW";
  case SPISD::TLS_ADD:         return "SPISD::TLS_ADD";
  case SPISD::TLS_LD:          return "SPISD::TLS_LD";
  case SPISD::TLS_CALL:        return "SPISD::TLS_CALL";
  case SPISD::TAIL_CALL:       return "SPISD::TAIL_CALL";
  case SPISD::LOAD_GDOP:       return "SPISD::LOAD_GDOP";
  }
  return nullptr;
}
EVT SparcTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
                                            EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}
/// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
/// be zero. Op is expected to be a target specific node. Used by DAG
/// combiner.
void SparcTargetLowering::computeKnownBitsForTargetNode
                                (const SDValue Op,
                                 KnownBits &Known,
                                 const APInt &DemandedElts,
                                 const SelectionDAG &DAG,
                                 unsigned Depth) const {
  KnownBits Known2;
  Known.resetAll();

  switch (Op.getOpcode()) {
  default: break;
  case SPISD::SELECT_ICC:
  case SPISD::SELECT_XCC:
  case SPISD::SELECT_FCC:
    Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
    Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);

    // Only known if known in both the LHS and RHS.
    Known = Known.intersectWith(Known2);
    break;
  }
}
// Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
// set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
                             ISD::CondCode CC, unsigned &SPCC) {
  if (isNullConstant(RHS) && CC == ISD::SETNE &&
      (((LHS.getOpcode() == SPISD::SELECT_ICC ||
         LHS.getOpcode() == SPISD::SELECT_XCC) &&
        LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
       (LHS.getOpcode() == SPISD::SELECT_FCC &&
        (LHS.getOperand(3).getOpcode() == SPISD::CMPFCC ||
         LHS.getOperand(3).getOpcode() == SPISD::CMPFCC_V9))) &&
      isOneConstant(LHS.getOperand(0)) && isNullConstant(LHS.getOperand(1))) {
    SDValue CMPCC = LHS.getOperand(3);
    SPCC = LHS.getConstantOperandVal(2);
    LHS = CMPCC.getOperand(0);
    RHS = CMPCC.getOperand(1);
  }
}
// Convert to a target node and set target flags.
SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
                                             SelectionDAG &DAG) const {
  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
    return DAG.getTargetGlobalAddress(GA->getGlobal(),
                                      SDLoc(GA),
                                      GA->getValueType(0),
                                      GA->getOffset(), TF);

  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
    return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
                                     CP->getAlign(), CP->getOffset(), TF);

  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
    return DAG.getTargetBlockAddress(BA->getBlockAddress(),
                                     Op.getValueType(),
                                     0,
                                     TF);

  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
    return DAG.getTargetExternalSymbol(ES->getSymbol(),
                                       ES->getValueType(0), TF);

  llvm_unreachable("Unhandled address SDNode");
}
// Split Op into high and low parts according to HiTF and LoTF.
// Return an ADD node combining the parts.
SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
                                          unsigned HiTF, unsigned LoTF,
                                          SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
}
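
// Illustrative note: with VK_Sparc_HI/VK_Sparc_LO flags this pair selects
// to the canonical absolute-address idiom, roughly:
//   sethi %hi(sym), %o0
//   or    %o0, %lo(sym), %o0
// SPISD::Hi carries the upper 22 bits and SPISD::Lo the remaining 10.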
// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
// or ExternalSymbol SDNode.
SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = getPointerTy(DAG.getDataLayout());

  // Handle PIC mode first. SPARC needs a got load for every variable!
  if (isPositionIndependent()) {
    const Module *M = DAG.getMachineFunction().getFunction().getParent();
    PICLevel::Level picLevel = M->getPICLevel();
    SDValue Idx;

    if (picLevel == PICLevel::SmallPIC) {
      // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
      Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
                        withTargetFlags(Op, SparcMCExpr::VK_Sparc_GOT13, DAG));
    } else {
      // This is the pic32 code model, the GOT is known to be smaller than 4GB.
      Idx = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_GOT22,
                         SparcMCExpr::VK_Sparc_GOT10, DAG);
    }

    SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
    SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
    // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
    // function has calls.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    MFI.setHasCalls(true);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
                       MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  }

  // This is one of the absolute code models.
  switch (getTargetMachine().getCodeModel()) {
  default:
    llvm_unreachable("Unsupported absolute code model");
  case CodeModel::Small:
    // abs32.
    return makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
                        SparcMCExpr::VK_Sparc_LO, DAG);
  case CodeModel::Medium: {
    // abs44.
    SDValue H44 = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_H44,
                               SparcMCExpr::VK_Sparc_M44, DAG);
    H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
    SDValue L44 = withTargetFlags(Op, SparcMCExpr::VK_Sparc_L44, DAG);
    L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
    return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
  }
  case CodeModel::Large: {
    // abs64.
    SDValue Hi = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HH,
                              SparcMCExpr::VK_Sparc_HM, DAG);
    Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
    SDValue Lo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
                              SparcMCExpr::VK_Sparc_LO, DAG);
    return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
  }
  }
}
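
// Illustrative note: the Medium (abs44) case above corresponds roughly to
// the four-instruction sequence
//   sethi %h44(sym), %o0
//   or    %o0, %m44(sym), %o0
//   sllx  %o0, 12, %o0
//   or    %o0, %l44(sym), %o0
// yielding a 44-bit absolute address; Large (abs64) instead builds two
// 32-bit halves and combines them with a 32-bit shift.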
SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
                                               SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}
SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  SDLoc DL(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  TLSModel::Model model = getTargetMachine().getTLSModel(GV);

  if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
    unsigned HiTF = ((model == TLSModel::GeneralDynamic)
                         ? SparcMCExpr::VK_Sparc_TLS_GD_HI22
                         : SparcMCExpr::VK_Sparc_TLS_LDM_HI22);
    unsigned LoTF = ((model == TLSModel::GeneralDynamic)
                         ? SparcMCExpr::VK_Sparc_TLS_GD_LO10
                         : SparcMCExpr::VK_Sparc_TLS_LDM_LO10);
    unsigned addTF = ((model == TLSModel::GeneralDynamic)
                          ? SparcMCExpr::VK_Sparc_TLS_GD_ADD
                          : SparcMCExpr::VK_Sparc_TLS_LDM_ADD);
    unsigned callTF = ((model == TLSModel::GeneralDynamic)
                           ? SparcMCExpr::VK_Sparc_TLS_GD_CALL
                           : SparcMCExpr::VK_Sparc_TLS_LDM_CALL);

    SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
    SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
    SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
                                   withTargetFlags(Op, addTF, DAG));

    SDValue Chain = DAG.getEntryNode();
    SDValue InGlue;

    Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
    Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InGlue);
    InGlue = Chain.getValue(1);
    SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
    SDValue Symbol = withTargetFlags(Op, callTF, DAG);

    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
        DAG.getMachineFunction(), CallingConv::C);
    assert(Mask && "Missing call preserved mask for calling convention");
    SDValue Ops[] = {Chain,
                     Callee,
                     Symbol,
                     DAG.getRegister(SP::O0, PtrVT),
                     DAG.getRegisterMask(Mask),
                     InGlue};
    Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
    InGlue = Chain.getValue(1);
    Chain = DAG.getCALLSEQ_END(Chain, 1, 0, InGlue, DL);
    InGlue = Chain.getValue(1);
    SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InGlue);

    if (model != TLSModel::LocalDynamic)
      return Ret;

    SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
                 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_HIX22, DAG));
    SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
                 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_LOX10, DAG));
    HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
    return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
                   withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_ADD, DAG));
  }

  if (model == TLSModel::InitialExec) {
    unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
                                        : SparcMCExpr::VK_Sparc_TLS_IE_LD);

    SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);

    // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
    // function has calls.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    MFI.setHasCalls(true);

    SDValue TGA = makeHiLoPair(Op,
                               SparcMCExpr::VK_Sparc_TLS_IE_HI22,
                               SparcMCExpr::VK_Sparc_TLS_IE_LO10, DAG);
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
    SDValue Offset = DAG.getNode(SPISD::TLS_LD,
                                 DL, PtrVT, Ptr,
                                 withTargetFlags(Op, ldTF, DAG));
    return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
                       DAG.getRegister(SP::G7, PtrVT), Offset,
                       withTargetFlags(Op,
                                       SparcMCExpr::VK_Sparc_TLS_IE_ADD, DAG));
  }

  assert(model == TLSModel::LocalExec);
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
                  withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_HIX22, DAG));
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
                  withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_LOX10, DAG));
  SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);

  return DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getRegister(SP::G7, PtrVT), Offset);
}
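
// Illustrative note: the LocalExec lowering above corresponds to the
// standard SPARC TLS LE sequence, roughly:
//   sethi %tle_hix22(sym), %o0
//   xor   %o0, %tle_lox10(sym), %o0
//   add   %g7, %o0, %o0        ! %g7 is the thread pointer
// XOR (rather than OR) matches the sign-extending hix22/lox10 split, so
// negative thread-pointer offsets also work.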
SDValue SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain,
                                                  ArgListTy &Args, SDValue Arg,
                                                  const SDLoc &DL,
                                                  SelectionDAG &DAG) const {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  EVT ArgVT = Arg.getValueType();
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());

  ArgListEntry Entry;
  Entry.Node = Arg;
  Entry.Ty = ArgTy;

  if (ArgTy->isFP128Ty()) {
    // Create a stack object and pass the pointer to the library function.
    int FI = MFI.CreateStackObject(16, Align(8), false);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
                         Align(8));

    Entry.Node = FIPtr;
    Entry.Ty = PointerType::getUnqual(ArgTy);
  }
  Args.push_back(Entry);
  return Chain;
}
SDValue
SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
                                 const char *LibFuncName,
                                 unsigned numArgs) const {

  ArgListTy Args;

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
  Type *RetTyABI = RetTy;
  SDValue Chain = DAG.getEntryNode();
  SDValue RetPtr;

  if (RetTy->isFP128Ty()) {
    // Create a Stack Object to receive the return value of type f128.
    ArgListEntry Entry;
    int RetFI = MFI.CreateStackObject(16, Align(8), false);
    RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
    Entry.Node = RetPtr;
    Entry.Ty = PointerType::getUnqual(RetTy);
    if (!Subtarget->is64Bit()) {
      Entry.IsSRet = true;
      Entry.IndirectType = RetTy;
    }
    Entry.IsReturned = false;
    Args.push_back(Entry);
    RetTyABI = Type::getVoidTy(*DAG.getContext());
  }

  assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
  for (unsigned i = 0, e = numArgs; i != e; ++i) {
    Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
  }
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
    .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  // chain is in second result.
  if (RetTyABI == RetTy)
    return CallInfo.first;

  assert(RetTy->isFP128Ty() && "Unexpected return type!");

  Chain = CallInfo.second;

  // Load RetPtr to get the return value.
  return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
                     MachinePointerInfo(), Align(8));
}
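
// Illustrative note: on 32-bit targets an f128 operation such as FADD
// therefore becomes a call shaped like
//   _Q_add(&result, &lhs, &rhs)
// with every f128 value passed and returned indirectly through a 16-byte
// stack slot, matching the sret/pointer handling set up above.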
SDValue SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
                                              unsigned &SPCC, const SDLoc &DL,
                                              SelectionDAG &DAG) const {

  const char *LibCall = nullptr;
  bool is64Bit = Subtarget->is64Bit();
  switch (SPCC) {
  default: llvm_unreachable("Unhandled conditional code!");
  case SPCC::FCC_E  : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
  case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
  case SPCC::FCC_L  : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
  case SPCC::FCC_G  : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
  case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
  case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
  case SPCC::FCC_UL :
  case SPCC::FCC_ULE:
  case SPCC::FCC_UG :
  case SPCC::FCC_UGE:
  case SPCC::FCC_U  :
  case SPCC::FCC_O  :
  case SPCC::FCC_LG :
  case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
  }

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
  Type *RetTy = Type::getInt32Ty(*DAG.getContext());
  ArgListTy Args;
  SDValue Chain = DAG.getEntryNode();
  Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
  Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL).setChain(Chain)
    .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  // result is in first, and chain is in second result.
  SDValue Result = CallInfo.first;

  switch (SPCC) {
  default: {
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UL : {
    SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_ULE: {
    SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UG : {
    SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
    SPCC = SPCC::ICC_G;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UGE: {
    SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }

  case SPCC::FCC_U  : {
    SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_O  : {
    SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_LG : {
    SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UE : {
    SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  }
}
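
// Note on the constants used above: _Q_cmp/_Qp_cmp return a small ordering
// code rather than a boolean, and the masks and comparisons decode it; for
// instance FCC_UL tests the low bit of the result ("less or unordered")
// and FCC_U compares the result against 3 ("unordered"). Each case then
// reduces to a plain integer CMPICC against a constant.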
static SDValue
LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
                   const SparcTargetLowering &TLI) {

  if (Op.getOperand(0).getValueType() == MVT::f64)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);

  if (Op.getOperand(0).getValueType() == MVT::f32)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);

  llvm_unreachable("fpextend with non-float operand!");
  return SDValue();
}
static SDValue
LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
                  const SparcTargetLowering &TLI) {
  // FP_ROUND on f64 and f32 are legal.
  if (Op.getOperand(0).getValueType() != MVT::f128)
    return Op;

  if (Op.getValueType() == MVT::f64)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
  if (Op.getValueType() == MVT::f32)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);

  llvm_unreachable("fpround to non-float!");
  return SDValue();
}
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  assert(VT == MVT::i32 || VT == MVT::i64);

  // Expand f128 operations to fp128 abi calls.
  if (Op.getOperand(0).getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
    const char *libName = TLI.getLibcallName(VT == MVT::i32
                                             ? RTLIB::FPTOSINT_F128_I32
                                             : RTLIB::FPTOSINT_F128_I64);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the resulting type is illegal.
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  // Otherwise, Convert the fp value to integer in an FP register.
  if (VT == MVT::i32)
    Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
  else
    Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));

  return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}
static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || (OpVT == MVT::i64));

  EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;

  // Expand f128 operations to fp128 ABI calls.
  if (Op.getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
    const char *libName = TLI.getLibcallName(OpVT == MVT::i32
                                             ? RTLIB::SINTTOFP_I32_F128
                                             : RTLIB::SINTTOFP_I64_F128);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the operand type is illegal.
  if (!TLI.isTypeLegal(OpVT))
    return SDValue();

  // Otherwise, Convert the int value to FP in an FP register.
  SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
  unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
  return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
}
static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  // Expand if it does not involve f128 or the target has support for
  // quad floating point instructions and the resulting type is legal.
  if (Op.getOperand(0).getValueType() != MVT::f128 ||
      (hasHardQuad && TLI.isTypeLegal(VT)))
    return SDValue();

  assert(VT == MVT::i32 || VT == MVT::i64);

  return TLI.LowerF128Op(Op, DAG,
                         TLI.getLibcallName(VT == MVT::i32
                                            ? RTLIB::FPTOUINT_F128_I32
                                            : RTLIB::FPTOUINT_F128_I64),
                         1);
}
LowerUINT_TO_FP(SDValue Op
, SelectionDAG
&DAG
,
2596 const SparcTargetLowering
&TLI
,
2599 EVT OpVT
= Op
.getOperand(0).getValueType();
2600 assert(OpVT
== MVT::i32
|| OpVT
== MVT::i64
);
2602 // Expand if it does not involve f128 or the target has support for
2603 // quad floating point instructions and the operand type is legal.
2604 if (Op
.getValueType() != MVT::f128
|| (hasHardQuad
&& TLI
.isTypeLegal(OpVT
)))
2607 return TLI
.LowerF128Op(Op
, DAG
,
2608 TLI
.getLibcallName(OpVT
== MVT::i32
2609 ? RTLIB::UINTTOFP_I32_F128
2610 : RTLIB::UINTTOFP_I64_F128
),
2614 static SDValue
LowerBR_CC(SDValue Op
, SelectionDAG
&DAG
,
2615 const SparcTargetLowering
&TLI
, bool hasHardQuad
,
2616 bool isV9
, bool is64Bit
) {
2617 SDValue Chain
= Op
.getOperand(0);
2618 ISD::CondCode CC
= cast
<CondCodeSDNode
>(Op
.getOperand(1))->get();
2619 SDValue LHS
= Op
.getOperand(2);
2620 SDValue RHS
= Op
.getOperand(3);
2621 SDValue Dest
= Op
.getOperand(4);
2623 unsigned Opc
, SPCC
= ~0U;
2625 // If this is a br_cc of a "setcc", and if the setcc got lowered into
2626 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2627 LookThroughSetCC(LHS
, RHS
, CC
, SPCC
);
2628 assert(LHS
.getValueType() == RHS
.getValueType());
2630 // Get the condition flag.
2631 SDValue CompareFlag
;
2632 if (LHS
.getValueType().isInteger()) {
2633 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2634 // and the RHS is zero we might be able to use a specialized branch.
2635 if (is64Bit
&& isV9
&& LHS
.getValueType() == MVT::i64
&&
2636 isNullConstant(RHS
) && !ISD::isUnsignedIntSetCC(CC
))
2637 return DAG
.getNode(SPISD::BR_REG
, dl
, MVT::Other
, Chain
, Dest
,
2638 DAG
.getConstant(intCondCCodeToRcond(CC
), dl
, MVT::i32
),
2641 CompareFlag
= DAG
.getNode(SPISD::CMPICC
, dl
, MVT::Glue
, LHS
, RHS
);
2642 if (SPCC
== ~0U) SPCC
= IntCondCCodeToICC(CC
);
2644 // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2645 Opc
= LHS
.getValueType() == MVT::i32
? SPISD::BPICC
: SPISD::BPXCC
;
2647 // Non-v9 targets don't have xcc.
2650 if (!hasHardQuad
&& LHS
.getValueType() == MVT::f128
) {
2651 if (SPCC
== ~0U) SPCC
= FPCondCCodeToFCC(CC
);
2652 CompareFlag
= TLI
.LowerF128Compare(LHS
, RHS
, SPCC
, dl
, DAG
);
2653 Opc
= isV9
? SPISD::BPICC
: SPISD::BRICC
;
2655 unsigned CmpOpc
= isV9
? SPISD::CMPFCC_V9
: SPISD::CMPFCC
;
2656 CompareFlag
= DAG
.getNode(CmpOpc
, dl
, MVT::Glue
, LHS
, RHS
);
2657 if (SPCC
== ~0U) SPCC
= FPCondCCodeToFCC(CC
);
2658 Opc
= isV9
? SPISD::BRFCC_V9
: SPISD::BRFCC
;
2661 return DAG
.getNode(Opc
, dl
, MVT::Other
, Chain
, Dest
,
2662 DAG
.getConstant(SPCC
, dl
, MVT::i32
), CompareFlag
);
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
                              const SparcTargetLowering &TLI, bool hasHardQuad,
                              bool isV9, bool is64Bit) {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;

  // If this is a select_cc of a "setcc", and if the setcc got lowered into
  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);
  assert(LHS.getValueType() == RHS.getValueType());

  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    // On V9 processors running in 64-bit mode, if CC compares two `i64`s
    // and the RHS is zero we might be able to use a specialized select.
    // All SELECT_CC between any two scalar integer types are eligible for
    // lowering to specialized instructions. Additionally, f32 and f64 types
    // are also eligible, but for f128 we can only use the specialized
    // instruction when we have hardquad.
    EVT ValType = TrueVal.getValueType();
    bool IsEligibleType = ValType.isScalarInteger() || ValType == MVT::f32 ||
                          ValType == MVT::f64 ||
                          (ValType == MVT::f128 && hasHardQuad);
    if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
        isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC) && IsEligibleType)
      return DAG.getNode(
          SPISD::SELECT_REG, dl, TrueVal.getValueType(), TrueVal, FalseVal,
          DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32), LHS);

    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    Opc = LHS.getValueType() == MVT::i32 ?
          SPISD::SELECT_ICC : SPISD::SELECT_XCC;
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
  } else {
    if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
      Opc = SPISD::SELECT_ICC;
    } else {
      unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
      CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
      Opc = SPISD::SELECT_FCC;
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
    }
  }
  return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
                     DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
                            const SparcTargetLowering &TLI) {
  MachineFunction &MF = DAG.getMachineFunction();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());

  // Need frame address to find the address of VarArgsFrameIndex.
  MF.getFrameInfo().setFrameAddressIsTaken(true);

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc DL(Op);
  SDValue Offset =
      DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
                  DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
                      MachinePointerInfo(SV));
}
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc DL(Node);
  SDValue VAList =
      DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
  // Increment the pointer, VAList, to the next vaarg.
  SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits()/8,
                                                      DL));
  // Store the incremented VAList to the legalized pointer.
  InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
                         MachinePointerInfo(SV));
  // Load the actual argument out of the pointer VAList.
  // We can't count on greater alignment than the word size.
  return DAG.getLoad(
      VT, DL, InChain, VAList, MachinePointerInfo(),
      Align(std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8));
}
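
// Illustrative note: for a va_arg of f64 on 32-bit SPARC this emits a
// 4-byte-aligned 8-byte load, because the va_list pointer only guarantees
// word alignment; the Align(std::min(...)) clamp above is what prevents an
// unaligned-access trap on doubles.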
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
                                       const SparcSubtarget *Subtarget) {
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDValue Alignment = Op.getOperand(2);
  MaybeAlign MaybeAlignment =
      cast<ConstantSDNode>(Alignment)->getMaybeAlignValue();
  EVT VT = Size->getValueType(0);
  SDLoc dl(Op);

  unsigned SPReg = SP::O6;
  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);

  // The resultant pointer needs to be above the register spill area
  // at the bottom of the stack.
  unsigned regSpillArea;
  if (Subtarget->is64Bit()) {
    regSpillArea = 128;
  } else {
    // On Sparc32, the size of the spill area is 92. Unfortunately,
    // that's only 4-byte aligned, not 8-byte aligned (the stack
    // pointer is 8-byte aligned). So, if the user asked for an 8-byte
    // aligned dynamic allocation, we actually need to add 96 to the
    // bottom of the stack, instead of 92, to ensure 8-byte alignment.

    // That also means adding 4 to the size of the allocation --
    // before applying the 8-byte rounding. Unfortunately, the
    // value we get here has already had rounding applied. So, we need
    // to add 8, instead, wasting a bit more memory.

    // Further, this only actually needs to be done if the required
    // alignment is > 4, but, we've lost that info by this point, too,
    // so we always apply it.

    // (An alternative approach would be to always reserve 96 bytes
    // instead of the required 92, but then we'd waste 4 extra bytes
    // in every frame, not just those with dynamic stack allocations)

    // TODO: modify code in SelectionDAGBuilder to make this less sad.

    Size = DAG.getNode(ISD::ADD, dl, VT, Size,
                       DAG.getConstant(8, dl, VT));
    regSpillArea = 96;
  }

  int64_t Bias = Subtarget->getStackPointerBias();

  // Debias and increment SP past the reserved spill area.
  // We need the SP to point to the first usable region before calculating
  // anything to prevent any of the pointers from becoming out of alignment
  // when we rebias the SP later on.
  SDValue StartOfUsableStack = DAG.getNode(
      ISD::ADD, dl, VT, SP, DAG.getConstant(regSpillArea + Bias, dl, VT));
  SDValue AllocatedPtr =
      DAG.getNode(ISD::SUB, dl, VT, StartOfUsableStack, Size);

  bool IsOveraligned = MaybeAlignment.has_value();
  SDValue AlignedPtr =
      IsOveraligned
          ? DAG.getNode(ISD::AND, dl, VT, AllocatedPtr,
                        DAG.getSignedConstant(-MaybeAlignment->value(), dl, VT))
          : AllocatedPtr;

  // Now that we are done, restore the bias and reserved spill area.
  SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, AlignedPtr,
                              DAG.getConstant(regSpillArea + Bias, dl, VT));
  Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP);
  SDValue Ops[2] = {AlignedPtr, Chain};
  return DAG.getMergeValues(Ops, dl);
}
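
// Worked example (sparc32, not from the original source): for
// "alloca 16, align 8", Size arrives as 16 and is padded to 24; the result
// pointer is then ((SP + 96) - 24) & -8, and the new SP is rebiased back
// down by 96, so the 92-byte register spill window at the bottom of the
// frame stays reserved.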
static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  SDValue Chain = DAG.getNode(SPISD::FLUSHW,
                              dl, MVT::Other, DAG.getEntryNode());
  return Chain;
}
static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
                            const SparcSubtarget *Subtarget,
                            bool AlwaysFlush = false) {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned FrameReg = SP::I6;
  unsigned stackBias = Subtarget->getStackPointerBias();

  SDValue FrameAddr;
  SDValue Chain;

  // flush first to make sure the windowed registers' values are in stack
  Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();

  FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);

  unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;

  while (depth--) {
    SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, dl));
    FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
  }
  if (Subtarget->is64Bit())
    FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                            DAG.getIntPtrConstant(stackBias, dl));
  return FrameAddr;
}
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
                              const SparcSubtarget *Subtarget) {

  uint64_t depth = Op.getConstantOperandVal(0);

  return getFRAMEADDR(depth, Op, DAG, Subtarget);
}
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               const SparcSubtarget *Subtarget) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  uint64_t depth = Op.getConstantOperandVal(0);

  SDValue RetAddr;
  if (depth == 0) {
    auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
    RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
    return RetAddr;
  }

  // Need frame address to find return address of the caller.
  SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);

  unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
  SDValue Ptr = DAG.getNode(ISD::ADD,
                            dl, VT,
                            FrameAddr,
                            DAG.getIntPtrConstant(Offset, dl));
  RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());

  return RetAddr;
}
static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
                          unsigned opcode) {
  assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
  assert(opcode == ISD::FNEG || opcode == ISD::FABS);

  // Lower fneg/fabs on f64 to fneg/fabs on f32.
  // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
  // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.

  // Note: in little-endian, the two halves of the floating-point value are
  // stored in the registers in the opposite order, so the subreg with the
  // sign bit is the highest-numbered (odd), rather than the
  // lowest-numbered (even).

  SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
                                            SrcReg64);
  SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
                                            SrcReg64);

  if (DAG.getDataLayout().isLittleEndian())
    Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
  else
    Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);

  SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                dl, MVT::f64), 0);
  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
                                       DstReg64, Hi32);
  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
                                       DstReg64, Lo32);
  return DstReg64;
}
// Lower a f128 load into two f64 loads.
static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
{
  SDLoc dl(Op);
  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
  assert(LdNode->getOffset().isUndef() && "Unexpected node type");

  Align Alignment = commonAlignment(LdNode->getOriginalAlign(), 8);

  SDValue Hi64 =
      DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
                  LdNode->getPointerInfo(), Alignment);
  EVT addrVT = LdNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              LdNode->getBasePtr(),
                              DAG.getConstant(8, dl, addrVT));
  SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
                             LdNode->getPointerInfo().getWithOffset(8),
                             Alignment);

  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);

  SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                       dl, MVT::f128);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Hi64,
                               SubRegEven);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Lo64,
                               SubRegOdd);
  SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
                           SDValue(Lo64.getNode(), 1) };
  SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  SDValue Ops[2] = {SDValue(InFP128, 0), OutChain};
  return DAG.getMergeValues(Ops, dl);
}
static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
{
  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());

  EVT MemVT = LdNode->getMemoryVT();
  if (MemVT == MVT::f128)
    return LowerF128Load(Op, DAG);

  return Op;
}
// Lower a f128 store into two f64 stores.
static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
  assert(StNode->getOffset().isUndef() && "Unexpected node type");

  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);

  SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegEven);
  SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegOdd);

  Align Alignment = commonAlignment(StNode->getOriginalAlign(), 8);

  SDValue OutChains[2];
  OutChains[0] =
      DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
                   StNode->getBasePtr(), StNode->getPointerInfo(),
                   Alignment);
  EVT addrVT = StNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              StNode->getBasePtr(),
                              DAG.getConstant(8, dl, addrVT));
  OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
                              StNode->getPointerInfo().getWithOffset(8),
                              Alignment);
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
{
  SDLoc dl(Op);
  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());

  EVT MemVT = St->getMemoryVT();
  if (MemVT == MVT::f128)
    return LowerF128Store(Op, DAG);

  if (MemVT == MVT::i64) {
    // Custom handling for i64 stores: turn it into a bitcast and a
    // v2i32 store.
    SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
    SDValue Chain = DAG.getStore(
        St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
        St->getOriginalAlign(), St->getMemOperand()->getFlags(),
        St->getAAInfo());
    return Chain;
  }

  return SDValue();
}

static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
  assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
         && "invalid opcode");

  SDLoc dl(Op);

  if (Op.getValueType() == MVT::f64)
    return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
  if (Op.getValueType() != MVT::f128)
    return Op;

  // Lower fabs/fneg on f128 to fabs/fneg on f64
  // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
  // (As with LowerF64Op, on little-endian, we need to negate the odd
  // double instead.)

  SDValue SrcReg128 = Op.getOperand(0);
  SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
                                            SrcReg128);
  SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
                                            SrcReg128);

  if (DAG.getDataLayout().isLittleEndian()) {
    if (isV9)
      Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
    else
      Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
  } else {
    if (isV9)
      Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
    else
      Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
  }

  SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, MVT::f128), 0);
  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
                                        DstReg128, Hi64);
  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
                                        DstReg128, Lo64);
  return DstReg128;
}
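
// Illustrative note (not from the original source): on big-endian SPARC the
// sign bit of an f128 sits in the even (high) double, so fneg/fabs only has
// to touch that half; the other half is copied through unchanged when the
// result is reassembled above.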

static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
    // Expand with a fence.
    return SDValue();
  }

  // Monotonic load/stores are legal.
  return Op;
}
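
// Illustrative note (not from the original source): returning a null SDValue
// from a Custom lowering hook tells the legalizer to fall back to its default
// expansion, so an atomic access stronger than monotonic is rebuilt with the
// fences the comment above mentions, while a monotonic one stays a plain
// load or store.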

SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {
  unsigned IntNo = Op.getConstantOperandVal(0);
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getRegister(SP::G7, PtrVT);
  }
  }
}
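
// Illustrative note (not from the original source): a call to the
// llvm.thread.pointer intrinsic therefore lowers to a plain read of %g7, the
// register the SPARC ABI reserves for the thread pointer.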

SDValue SparcTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {

  bool hasHardQuad = Subtarget->hasHardQuad();
  bool isV9 = Subtarget->isV9();
  bool is64Bit = Subtarget->is64Bit();

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Should not custom lower this!");

  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG, *this,
                                                       Subtarget);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG,
                                                      Subtarget);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::FP_TO_UINT:         return LowerFP_TO_UINT(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::UINT_TO_FP:         return LowerUINT_TO_FP(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
  case ISD::VASTART:            return LowerVASTART(Op, DAG, *this);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
                                                               Subtarget);

  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::FADD:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::ADD_F128), 2);
  case ISD::FSUB:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::SUB_F128), 2);
  case ISD::FMUL:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::MUL_F128), 2);
  case ISD::FDIV:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::DIV_F128), 2);
  case ISD::FSQRT:              return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::SQRT_F128), 1);
  case ISD::FABS:
  case ISD::FNEG:               return LowerFNEGorFABS(Op, DAG, isV9);
  case ISD::FP_EXTEND:          return LowerF128_FPEXTEND(Op, DAG, *this);
  case ISD::FP_ROUND:           return LowerF128_FPROUND(Op, DAG, *this);
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:       return LowerATOMIC_LOAD_STORE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  }
}
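
// Illustrative note (not from the original source): each f128 arithmetic case
// above turns into a libcall through LowerF128Op; for example, an fadd on
// fp128 is emitted as a call to whatever name the RTLIB::ADD_F128 slot holds
// (assumed to be the _Q_add/_Qp_add soft-quad routines on 32-/64-bit SPARC).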

SDValue SparcTargetLowering::bitcastConstantFPToInt(ConstantFPSDNode *C,
                                                    const SDLoc &DL,
                                                    SelectionDAG &DAG) const {
  APInt V = C->getValueAPF().bitcastToAPInt();
  SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
  SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
  if (DAG.getDataLayout().isLittleEndian())
    std::swap(Lo, Hi);
  return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
}
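
// Worked example (not from the original source): the f64 constant 1.0 has bit
// pattern 0x3FF0000000000000, giving Hi = 0x3FF00000 and Lo = 0x00000000; on
// big-endian targets the build vector is <0x3FF00000, 0x00000000>, and on
// little-endian the two halves are swapped first.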

SDValue SparcTargetLowering::PerformBITCASTCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
  SDLoc dl(N);
  SDValue Src = N->getOperand(0);

  if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
      Src.getSimpleValueType() == MVT::f64)
    return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);

  return SDValue();
}

SDValue SparcTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BITCAST:
    return PerformBITCASTCombine(N, DCI);
  }
  return SDValue();
}

MachineBasicBlock *
SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default: llvm_unreachable("Unknown SELECT_CC!");
  case SP::SELECT_CC_Int_ICC:
  case SP::SELECT_CC_FP_ICC:
  case SP::SELECT_CC_DFP_ICC:
  case SP::SELECT_CC_QFP_ICC:
    if (Subtarget->isV9())
      return expandSelectCC(MI, BB, SP::BPICC);
    return expandSelectCC(MI, BB, SP::BCOND);
  case SP::SELECT_CC_Int_XCC:
  case SP::SELECT_CC_FP_XCC:
  case SP::SELECT_CC_DFP_XCC:
  case SP::SELECT_CC_QFP_XCC:
    return expandSelectCC(MI, BB, SP::BPXCC);
  case SP::SELECT_CC_Int_FCC:
  case SP::SELECT_CC_FP_FCC:
  case SP::SELECT_CC_DFP_FCC:
  case SP::SELECT_CC_QFP_FCC:
    if (Subtarget->isV9())
      return expandSelectCC(MI, BB, SP::FBCOND_V9);
    return expandSelectCC(MI, BB, SP::FBCOND);
  }
}

MachineBasicBlock *
SparcTargetLowering::expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned BROpcode) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // triangle control-flow pattern.  The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and the condition code for the branch.
  //
  // We produce the following control flow:
  //     ThisMBB
  //     |  \
  //     |  IfFalseMBB
  //     | /
  //    SinkMBB
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  MachineBasicBlock *ThisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, IfFalseMBB);
  F->insert(It, SinkMBB);

  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
                  std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);

  // Set the new successors for ThisMBB.
  ThisMBB->addSuccessor(IfFalseMBB);
  ThisMBB->addSuccessor(SinkMBB);

  BuildMI(ThisMBB, dl, TII.get(BROpcode))
    .addMBB(SinkMBB)
    .addImm(CC);

  // IfFalseMBB just falls through to SinkMBB.
  IfFalseMBB->addSuccessor(SinkMBB);

  // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
  BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
          MI.getOperand(0).getReg())
      .addReg(MI.getOperand(1).getReg())
      .addMBB(ThisMBB)
      .addReg(MI.getOperand(2).getReg())
      .addMBB(IfFalseMBB);

  MI.eraseFromParent();   // The pseudo instruction is gone now.
  return SinkMBB;
}
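
// Illustrative sketch (not from the original source): for a pseudo such as
//   %res = SELECT_CC_Int_ICC %tval, %fval, <cc>
// the expansion yields
//   ThisMBB:    b<cc> SinkMBB          ; condition true -> keep %tval
//   IfFalseMBB:                        ; fallthrough -> %fval
//   SinkMBB:    %res = PHI [%tval, ThisMBB], [%fval, IfFalseMBB]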

//===----------------------------------------------------------------------===//
//                         Sparc Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
SparcTargetLowering::ConstraintType
SparcTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    case 'r':
    case 'f':
    case 'e':
      return C_RegisterClass;
    case 'I': // SIMM13
      return C_Immediate;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SparcTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;

  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'I': // SIMM13
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (isInt<13>(C->getSExtValue()))
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void SparcTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints for now.
  if (Constraint.size() > 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I': // SIMM13
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<13>(C->getSExtValue())) {
        Result = DAG.getSignedTargetConstant(C->getSExtValue(), SDLoc(Op),
                                             Op.getValueType());
        break;
      }
      return;
    }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
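
// Illustrative note (not from the original source): with the "I" (SIMM13)
// constraint a constant like 42 fits in 13 signed bits (-4096..4095) and is
// folded into the instruction as a target constant, while e.g. 5000 is out of
// range, so nothing is pushed and the operand is rejected.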

std::pair<unsigned, const TargetRegisterClass *>
SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  if (Constraint.empty())
    return std::make_pair(0U, nullptr);

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      if (VT == MVT::v2i32)
        return std::make_pair(0U, &SP::IntPairRegClass);
      else if (Subtarget->is64Bit())
        return std::make_pair(0U, &SP::I64RegsRegClass);
      else
        return std::make_pair(0U, &SP::IntRegsRegClass);
    case 'f':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &SP::FPRegsRegClass);
      else if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &SP::LowDFPRegsRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SP::LowQFPRegsRegClass);
      // This will generate an error message
      return std::make_pair(0U, nullptr);
    case 'e':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &SP::FPRegsRegClass);
      else if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &SP::DFPRegsRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SP::QFPRegsRegClass);
      // This will generate an error message
      return std::make_pair(0U, nullptr);
    }
  }

  if (Constraint.front() != '{')
    return std::make_pair(0U, nullptr);

  assert(Constraint.back() == '}' && "Not a brace enclosed constraint?");
  StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
  if (RegName.empty())
    return std::make_pair(0U, nullptr);

  unsigned long long RegNo;
  // Handle numbered register aliases.
  if (RegName[0] == 'r' &&
      getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
    // r0-r7   -> g0-g7
    // r8-r15  -> o0-o7
    // r16-r23 -> l0-l7
    // r24-r31 -> i0-i7
    if (RegNo > 31)
      return std::make_pair(0U, nullptr);
    const char RegTypes[] = {'g', 'o', 'l', 'i'};
    char RegType = RegTypes[RegNo / 8];
    char RegIndex = '0' + (RegNo % 8);
    char Tmp[] = {'{', RegType, RegIndex, '}', 0};
    return getRegForInlineAsmConstraint(TRI, Tmp, VT);
  }

  // Rewrite the fN constraint according to the value type if needed.
  if (VT != MVT::f32 && VT != MVT::Other && RegName[0] == 'f' &&
      getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
    if (VT == MVT::f64 && (RegNo % 2 == 0)) {
      return getRegForInlineAsmConstraint(
          TRI, StringRef("{d" + utostr(RegNo / 2) + "}"), VT);
    } else if (VT == MVT::f128 && (RegNo % 4 == 0)) {
      return getRegForInlineAsmConstraint(
          TRI, StringRef("{q" + utostr(RegNo / 4) + "}"), VT);
    } else {
      return std::make_pair(0U, nullptr);
    }
  }

  auto ResultPair =
      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
  if (!ResultPair.second)
    return std::make_pair(0U, nullptr);

  // Force the use of I64Regs over IntRegs for 64-bit values.
  if (Subtarget->is64Bit() && VT == MVT::i64) {
    assert(ResultPair.second == &SP::IntRegsRegClass &&
           "Unexpected register class");
    return std::make_pair(ResultPair.first, &SP::I64RegsRegClass);
  }

  return ResultPair;
}
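
// Illustrative examples (not from the original source):
//   {r10}                 -> {o2}  (RegTypes[10 / 8] == 'o', 10 % 8 == 2)
//   {f2}  with VT == f64  -> {d1}
//   {f4}  with VT == f128 -> {q1}
// The rewritten name is then resolved by the recursive call above.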

bool
SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The Sparc target isn't yet aware of offsets.
  return false;
}

void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>& Results,
                                             SelectionDAG &DAG) const {

  SDLoc dl(N);

  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");

  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // Custom lower only if it involves f128 or i64.
    if (N->getOperand(0).getValueType() != MVT::f128
        || N->getValueType(0) != MVT::i64)
      return;
    libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
               ? RTLIB::FPTOSINT_F128_I64
               : RTLIB::FPTOUINT_F128_I64);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::READCYCLECOUNTER: {
    assert(Subtarget->hasLeonCycleCounter());
    SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
    SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
    SDValue Ops[] = { Lo, Hi };
    SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
    Results.push_back(Pair);
    Results.push_back(N->getOperand(0));
    return;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    // Custom lower only if it involves f128 or i64.
    if (N->getValueType(0) != MVT::f128
        || N->getOperand(0).getValueType() != MVT::i64)
      return;

    libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
               ? RTLIB::SINTTOFP_I64_F128
               : RTLIB::UINTTOFP_I64_F128);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::LOAD: {
    LoadSDNode *Ld = cast<LoadSDNode>(N);
    // Custom handling only for i64: turn i64 load into a v2i32 load,
    // and a bitcast.
    if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
      return;

    SDValue LoadRes = DAG.getExtLoad(
        Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
        Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32,
        Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags(),
        Ld->getAAInfo());

    SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
    Results.push_back(Res);
    Results.push_back(LoadRes.getValue(1));
    return;
  }
  }
}
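
// Illustrative note (not from the original source): in the READCYCLECOUNTER
// case the LEON cycle counter is the 32-bit %asr23 register; the high word is
// read from %g0, which is hard-wired to zero, so the i64 result is just the
// zero-extended counter.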

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool SparcTargetLowering::useLoadStackGuardNode(const Module &M) const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::useLoadStackGuardNode(M);
  return true;
}

// Override to disable global variable loading on Linux.
void SparcTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}

void SparcTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                        SDNode *Node) const {
  assert(MI.getOpcode() == SP::SUBCCrr || MI.getOpcode() == SP::SUBCCri);
  // If the result is dead, replace it with %g0.
  if (!Node->hasAnyUseOfValue(0))
    MI.getOperand(0).setReg(SP::G0);
}
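
// Illustrative note (not from the original source): when only the condition
// codes are used, redirecting the SUBCC result to %g0 turns
//   subcc %o0, %o1, %o2   into   subcc %o0, %o1, %g0
// which is exactly SPARC's "cmp %o0, %o1" synthetic instruction.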