//===- XtensaISelLowering.cpp - Xtensa DAG Lowering Implementation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that Xtensa uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "XtensaISelLowering.h"
#include "XtensaConstantPoolValue.h"
#include "XtensaSubtarget.h"
#include "XtensaTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <deque>

using namespace llvm;

#define DEBUG_TYPE "xtensa-lower"
// Return true if we must use a long (in fact, indirect) function call.
// This is a simplified version; a production implementation must
// resolve functions located in ROM (usually glibc functions).
static bool isLongCall(const char *str) {
  // Currently always use long calls
  return true;
}

XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM,
                                           const XtensaSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  MVT PtrVT = MVT::i32;
  // Set up the register classes.
  addRegisterClass(MVT::i32, &Xtensa::ARRegClass);

  // Set up special registers.
  setStackPointerRegisterToSaveRestore(Xtensa::SP);

  setSchedulingPreference(Sched::RegPressure);

  setMinFunctionAlignment(Align(4));

  setOperationAction(ISD::Constant, MVT::i32, Custom);
  setOperationAction(ISD::Constant, MVT::i64, Expand);

  setBooleanContents(ZeroOrOneBooleanContent);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);

  setOperationAction(ISD::BITCAST, MVT::i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand);

  // No sign extend instructions for i1
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
  }

  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::BR_CC, MVT::i32, Legal);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
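
  // SELECT_CC is custom-lowered to an XtensaISD::SELECT_CC node, which is
  // later expanded into a branch-and-phi sequence by emitSelectCC() below.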
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Expand);
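
  // Xtensa has compare-and-branch instructions only for the EQ/NE/LT/GE
  // (and unsigned LT/GE) condition codes, so expand the GT/LE forms and let
  // the legalizer rewrite them in terms of the supported ones.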
  setCondCodeAction(ISD::SETGT, MVT::i32, Expand);
  setCondCodeAction(ISD::SETLE, MVT::i32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::i32, Expand);
  setCondCodeAction(ISD::SETULE, MVT::i32, Expand);

  // Implement custom stack allocations
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
  // Implement custom stack save and restore
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());
}

bool XtensaTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {
  // The Xtensa target isn't yet aware of offsets.
  return false;
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "XtensaGenCallingConv.inc"
static bool CC_Xtensa_Custom(unsigned ValNo, MVT ValVT, MVT LocVT,
                             CCValAssign::LocInfo LocInfo,
                             ISD::ArgFlagsTy ArgFlags, CCState &State) {
  static const MCPhysReg IntRegs[] = {Xtensa::A2, Xtensa::A3, Xtensa::A4,
                                      Xtensa::A5, Xtensa::A6, Xtensa::A7};
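
  // The first six 32-bit argument words are passed in A2..A7; anything
  // beyond that goes on the stack.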
  if (ArgFlags.isByVal()) {
    Align ByValAlign = ArgFlags.getNonZeroByValAlign();
    unsigned ByValSize = ArgFlags.getByValSize();
    if (ByValSize < 4) {
      ByValSize = 4;
    }
    if (ByValAlign < Align(4)) {
      ByValAlign = Align(4);
    }
    unsigned Offset = State.AllocateStack(ByValSize, ByValAlign);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    // Mark all unused registers as allocated to avoid misuse
    // of such registers.
    while (State.AllocateReg(IntRegs))
      ;
    return false;
  }

  // Promote i8 and i16 to i32
  if (LocVT == MVT::i8 || LocVT == MVT::i16) {
    LocVT = MVT::i32;
    if (ArgFlags.isSExt())
      LocInfo = CCValAssign::SExt;
    else if (ArgFlags.isZExt())
      LocInfo = CCValAssign::ZExt;
    else
      LocInfo = CCValAssign::AExt;
  }

  unsigned Register;

  Align OrigAlign = ArgFlags.getNonZeroOrigAlign();
  bool needs64BitAlign = (ValVT == MVT::i32 && OrigAlign == Align(8));
  bool needs128BitAlign = (ValVT == MVT::i32 && OrigAlign == Align(16));

  if (ValVT == MVT::i32) {
    Register = State.AllocateReg(IntRegs);
    // If this is the first part of an i64 arg,
    // the allocated register must be either A2, A4 or A6.
    if (needs64BitAlign && (Register == Xtensa::A3 || Register == Xtensa::A5 ||
                            Register == Xtensa::A7))
      Register = State.AllocateReg(IntRegs);
    // Arguments with 16-byte alignment must be passed in the first register
    // or passed via the stack.
    if (needs128BitAlign && (Register != Xtensa::A2))
      while ((Register = State.AllocateReg(IntRegs)))
        ;
    LocVT = MVT::i32;
  } else if (ValVT == MVT::f64) {
    // Allocate an int register and shadow the next int register.
    Register = State.AllocateReg(IntRegs);
    if (Register == Xtensa::A3 || Register == Xtensa::A5 ||
        Register == Xtensa::A7)
      Register = State.AllocateReg(IntRegs);
    State.AllocateReg(IntRegs);
    LocVT = MVT::i32;
  } else {
    report_fatal_error("Cannot handle this ValVT.");
  }

  if (!Register) {
    unsigned Offset = State.AllocateStack(ValVT.getStoreSize(), OrigAlign);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  } else {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Register, LocVT, LocInfo));
  }

  return false;
}

CCAssignFn *XtensaTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                    bool IsVarArg) const {
  return CC_Xtensa_Custom;
}

SDValue XtensaTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Used with varargs to accumulate store chains.
  std::vector<SDValue> OutChains;

  if (IsVarArg)
    report_fatal_error("Var arg not supported by FormalArguments Lowering");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, IsVarArg));

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // Arguments stored in registers
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC;

      if (RegVT == MVT::i32)
        RC = &Xtensa::ARRegClass;
      else
        report_fatal_error("RegVT not supported by FormalArguments Lowering");

      // Transform the arguments stored in
      // physical registers into virtual ones.
      unsigned Register = MF.addLiveIn(VA.getLocReg(), RC);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Register, RegVT);

      // If this is an 8 or 16-bit value, it has been passed promoted
      // to 32 bits. Insert an assert[sz]ext to capture this, then
      // truncate to the right size.
      if (VA.getLocInfo() != CCValAssign::Full) {
        unsigned Opcode = 0;
        if (VA.getLocInfo() == CCValAssign::SExt)
          Opcode = ISD::AssertSext;
        else if (VA.getLocInfo() == CCValAssign::ZExt)
          Opcode = ISD::AssertZext;
        if (Opcode)
          ArgValue = DAG.getNode(Opcode, DL, RegVT, ArgValue,
                                 DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode((VA.getValVT() == MVT::f32) ? ISD::BITCAST
                                                           : ISD::TRUNCATE,
                               DL, VA.getValVT(), ArgValue);
      }

      InVals.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());

      EVT ValVT = VA.getValVT();

      // The stack pointer offset is relative to the caller stack frame.
      int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
                                     true);

      if (Ins[VA.getValNo()].Flags.isByVal()) {
        // Assume that in this case the load operation is created by the
        // consumer, so just pass the frame index.
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        InVals.push_back(FIN);
      } else {
        // Create load nodes to retrieve arguments from the stack.
        SDValue FIN =
            DAG.getFrameIndex(FI, getFrameIndexTy(DAG.getDataLayout()));
        InVals.push_back(DAG.getLoad(
            ValVT, DL, Chain, FIN,
            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
      }
    }
  }

  // All stores are grouped into one node to allow the matching between
  // the size of Ins and InVals. This only happens for vararg functions.
  if (!OutChains.empty()) {
    OutChains.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
  }

  return Chain;
}

SDValue
XtensaTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  const TargetFrameLowering *TFL = Subtarget.getFrameLowering();

  // TODO: Support tail call optimization.
  IsTailCall = false;

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  CCAssignFn *CC = CCAssignFnForCall(CallConv, IsVarArg);

  CCInfo.AnalyzeCallOperands(Outs, CC);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getStackSize();

  Align StackAlignment = TFL->getStackAlign();
  unsigned NextStackOffset = alignTo(NumBytes, StackAlignment);

  Chain = DAG.getCALLSEQ_START(Chain, NextStackOffset, 0, DL);

  // Copy argument values to their designated locations.
  std::deque<std::pair<unsigned, SDValue>> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];
    ISD::ArgFlagsTy Flags = Outs[I].Flags;

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else if (Flags.isByVal()) {
      assert(VA.isMemLoc());
      assert(Flags.getByValSize() &&
             "ByVal args of size 0 should have been ignored by front-end.");
      assert(!IsTailCall &&
             "Do not tail-call optimize if there is a byval argument.");

      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, Xtensa::SP, PtrVT);
      unsigned Offset = VA.getLocMemOffset();
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset, DL));
      SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), DL, MVT::i32);
      SDValue Memcpy = DAG.getMemcpy(
          Chain, DL, Address, ArgValue, SizeNode, Flags.getNonZeroByValAlign(),
          /*isVolatile=*/false, /*AlwaysInline=*/false,
          /*CI=*/nullptr, std::nullopt, MachinePointerInfo(),
          MachinePointerInfo());
      MemOpChains.push_back(Memcpy);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, Xtensa::SP, PtrVT);
      unsigned Offset = VA.getLocMemOffset();
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset, DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  SDValue Glue;
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    unsigned Reg = RegsToPass[I].first;
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  std::string name;
  unsigned char TF = 0;

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.
  if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    name = E->getSymbol();
    TF = E->getTargetFlags();
    if (isPositionIndependent()) {
      report_fatal_error("PIC relocations are not supported");
    }
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);
  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    name = GV->getName().str();
  }

  if ((!name.empty()) && isLongCall(name.c_str())) {
    // Create a constant pool entry for the callee address.
    XtensaCP::XtensaCPModifier Modifier = XtensaCP::no_modifier;

    XtensaConstantPoolValue *CPV = XtensaConstantPoolSymbol::Create(
        *DAG.getContext(), name.c_str(), 0 /* XtensaCLabelIndex */, false,
        Modifier);

    // Get the address of the callee into a register.
    SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4), 0, TF);
    SDValue CPWrap = getAddrPCRel(CPAddr, DAG);
    Callee = CPWrap;
  }
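
  // At this point the callee of a long call is the PC-relative address of a
  // constant-pool entry rather than a symbol, so the call below is
  // effectively emitted as an indirect call through a register.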

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    unsigned Reg = RegsToPass[I].first;
    Ops.push_back(DAG.getRegister(Reg, RegsToPass[I].second.getValueType()));
  }

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(XtensaISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true), Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_Xtensa);

  // Copy all of the result registers out of their specified physregs.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    unsigned Reg = VA.getLocReg();
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, Reg, VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    InVals.push_back(RetValue);
  }

  return Chain;
}

bool XtensaTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_Xtensa);
}

SDValue
XtensaTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                  bool IsVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  const SmallVectorImpl<SDValue> &OutVals,
                                  const SDLoc &DL, SelectionDAG &DAG) const {
  if (IsVarArg)
    report_fatal_error("VarArg not supported");

  MachineFunction &MF = DAG.getMachineFunction();

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_Xtensa);

  SDValue Glue;
  // Quick exit for void returns.
  if (RetLocs.empty())
    return DAG.getNode(XtensaISD::RET, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Chain and glue the copies together.
    unsigned Register = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Register, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Register, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(XtensaISD::RET, DL, MVT::Other, RetOps);
}

static unsigned getBranchOpcode(ISD::CondCode Cond) {
  switch (Cond) {
  case ISD::SETEQ:
    return Xtensa::BEQ;
  case ISD::SETNE:
    return Xtensa::BNE;
  case ISD::SETLT:
    return Xtensa::BLT;
  case ISD::SETLE:
    return Xtensa::BGE;
  case ISD::SETGT:
    return Xtensa::BLT;
  case ISD::SETGE:
    return Xtensa::BGE;
  case ISD::SETULT:
    return Xtensa::BLTU;
  case ISD::SETULE:
    return Xtensa::BGEU;
  case ISD::SETUGT:
    return Xtensa::BLTU;
  case ISD::SETUGE:
    return Xtensa::BGEU;
  default:
    llvm_unreachable("Unknown branch kind");
  }
}

SDValue XtensaTargetLowering::LowerSELECT_CC(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getOperand(0).getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TrueValue = Op.getOperand(2);
  SDValue FalseValue = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op->getOperand(4))->get();
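
  // Encode the condition as the corresponding machine branch opcode; it is
  // carried as an operand of the SELECT_CC node and read back in
  // emitSelectCC().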
  unsigned BrOpcode = getBranchOpcode(CC);
  SDValue TargetCC = DAG.getConstant(BrOpcode, DL, MVT::i32);

  return DAG.getNode(XtensaISD::SELECT_CC, DL, Ty, LHS, RHS, TrueValue,
                     FalseValue, TargetCC);
}

SDValue XtensaTargetLowering::LowerImmediate(SDValue Op,
                                             SelectionDAG &DAG) const {
  const ConstantSDNode *CN = cast<ConstantSDNode>(Op);
  SDLoc DL(CN);
  APInt APVal = CN->getAPIntValue();
  int64_t Value = APVal.getSExtValue();
  if (Op.getValueType() == MVT::i32) {
    // Check if this node can be lowered to the MOVI instruction, which takes
    // a 12-bit signed immediate.
    if (Value > -2048 && Value <= 2047)
      return Op;
    // Check if this node can be lowered to the ADDMI instruction.
    SDNode &OpNode = *Op.getNode();
    if ((OpNode.hasOneUse() && OpNode.use_begin()->getOpcode() == ISD::ADD) &&
        isShiftedInt<16, 8>(Value))
      return Op;
    // Otherwise, materialize the constant from the constant pool.
    Type *Ty = Type::getInt32Ty(*DAG.getContext());
    Constant *CV = ConstantInt::get(Ty, Value);
    SDValue CP = DAG.getConstantPool(CV, MVT::i32);
    return CP;
  }
  return Op;
}

SDValue XtensaTargetLowering::LowerGlobalAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {
  const GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  SDLoc DL(Op);
  auto PtrVT = Op.getValueType();
  const GlobalValue *GV = G->getGlobal();

  SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, Align(4));
  SDValue CPWrap = getAddrPCRel(CPAddr, DAG);

  return CPWrap;
}

SDValue XtensaTargetLowering::LowerBlockAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  BlockAddressSDNode *Node = cast<BlockAddressSDNode>(Op);
  const BlockAddress *BA = Node->getBlockAddress();
  EVT PtrVT = Op.getValueType();

  XtensaConstantPoolValue *CPV =
      XtensaConstantPoolConstant::Create(BA, 0, XtensaCP::CPBlockAddress);
  SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
  SDValue CPWrap = getAddrPCRel(CPAddr, DAG);

  return CPWrap;
}
XtensaTargetLowering::LowerBR_JT(SDValue Op
, SelectionDAG
&DAG
) const {
620 SDValue Chain
= Op
.getOperand(0);
621 SDValue Table
= Op
.getOperand(1);
622 SDValue Index
= Op
.getOperand(2);
624 JumpTableSDNode
*JT
= cast
<JumpTableSDNode
>(Table
);
625 MachineFunction
&MF
= DAG
.getMachineFunction();
626 const MachineJumpTableInfo
*MJTI
= MF
.getJumpTableInfo();
627 SDValue TargetJT
= DAG
.getTargetJumpTable(JT
->getIndex(), MVT::i32
);
628 const DataLayout
&TD
= DAG
.getDataLayout();
629 EVT PtrVT
= Table
.getValueType();
630 unsigned EntrySize
= MJTI
->getEntrySize(TD
);
  Index = DAG.getNode(ISD::MUL, DL, Index.getValueType(), Index,
                      DAG.getConstant(EntrySize, DL, Index.getValueType()));
  SDValue Addr = DAG.getNode(ISD::ADD, DL, Index.getValueType(), Index, Table);
  SDValue LD =
      DAG.getLoad(PtrVT, DL, Chain, Addr,
                  MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));

  return DAG.getNode(XtensaISD::BR_JT, DL, MVT::Other, LD.getValue(1), LD,
                     TargetJT);
}

SDValue XtensaTargetLowering::LowerJumpTable(SDValue Op,
                                             SelectionDAG &DAG) const {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  EVT PtrVT = Op.getValueType();

  // Create a constant pool entry for the jump table address.
  XtensaConstantPoolValue *CPV =
      XtensaConstantPoolJumpTable::Create(*DAG.getContext(), JT->getIndex());

  // Get the address of the jump table into a register.
  SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));

  return getAddrPCRel(CPAddr, DAG);
}
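
// Wrap Op in an XtensaISD::PCREL_WRAPPER node. This marks the operand as a
// PC-relative constant-pool reference; such literals are typically
// materialized with Xtensa's PC-relative L32R load.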
SDValue XtensaTargetLowering::getAddrPCRel(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  return DAG.getNode(XtensaISD::PCREL_WRAPPER, DL, Ty, Op);
}

SDValue
XtensaTargetLowering::LowerConstantPool(ConstantPoolSDNode *CP,
                                        SelectionDAG &DAG) const {
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result;
  if (!CP->isMachineConstantPoolEntry()) {
    Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(),
                                       CP->getOffset());
  } else {
    report_fatal_error("This constantpool type is not supported yet");
  }

  return getAddrPCRel(Result, DAG);
}

SDValue XtensaTargetLowering::LowerSTACKSAVE(SDValue Op,
                                             SelectionDAG &DAG) const {
  return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op), Xtensa::SP,
                            Op.getValueType());
}

SDValue XtensaTargetLowering::LowerSTACKRESTORE(SDValue Op,
                                                SelectionDAG &DAG) const {
  return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op), Xtensa::SP,
                          Op.getOperand(1));
}

SDValue XtensaTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0); // Legalize the chain.
  SDValue Size = Op.getOperand(1);  // Legalize the size.
  EVT VT = Size->getValueType(0);
  SDLoc DL(Op);

  // Round up Size to a multiple of 32: (Size + 31) & ~31.
  SDValue SizeTmp =
      DAG.getNode(ISD::ADD, DL, VT, Size, DAG.getConstant(31, DL, MVT::i32));
  SDValue SizeRoundUp = DAG.getNode(ISD::AND, DL, VT, SizeTmp,
                                    DAG.getConstant(~31, DL, MVT::i32));

  unsigned SPReg = Xtensa::SP;
  SDValue SP = DAG.getCopyFromReg(Chain, DL, SPReg, VT);
  SDValue NewSP = DAG.getNode(ISD::SUB, DL, VT, SP, SizeRoundUp); // Value
  Chain = DAG.getCopyToReg(SP.getValue(1), DL, SPReg, NewSP); // Output chain
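
  // Re-read the stack pointer, chaining the read after the update so the
  // returned address depends on the allocation having taken place.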
  SDValue NewVal = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i32);
  Chain = NewVal.getValue(1);

  SDValue Ops[2] = {NewVal, Chain};
  return DAG.getMergeValues(Ops, DL);
}

SDValue XtensaTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::BR_JT:
    return LowerBR_JT(Op, DAG);
  case ISD::Constant:
    return LowerImmediate(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return LowerBlockAddress(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::ConstantPool:
    return LowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG);
  case ISD::STACKSAVE:
    return LowerSTACKSAVE(Op, DAG);
  case ISD::STACKRESTORE:
    return LowerSTACKRESTORE(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    return LowerDYNAMIC_STACKALLOC(Op, DAG);
  default:
    report_fatal_error("Unexpected node to lower");
  }
}

const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  case XtensaISD::BR_JT:
    return "XtensaISD::BR_JT";
  case XtensaISD::CALL:
    return "XtensaISD::CALL";
  case XtensaISD::PCREL_WRAPPER:
    return "XtensaISD::PCREL_WRAPPER";
  case XtensaISD::RET:
    return "XtensaISD::RET";
  case XtensaISD::SELECT_CC:
    return "XtensaISD::SELECT_CC";
  }
  return nullptr;
}

//===----------------------------------------------------------------------===//
// Custom insertion
//===----------------------------------------------------------------------===//

MachineBasicBlock *
XtensaTargetLowering::emitSelectCC(MachineInstr &MI,
                                   MachineBasicBlock *MBB) const {
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  MachineOperand &LHS = MI.getOperand(1);
  MachineOperand &RHS = MI.getOperand(2);
  MachineOperand &TrueValue = MI.getOperand(3);
  MachineOperand &FalseValue = MI.getOperand(4);
  unsigned BrKind = MI.getOperand(5).getImm();
776 // To "insert" a SELECT_CC instruction, we actually have to insert
777 // CopyMBB and SinkMBB blocks and add branch to MBB. We build phi
778 // operation in SinkMBB like phi (TrueVakue,FalseValue), where TrueValue
779 // is passed from MMB and FalseValue is passed from CopyMBB.
785 // The incoming instruction knows the
786 // destination vreg to set, the condition code register to branch on, the
787 // true/false values to select between, and a branch opcode to use.
788 const BasicBlock
*LLVM_BB
= MBB
->getBasicBlock();
789 MachineFunction::iterator It
= ++MBB
->getIterator();
791 MachineFunction
*F
= MBB
->getParent();
792 MachineBasicBlock
*CopyMBB
= F
->CreateMachineBasicBlock(LLVM_BB
);
793 MachineBasicBlock
*SinkMBB
= F
->CreateMachineBasicBlock(LLVM_BB
);
795 F
->insert(It
, CopyMBB
);
796 F
->insert(It
, SinkMBB
);

  // Transfer the remainder of MBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  MBB->addSuccessor(CopyMBB);
  MBB->addSuccessor(SinkMBB);
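
  // Emit the conditional branch in MBB: if (LHS <BrKind> RHS) jump to
  // SinkMBB, otherwise fall through into CopyMBB.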
  BuildMI(MBB, DL, TII.get(BrKind))
      .addReg(LHS.getReg())
      .addReg(RHS.getReg())
      .addMBB(SinkMBB);

  CopyMBB->addSuccessor(SinkMBB);

  //  SinkMBB:
  //   %Result = phi [ %FalseValue, CopyMBB ], [ %TrueValue, MBB ]
  //  ...

  BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII.get(Xtensa::PHI),
          MI.getOperand(0).getReg())
      .addReg(FalseValue.getReg())
      .addMBB(CopyMBB)
      .addReg(TrueValue.getReg())
      .addMBB(MBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.

  return SinkMBB;
}

MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *MBB) const {
  switch (MI.getOpcode()) {
  case Xtensa::SELECT:
    return emitSelectCC(MI, MBB);
  default:
    llvm_unreachable("Unexpected instr type to insert");
  }
}