//===-- AVRISelLowering.cpp - AVR DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AVR uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "AVRISelLowering.h"

#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/ErrorHandling.h"

#include "AVRMachineFunctionInfo.h"
#include "AVRTargetMachine.h"
#include "MCTargetDesc/AVRMCTargetDesc.h"

namespace llvm {
AVRTargetLowering::AVRTargetLowering(AVRTargetMachine &tm)
    : TargetLowering(tm) {
  // Set up the register classes.
  addRegisterClass(MVT::i8, &AVR::GPR8RegClass);
  addRegisterClass(MVT::i16, &AVR::DREGSRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(tm.getSubtargetImpl()->getRegisterInfo());

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);
  setSchedulingPreference(Sched::RegPressure);
  setStackPointerRegisterToSaveRestore(AVR::SP);
  setSupportsUnalignedAtomics(true);
  setOperationAction(ISD::GlobalAddress, MVT::i16, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i16, Custom);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i8, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i16, Expand);
  for (MVT VT : MVT::integer_valuetypes()) {
    for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
      setLoadExtAction(N, VT, MVT::i1, Promote);
      setLoadExtAction(N, VT, MVT::i8, Expand);
    }
  }

  setTruncStoreAction(MVT::i16, MVT::i8, Expand);
  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }
  // sub (x, imm) gets canonicalized to add (x, -imm), so for illegal types
  // revert into a sub since we don't have an add with immediate instruction.
  setOperationAction(ISD::ADD, MVT::i32, Custom);
  setOperationAction(ISD::ADD, MVT::i64, Custom);
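  // For example, (sub i32 %x, 5) reaches this target as (add i32 %x, -5);
  // ReplaceNodeResults below turns such nodes back into a SUB.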
  // Our shift instructions are only able to shift 1 bit at a time, so handle
  // this in a custom way.
  setOperationAction(ISD::SRA, MVT::i8, Custom);
  setOperationAction(ISD::SHL, MVT::i8, Custom);
  setOperationAction(ISD::SRL, MVT::i8, Custom);
  setOperationAction(ISD::SRA, MVT::i16, Custom);
  setOperationAction(ISD::SHL, MVT::i16, Custom);
  setOperationAction(ISD::SRL, MVT::i16, Custom);
  setOperationAction(ISD::SHL_PARTS, MVT::i16, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i16, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i16, Expand);
  setOperationAction(ISD::ROTL, MVT::i8, Custom);
  setOperationAction(ISD::ROTL, MVT::i16, Custom);
  setOperationAction(ISD::ROTR, MVT::i8, Custom);
  setOperationAction(ISD::ROTR, MVT::i16, Custom);
  setOperationAction(ISD::BR_CC, MVT::i8, Custom);
  setOperationAction(ISD::BR_CC, MVT::i16, Custom);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::i8, Expand);
  setOperationAction(ISD::SELECT, MVT::i16, Expand);

  setOperationAction(ISD::BSWAP, MVT::i16, Expand);
  // Add support for postincrement and predecrement load/stores.
  setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_DEC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_DEC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_DEC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_DEC, MVT::i16, Legal);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  // Atomic operations which must be lowered to rtlib calls
  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::ATOMIC_SWAP, VT, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, VT, Expand);
  }
  // Division/remainder
  setOperationAction(ISD::UDIV, MVT::i8, Expand);
  setOperationAction(ISD::UDIV, MVT::i16, Expand);
  setOperationAction(ISD::UREM, MVT::i8, Expand);
  setOperationAction(ISD::UREM, MVT::i16, Expand);
  setOperationAction(ISD::SDIV, MVT::i8, Expand);
  setOperationAction(ISD::SDIV, MVT::i16, Expand);
  setOperationAction(ISD::SREM, MVT::i8, Expand);
  setOperationAction(ISD::SREM, MVT::i16, Expand);
  // Make division and modulus custom
  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::UDIVREM, VT, Custom);
    setOperationAction(ISD::SDIVREM, VT, Custom);
  }
  // Do not use MUL. The AVR instructions are closer to SMUL_LOHI &co.
  setOperationAction(ISD::MUL, MVT::i8, Expand);
  setOperationAction(ISD::MUL, MVT::i16, Expand);

  // Expand 16 bit multiplications.
  setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);
  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
  }
  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
  }
  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    // TODO: The generated code is pretty poor. Investigate using the
    // same "shift and subtract with carry" trick that we do for
    // extending 8-bit to 16-bit. This may require infrastructure
    // improvements in how we treat 16-bit "registers" to be feasible.
  }
  // Division rtlib functions (not supported)
  setLibcallName(RTLIB::SDIV_I8, nullptr);
  setLibcallName(RTLIB::SDIV_I16, nullptr);
  setLibcallName(RTLIB::SDIV_I32, nullptr);
  setLibcallName(RTLIB::SDIV_I64, nullptr);
  setLibcallName(RTLIB::SDIV_I128, nullptr);
  setLibcallName(RTLIB::UDIV_I8, nullptr);
  setLibcallName(RTLIB::UDIV_I16, nullptr);
  setLibcallName(RTLIB::UDIV_I32, nullptr);
  setLibcallName(RTLIB::UDIV_I64, nullptr);
  setLibcallName(RTLIB::UDIV_I128, nullptr);
  // Modulus rtlib functions (not supported)
  setLibcallName(RTLIB::SREM_I8, nullptr);
  setLibcallName(RTLIB::SREM_I16, nullptr);
  setLibcallName(RTLIB::SREM_I32, nullptr);
  setLibcallName(RTLIB::SREM_I64, nullptr);
  setLibcallName(RTLIB::SREM_I128, nullptr);
  setLibcallName(RTLIB::UREM_I8, nullptr);
  setLibcallName(RTLIB::UREM_I16, nullptr);
  setLibcallName(RTLIB::UREM_I32, nullptr);
  setLibcallName(RTLIB::UREM_I64, nullptr);
  setLibcallName(RTLIB::UREM_I128, nullptr);
  // Division and modulus rtlib functions
  setLibcallName(RTLIB::SDIVREM_I8, "__divmodqi4");
  setLibcallName(RTLIB::SDIVREM_I16, "__divmodhi4");
  setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
  setLibcallName(RTLIB::SDIVREM_I64, "__divmoddi4");
  setLibcallName(RTLIB::SDIVREM_I128, "__divmodti4");
  setLibcallName(RTLIB::UDIVREM_I8, "__udivmodqi4");
  setLibcallName(RTLIB::UDIVREM_I16, "__udivmodhi4");
  setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  setLibcallName(RTLIB::UDIVREM_I64, "__udivmoddi4");
  setLibcallName(RTLIB::UDIVREM_I128, "__udivmodti4");
  // Several of the runtime library functions use a special calling conv
  setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::AVR_BUILTIN);
  setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::AVR_BUILTIN);
  setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::AVR_BUILTIN);
  setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::AVR_BUILTIN);
  // Trigonometric rtlib functions
  setLibcallName(RTLIB::SIN_F32, "sin");
  setLibcallName(RTLIB::COS_F32, "cos");

  setMinFunctionAlignment(1);
  setMinimumJumpTableEntries(INT_MAX);
}
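// Note: requesting INT_MAX minimum jump-table entries effectively disables
// jump-table emission, so switches are always lowered to compare-and-branch
// sequences instead.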
const char *AVRTargetLowering::getTargetNodeName(unsigned Opcode) const {
#define NODE(name)                                                             \
  case AVRISD::name:                                                           \
    return #name

  switch (Opcode) {
  default:
    return nullptr;
    NODE(RET_FLAG);
    NODE(RETI_FLAG);
    NODE(CALL);
    NODE(WRAPPER);
    NODE(LSL);
    NODE(LSR);
    NODE(ROL);
    NODE(ROR);
    NODE(ASR);
    NODE(LSLLOOP);
    NODE(LSRLOOP);
    NODE(ROLLOOP);
    NODE(RORLOOP);
    NODE(ASRLOOP);
    NODE(BRCOND);
    NODE(CMP);
    NODE(CMPC);
    NODE(TST);
    NODE(SELECT_CC);
#undef NODE
  }
}
EVT AVRTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                          EVT VT) const {
  assert(!VT.isVector() && "No AVR SetCC type for vectors!");
  return MVT::i8;
}
SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const {
  //:TODO: this function has to be completely rewritten to produce optimal
  // code, for now it's producing very long but correct code.
  unsigned Opc8;
  const SDNode *N = Op.getNode();
  EVT VT = Op.getValueType();
  SDLoc dl(N);

  // Expand non-constant shifts to loops.
  if (!isa<ConstantSDNode>(N->getOperand(1))) {
    switch (Op.getOpcode()) {
    default:
      llvm_unreachable("Invalid shift opcode!");
    case ISD::SHL:
      return DAG.getNode(AVRISD::LSLLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    case ISD::SRL:
      return DAG.getNode(AVRISD::LSRLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    case ISD::ROTL:
      return DAG.getNode(AVRISD::ROLLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    case ISD::ROTR:
      return DAG.getNode(AVRISD::RORLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    case ISD::SRA:
      return DAG.getNode(AVRISD::ASRLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    }
  }

  uint64_t ShiftAmount = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  SDValue Victim = N->getOperand(0);

  switch (Op.getOpcode()) {
  case ISD::SRA:
    Opc8 = AVRISD::ASR;
    break;
  case ISD::ROTL:
    Opc8 = AVRISD::ROL;
    break;
  case ISD::ROTR:
    Opc8 = AVRISD::ROR;
    break;
  case ISD::SRL:
    Opc8 = AVRISD::LSR;
    break;
  case ISD::SHL:
    Opc8 = AVRISD::LSL;
    break;
  default:
    llvm_unreachable("Invalid shift opcode");
  }

  while (ShiftAmount--) {
    Victim = DAG.getNode(Opc8, dl, VT, Victim);
  }

  return Victim;
}
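// For constant amounts this simply chains one single-bit shift node per step;
// e.g. (shl i8 %x, 3) becomes three nested AVRISD::LSL nodes, mirroring the
// hardware's one-bit-at-a-time LSL/LSR/ASR/ROL/ROR instructions.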
SDValue AVRTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
  unsigned Opcode = Op->getOpcode();
  assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
         "Invalid opcode for Div/Rem lowering");
  bool IsSigned = (Opcode == ISD::SDIVREM);
  EVT VT = Op->getValueType(0);
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());

  RTLIB::Libcall LC;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("Unexpected request for libcall!");
  case MVT::i8:
    LC = IsSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
    break;
  case MVT::i16:
    LC = IsSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
    break;
  case MVT::i32:
    LC = IsSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
    break;
  case MVT::i64:
    LC = IsSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64;
    break;
  case MVT::i128:
    LC = IsSigned ? RTLIB::SDIVREM_I128 : RTLIB::UDIVREM_I128;
    break;
  }

  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (SDValue const &Value : Op->op_values()) {
    Entry.Node = Value;
    Entry.Ty = Value.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsSExt = IsSigned;
    Entry.IsZExt = !IsSigned;
    Args.push_back(Entry);
  }

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = (Type *)StructType::get(Ty, Ty);

  SDLoc dl(Op);
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
      .setSExtResult(IsSigned)
      .setZExtResult(!IsSigned);

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return CallInfo.first;
}
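// The divmod helpers return {quotient, remainder} as a single aggregate,
// hence the two-element StructType above; each user of the SDIVREM/UDIVREM
// node then extracts whichever half it actually needs.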
SDValue AVRTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  auto DL = DAG.getDataLayout();

  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();

  // Create the TargetGlobalAddress node, folding in the constant offset.
  SDValue Result =
      DAG.getTargetGlobalAddress(GV, SDLoc(Op), getPointerTy(DL), Offset);
  return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result);
}
SDValue AVRTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  auto DL = DAG.getDataLayout();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();

  SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(DL));

  return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result);
}
/// IntCCToAVRCC - Convert a DAG integer condition code to an AVR CC.
static AVRCC::CondCodes intCCToAVRCC(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unknown condition code!");
  case ISD::SETEQ:
    return AVRCC::COND_EQ;
  case ISD::SETNE:
    return AVRCC::COND_NE;
  case ISD::SETGE:
    return AVRCC::COND_GE;
  case ISD::SETLT:
    return AVRCC::COND_LT;
  case ISD::SETUGE:
    return AVRCC::COND_SH;
  case ISD::SETULT:
    return AVRCC::COND_LO;
  }
}
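// Only these six condition codes have a direct AVR branch; the remaining ones
// (SETGT, SETLE, SETUGT, SETULE) are rewritten in getAVRCmp below by swapping
// the operands and reversing the branching condition.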
/// Returns appropriate AVR CMP/CMPC nodes and corresponding condition code for
/// the given operands.
SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                                     SDValue &AVRcc, SelectionDAG &DAG,
                                     SDLoc DL) const {
  SDValue Cmp;
  EVT VT = LHS.getValueType();
  bool UseTest = false;

  switch (CC) {
  default:
    break;
  case ISD::SETLE: {
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETGE;
    break;
  }
  case ISD::SETGT: {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      switch (C->getSExtValue()) {
      case -1: {
        // When doing lhs > -1 use a tst instruction on the top part of lhs
        // and use brpl instead of using a chain of cp/cpc.
        UseTest = true;
        AVRcc = DAG.getConstant(AVRCC::COND_PL, DL, MVT::i8);
        break;
      }
      case 0: {
        // Turn lhs > 0 into 0 < lhs since 0 can be materialized with
        // __zero_reg__ in lhs.
        RHS = LHS;
        LHS = DAG.getConstant(0, DL, VT);
        CC = ISD::SETLT;
        break;
      }
      default: {
        // Turn lhs < rhs with lhs constant into rhs >= lhs+1, this allows
        // us to fold the constant into the cmp instruction.
        RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
        CC = ISD::SETGE;
        break;
      }
      }
      break;
    }
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETLT;
    break;
  }
  case ISD::SETLT: {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      switch (C->getSExtValue()) {
      case 1: {
        // Turn lhs < 1 into 0 >= lhs since 0 can be materialized with
        // __zero_reg__ in lhs.
        RHS = LHS;
        LHS = DAG.getConstant(0, DL, VT);
        CC = ISD::SETGE;
        break;
      }
      case 0: {
        // When doing lhs < 0 use a tst instruction on the top part of lhs
        // and use brmi instead of using a chain of cp/cpc.
        UseTest = true;
        AVRcc = DAG.getConstant(AVRCC::COND_MI, DL, MVT::i8);
        break;
      }
      }
    }
    break;
  }
  case ISD::SETULE: {
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETUGE;
    break;
  }
  case ISD::SETUGT: {
    // Turn lhs < rhs with lhs constant into rhs >= lhs+1, this allows us to
    // fold the constant into the cmp instruction.
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
      CC = ISD::SETUGE;
      break;
    }
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETULT;
    break;
  }
  }
  // Expand 32 and 64 bit comparisons with custom CMP and CMPC nodes instead of
  // using the default and/or/xor expansion code which is much longer.
  if (VT == MVT::i32) {
    SDValue LHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue LHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS,
                                DAG.getIntPtrConstant(1, DL));
    SDValue RHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue RHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS,
                                DAG.getIntPtrConstant(1, DL));

    if (UseTest) {
      // When using tst we only care about the highest part.
      SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHShi,
                                DAG.getIntPtrConstant(1, DL));
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
    } else {
      Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHSlo, RHSlo);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
    }
  } else if (VT == MVT::i64) {
    SDValue LHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue LHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS,
                                DAG.getIntPtrConstant(1, DL));

    SDValue LHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0,
                               DAG.getIntPtrConstant(0, DL));
    SDValue LHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0,
                               DAG.getIntPtrConstant(1, DL));
    SDValue LHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1,
                               DAG.getIntPtrConstant(0, DL));
    SDValue LHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1,
                               DAG.getIntPtrConstant(1, DL));

    SDValue RHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue RHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS,
                                DAG.getIntPtrConstant(1, DL));

    SDValue RHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0,
                               DAG.getIntPtrConstant(0, DL));
    SDValue RHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0,
                               DAG.getIntPtrConstant(1, DL));
    SDValue RHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1,
                               DAG.getIntPtrConstant(0, DL));
    SDValue RHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1,
                               DAG.getIntPtrConstant(1, DL));

    if (UseTest) {
      // When using tst we only care about the highest part.
      SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS3,
                                DAG.getIntPtrConstant(1, DL));
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
    } else {
      Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHS0, RHS0);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS1, RHS1, Cmp);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS2, RHS2, Cmp);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS3, RHS3, Cmp);
    }
  } else if (VT == MVT::i8 || VT == MVT::i16) {
    if (UseTest) {
      // When using tst we only care about the highest part.
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue,
                        (VT == MVT::i8)
                            ? LHS
                            : DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8,
                                          LHS, DAG.getIntPtrConstant(1, DL)));
    } else {
      Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHS, RHS);
    }
  } else {
    llvm_unreachable("Invalid comparison size");
  }

  // When using a test instruction AVRcc is already set.
  if (!UseTest) {
    AVRcc = DAG.getConstant(intCCToAVRCC(CC), DL, MVT::i8);
  }

  return Cmp;
}
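// At the instruction level this models the classic multi-byte compare: one
// initial CP on the lowest part followed by CPC (compare with carry) on each
// higher part, e.g. "cp r24, r22; cpc r25, r23" for a 16-bit comparison.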
SDValue AVRTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  SDValue TargetCC;
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);

  return DAG.getNode(AVRISD::BRCOND, dl, MVT::Other, Chain, Dest, TargetCC,
                     Cmp);
}
SDValue AVRTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TrueV = Op.getOperand(2);
  SDValue FalseV = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc dl(Op);

  SDValue TargetCC;
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};

  return DAG.getNode(AVRISD::SELECT_CC, dl, VTs, Ops);
}
SDValue AVRTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc DL(Op);

  SDValue TargetCC;
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, DL);

  SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType());
  SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType());
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};

  return DAG.getNode(AVRISD::SELECT_CC, DL, VTs, Ops);
}
SDValue AVRTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  const MachineFunction &MF = DAG.getMachineFunction();
  const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  auto DL = DAG.getDataLayout();
  SDLoc dl(Op);

  // Vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDValue FI = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), getPointerTy(DL));

  return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
                      MachinePointerInfo(SV), 0);
}
SDValue AVRTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom lower this!");
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL:
  case ISD::ROTR:
    return LowerShifts(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return LowerBlockAddress(Op, DAG);
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    return LowerDivRem(Op, DAG);
  }

  return SDValue();
}
/// Replace a node with an illegal result type
/// with a new node built out of custom code.
void AVRTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDLoc DL(N);

  switch (N->getOpcode()) {
  case ISD::ADD: {
    // Convert add (x, imm) into sub (x, -imm).
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
      SDValue Sub = DAG.getNode(
          ISD::SUB, DL, N->getValueType(0), N->getOperand(0),
          DAG.getConstant(-C->getAPIntValue(), DL, C->getValueType(0)));
      Results.push_back(Sub);
    }
    break;
  }
  default: {
    SDValue Res = LowerOperation(SDValue(N, 0), DAG);

    for (unsigned I = 0, E = Res->getNumValues(); I != E; ++I)
      Results.push_back(Res.getValue(I));

    break;
  }
  }
}
/// Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool AVRTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  int64_t Offs = AM.BaseOffs;

  // Allow absolute addresses.
  if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && Offs == 0) {
    return true;
  }

  // Flash memory instructions only allow zero offsets.
  if (isa<PointerType>(Ty) && AS == AVR::ProgramMemory) {
    return false;
  }

  // Allow reg+<6bit> offset.
  if (Offs < 0)
    Offs = -Offs;
  if (AM.BaseGV == 0 && AM.HasBaseReg && AM.Scale == 0 && isUInt<6>(Offs)) {
    return true;
  }

  return false;
}
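// The reg+<6bit> form maps onto the LDD/STD instructions, which address
// through the Y or Z pointer pair with a displacement of 0..63, e.g.
// "ldd r24, Y+10".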
/// Returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool AVRTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  EVT VT;
  const SDNode *Op;
  SDLoc DL(N);

  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Op = LD->getBasePtr().getNode();
    if (LD->getExtensionType() != ISD::NON_EXTLOAD)
      return false;
    if (AVR::isProgramMemoryAccess(LD)) {
      return false;
    }
  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Op = ST->getBasePtr().getNode();
    if (AVR::isProgramMemoryAccess(ST)) {
      return false;
    }
  } else {
    return false;
  }

  if (VT != MVT::i8 && VT != MVT::i16) {
    return false;
  }

  if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) {
    return false;
  }

  if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
    int RHSC = RHS->getSExtValue();
    if (Op->getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if ((VT == MVT::i16 && RHSC != -2) || (VT == MVT::i8 && RHSC != -1)) {
      return false;
    }

    Base = Op->getOperand(0);
    Offset = DAG.getConstant(RHSC, DL, MVT::i8);
    AM = ISD::PRE_DEC;

    return true;
  }

  return false;
}
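// This matches the hardware pre-decrement addressing forms, e.g. "ld r24, -X"
// for an i8 access (step -1); an i16 access uses a step of -2.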
/// Returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool AVRTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const {
  EVT VT;
  SDLoc DL(N);

  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    if (LD->getExtensionType() != ISD::NON_EXTLOAD)
      return false;
  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    if (AVR::isProgramMemoryAccess(ST)) {
      return false;
    }
  } else {
    return false;
  }

  if (VT != MVT::i8 && VT != MVT::i16) {
    return false;
  }

  if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) {
    return false;
  }

  if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
    int RHSC = RHS->getSExtValue();
    if (Op->getOpcode() == ISD::SUB)
      RHSC = -RHSC;
    if ((VT == MVT::i16 && RHSC != 2) || (VT == MVT::i8 && RHSC != 1)) {
      return false;
    }

    Base = Op->getOperand(0);
    Offset = DAG.getConstant(RHSC, DL, MVT::i8);
    AM = ISD::POST_INC;

    return true;
  }

  return false;
}
bool AVRTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {
  return true;
}
//===----------------------------------------------------------------------===//
//             Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "AVRGenCallingConv.inc"
/// For each argument in a function store the number of pieces it is composed
/// of.
static void parseFunctionArgs(const SmallVectorImpl<ISD::InputArg> &Ins,
                              SmallVectorImpl<unsigned> &Out) {
  for (const ISD::InputArg &Arg : Ins) {
    if (Arg.PartOffset > 0)
      continue;
    unsigned Bytes = ((Arg.ArgVT.getSizeInBits()) + 7) / 8;

    Out.push_back((Bytes + 1) / 2);
  }
}
/// For external symbols there is no function prototype information so we
/// have to rely directly on argument sizes.
static void parseExternFuncCallArgs(const SmallVectorImpl<ISD::OutputArg> &In,
                                    SmallVectorImpl<unsigned> &Out) {
  for (unsigned i = 0, e = In.size(); i != e;) {
    unsigned Size = 0;
    unsigned Offset = 0;
    while ((i != e) && (In[i].PartOffset == Offset)) {
      Offset += In[i].VT.getStoreSize();
      ++i;
      ++Size;
    }
    Out.push_back(Size);
  }
}
static StringRef getFunctionName(TargetLowering::CallLoweringInfo &CLI) {
  SDValue Callee = CLI.Callee;

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    return G->getSymbol();
  }

  if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    return G->getGlobal()->getName();
  }

  llvm_unreachable("don't know how to get the name for this callee");
}
/// Analyze incoming and outgoing function arguments. We need custom C++ code
/// to handle special constraints in the ABI like reversing the order of the
/// pieces of split arguments. In addition, all pieces of a certain argument
/// have to be passed either using registers or the stack but never mixing both.
static void analyzeStandardArguments(TargetLowering::CallLoweringInfo *CLI,
                                     const Function *F, const DataLayout *TD,
                                     const SmallVectorImpl<ISD::OutputArg> *Outs,
                                     const SmallVectorImpl<ISD::InputArg> *Ins,
                                     CallingConv::ID CallConv,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     CCState &CCInfo, bool IsCall, bool IsVarArg) {
  static const MCPhysReg RegList8[] = {AVR::R24, AVR::R22, AVR::R20,
                                       AVR::R18, AVR::R16, AVR::R14,
                                       AVR::R12, AVR::R10, AVR::R8};
  static const MCPhysReg RegList16[] = {AVR::R25R24, AVR::R23R22, AVR::R21R20,
                                        AVR::R19R18, AVR::R17R16, AVR::R15R14,
                                        AVR::R13R12, AVR::R11R10, AVR::R9R8};

  // Variadic functions do not need all the analysis below.
  if (IsVarArg) {
    if (IsCall) {
      CCInfo.AnalyzeCallOperands(*Outs, ArgCC_AVR_Vararg);
    } else {
      CCInfo.AnalyzeFormalArguments(*Ins, ArgCC_AVR_Vararg);
    }
    return;
  }

  // Fill in the Args array which will contain original argument sizes.
  SmallVector<unsigned, 8> Args;
  if (IsCall) {
    parseExternFuncCallArgs(*Outs, Args);
  } else {
    assert(F != nullptr && "function should not be null");
    parseFunctionArgs(*Ins, Args);
  }

  unsigned RegsLeft = array_lengthof(RegList8), ValNo = 0;
  // Variadic functions always use the stack.
  bool UsesStack = false;
  for (unsigned i = 0, pos = 0, e = Args.size(); i != e; ++i) {
    unsigned Size = Args[i];

    // If we have a zero-sized argument, don't attempt to lower it.
    // AVR-GCC does not support zero-sized arguments and so we need not
    // worry about ABI compatibility.
    if (Size == 0)
      continue;

    MVT LocVT = (IsCall) ? (*Outs)[pos].VT : (*Ins)[pos].VT;

    // If we have plenty of regs to pass the whole argument do it.
    if (!UsesStack && (Size <= RegsLeft)) {
      const MCPhysReg *RegList = (LocVT == MVT::i16) ? RegList16 : RegList8;

      for (unsigned j = 0; j != Size; ++j) {
        unsigned Reg = CCInfo.AllocateReg(
            ArrayRef<MCPhysReg>(RegList, array_lengthof(RegList8)));
        CCInfo.addLoc(
            CCValAssign::getReg(ValNo++, LocVT, Reg, LocVT, CCValAssign::Full));
        --RegsLeft;
      }

      // Reverse the order of the pieces to agree with the "big endian" format
      // required in the calling convention ABI.
      std::reverse(ArgLocs.begin() + pos, ArgLocs.begin() + pos + Size);
    } else {
      // Pass the rest of arguments using the stack.
      UsesStack = true;
      for (unsigned j = 0; j != Size; ++j) {
        unsigned Offset = CCInfo.AllocateStack(
            TD->getTypeAllocSize(EVT(LocVT).getTypeForEVT(CCInfo.getContext())),
            TD->getABITypeAlignment(
                EVT(LocVT).getTypeForEVT(CCInfo.getContext())));
        CCInfo.addLoc(CCValAssign::getMem(ValNo++, LocVT, Offset, LocVT,
                                          CCValAssign::Full));
      }
    }
    pos += Size;
  }
}
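// Example: a 4-byte argument is two i16 pieces allocated to consecutive pairs
// (say R25:R24 then R23:R22); the reverse above gives the low piece the
// later-allocated pair (R23:R22), matching where avr-gcc places the low bytes.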
static void analyzeBuiltinArguments(TargetLowering::CallLoweringInfo &CLI,
                                    const Function *F, const DataLayout *TD,
                                    const SmallVectorImpl<ISD::OutputArg> *Outs,
                                    const SmallVectorImpl<ISD::InputArg> *Ins,
                                    CallingConv::ID CallConv,
                                    SmallVectorImpl<CCValAssign> &ArgLocs,
                                    CCState &CCInfo, bool IsCall, bool IsVarArg) {
  StringRef FuncName = getFunctionName(CLI);

  if (FuncName.startswith("__udivmod") || FuncName.startswith("__divmod")) {
    CCInfo.AnalyzeCallOperands(*Outs, ArgCC_AVR_BUILTIN_DIV);
  } else {
    analyzeStandardArguments(&CLI, F, TD, Outs, Ins,
                             CallConv, ArgLocs, CCInfo,
                             IsCall, IsVarArg);
  }
}
static void analyzeArguments(TargetLowering::CallLoweringInfo *CLI,
                             const Function *F, const DataLayout *TD,
                             const SmallVectorImpl<ISD::OutputArg> *Outs,
                             const SmallVectorImpl<ISD::InputArg> *Ins,
                             CallingConv::ID CallConv,
                             SmallVectorImpl<CCValAssign> &ArgLocs,
                             CCState &CCInfo, bool IsCall, bool IsVarArg) {
  switch (CallConv) {
  case CallingConv::AVR_BUILTIN: {
    analyzeBuiltinArguments(*CLI, F, TD, Outs, Ins,
                            CallConv, ArgLocs, CCInfo,
                            IsCall, IsVarArg);
    return;
  }
  default: {
    analyzeStandardArguments(CLI, F, TD, Outs, Ins,
                             CallConv, ArgLocs, CCInfo,
                             IsCall, IsVarArg);
    return;
  }
  }
}
SDValue AVRTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto DL = DAG.getDataLayout();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  analyzeArguments(nullptr, &MF.getFunction(), &DL, 0, &Ins, CallConv, ArgLocs,
                   CCInfo, false, isVarArg);

  for (CCValAssign &VA : ArgLocs) {

    // Arguments stored on registers.
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC;
      if (RegVT == MVT::i8) {
        RC = &AVR::GPR8RegClass;
      } else if (RegVT == MVT::i16) {
        RC = &AVR::DREGSRegClass;
      } else {
        llvm_unreachable("Unknown argument type!");
      }

      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);

      // :NOTE: Clang should not promote any i8 into i16 but for safety the
      // following code will handle zexts or sexts generated by other
      // front ends. Otherwise:
      // If this is an 8 bit value, it is really passed promoted
      // to 16 bits. Insert an assert[sz]ext to capture this, then
      // truncate to the right size.
      switch (VA.getLocInfo()) {
      default:
        llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full:
        break;
      case CCValAssign::BCvt:
        ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::SExt:
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::ZExt:
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      }

      InVals.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());

      EVT LocVT = VA.getLocVT();

      // Create the frame index object for this incoming parameter.
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.
      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DL));
      InVals.push_back(DAG.getLoad(LocVT, dl, Chain, FIN,
                                   MachinePointerInfo::getFixedStack(MF, FI),
                                   0));
    }
  }

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    unsigned StackSize = CCInfo.getNextStackOffset();
    AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();

    AFI->setVarArgsFrameIndex(MFI.CreateFixedObject(2, StackSize, true));
  }

  return Chain;
}
//===----------------------------------------------------------------------===//
//                  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//
SDValue AVRTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();

  // AVR does not yet support tail call optimization.
  isTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  const Function *F = nullptr;
  if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();

    F = cast<Function>(GV);
    Callee =
        DAG.getTargetGlobalAddress(GV, DL, getPointerTy(DAG.getDataLayout()));
  } else if (const ExternalSymbolSDNode *ES =
                 dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(ES->getSymbol(),
                                         getPointerTy(DAG.getDataLayout()));
  }

  analyzeArguments(&CLI, F, &DAG.getDataLayout(), &Outs, 0, CallConv, ArgLocs,
                   CCInfo, true, isVarArg);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  // First, walk the register assignments, inserting copies.
  unsigned AI, AE;
  bool HasStackArgs = false;
  for (AI = 0, AE = ArgLocs.size(); AI != AE; ++AI) {
    CCValAssign &VA = ArgLocs[AI];
    EVT RegVT = VA.getLocVT();
    SDValue Arg = OutVals[AI];

    // Promote the value if needed. With Clang this should not happen.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, RegVT, Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, RegVT, Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, RegVT, Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, DL, RegVT, Arg);
      break;
    }

    // Stop when we encounter a stack argument, we need to process them
    // in reverse order in the loop below.
    if (VA.isMemLoc()) {
      HasStackArgs = true;
      break;
    }

    // Arguments that can be passed on registers must be kept in the RegsToPass
    // vector.
    RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
  }

  // Second, stack arguments have to be walked in reverse order by inserting
  // chained stores, this ensures their order is not changed by the scheduler
  // and that the push instruction sequence generated is correct, otherwise they
  // can be freely intermixed.
  if (HasStackArgs) {
    for (AE = AI, AI = ArgLocs.size(); AI != AE; --AI) {
      unsigned Loc = AI - 1;
      CCValAssign &VA = ArgLocs[Loc];
      SDValue Arg = OutVals[Loc];

      assert(VA.isMemLoc());

      // SP points to one stack slot further so add one to adjust it.
      SDValue PtrOff = DAG.getNode(
          ISD::ADD, DL, getPointerTy(DAG.getDataLayout()),
          DAG.getRegister(AVR::SP, getPointerTy(DAG.getDataLayout())),
          DAG.getIntPtrConstant(VA.getLocMemOffset() + 1, DL));

      Chain =
          DAG.getStore(Chain, DL, Arg, PtrOff,
                       MachinePointerInfo::getStack(MF, VA.getLocMemOffset()),
                       0);
    }
  }
  // Build a sequence of copy-to-reg nodes chained together with token chain and
  // flag operands which copy the outgoing args into registers. The InFlag is
  // necessary since all emitted instructions must be stuck together.
  SDValue InFlag;
  for (auto Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (auto Reg : RegsToPass) {
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
  }

  // Add a register mask operand representing the call-preserved registers.
  const AVRTargetMachine &TM = (const AVRTargetMachine &)getTargetMachine();
  const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode()) {
    Ops.push_back(InFlag);
  }

  Chain = DAG.getNode(AVRISD::CALL, DL, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
                             DAG.getIntPtrConstant(0, DL, true), InFlag, DL);

  if (!Ins.empty()) {
    InFlag = Chain.getValue(1);
  }

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, DL, DAG,
                         InVals);
}
/// Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue AVRTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Handle runtime calling convs.
  auto CCFunction = CCAssignFnForReturn(CallConv);
  CCInfo.AnalyzeCallResult(Ins, CCFunction);

  if (CallConv != CallingConv::AVR_BUILTIN && RVLocs.size() > 1) {
    // Reverse split return values to get the "big endian" format required
    // to agree with the calling convention ABI.
    std::reverse(RVLocs.begin(), RVLocs.end());
  }

  // Copy all of the result registers out of their specified physreg.
  for (CCValAssign const &RVLoc : RVLocs) {
    Chain = DAG.getCopyFromReg(Chain, dl, RVLoc.getLocReg(), RVLoc.getValVT(),
                               InFlag)
                .getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}
//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

CCAssignFn *AVRTargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const {
  switch (CC) {
  case CallingConv::AVR_BUILTIN:
    return RetCC_AVR_BUILTIN;
  default:
    return RetCC_AVR;
  }
}

bool
AVRTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);

  auto CCFunction = CCAssignFnForReturn(CallConv);
  return CCInfo.CheckReturn(Outs, CCFunction);
}
SDValue
AVRTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  auto CCFunction = CCAssignFnForReturn(CallConv);
  CCInfo.AnalyzeReturn(Outs, CCFunction);

  // If this is the first return lowered for this function, add the regs to
  // the liveout set for the function.
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned e = RVLocs.size();

  // Reverse split return values to get the "big endian" format required
  // to agree with the calling convention ABI.
  if (e > 1) {
    std::reverse(RVLocs.begin(), RVLocs.end());
  }

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Copy the result values into the output registers.
  for (unsigned i = 0; i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  // Don't emit the ret/reti instruction when the naked attribute is present in
  // the function being compiled.
  if (MF.getFunction().getAttributes().hasAttribute(
          AttributeList::FunctionIndex, Attribute::Naked)) {
    return Chain;
  }

  unsigned RetOpc =
      (CallConv == CallingConv::AVR_INTR || CallConv == CallingConv::AVR_SIGNAL)
          ? AVRISD::RETI_FLAG
          : AVRISD::RET_FLAG;

  RetOps[0] = Chain; // Update chain.

  if (Flag.getNode()) {
    RetOps.push_back(Flag);
  }

  return DAG.getNode(RetOpc, dl, MVT::Other, RetOps);
}
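// Interrupt and signal handlers (the AVR_INTR and AVR_SIGNAL calling
// conventions) must return with RETI rather than RET so that the global
// interrupt flag is restored, hence the RETI_FLAG node above.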
//===----------------------------------------------------------------------===//
//  Custom Inserters
//===----------------------------------------------------------------------===//
MachineBasicBlock *AVRTargetLowering::insertShift(MachineInstr &MI,
                                                  MachineBasicBlock *BB) const {
  unsigned Opc;
  const TargetRegisterClass *RC;
  bool HasRepeatedOperand = false;
  MachineFunction *F = BB->getParent();
  MachineRegisterInfo &RI = F->getRegInfo();
  const AVRTargetMachine &TM = (const AVRTargetMachine &)getTargetMachine();
  const TargetInstrInfo &TII = *TM.getSubtargetImpl()->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Invalid shift opcode!");
  case AVR::Lsl8:
    Opc = AVR::ADDRdRr; // LSL is an alias of ADD Rd, Rd
    RC = &AVR::GPR8RegClass;
    HasRepeatedOperand = true;
    break;
  case AVR::Lsl16:
    Opc = AVR::LSLWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Asr8:
    Opc = AVR::ASRRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Asr16:
    Opc = AVR::ASRWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Lsr8:
    Opc = AVR::LSRRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Lsr16:
    Opc = AVR::LSRWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Rol8:
    Opc = AVR::ADCRdRr; // ROL is an alias of ADC Rd, Rd
    RC = &AVR::GPR8RegClass;
    HasRepeatedOperand = true;
    break;
  case AVR::Rol16:
    Opc = AVR::ROLWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Ror8:
    Opc = AVR::RORRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Ror16:
    Opc = AVR::RORWRd;
    RC = &AVR::DREGSRegClass;
    break;
  }

  const BasicBlock *LLVM_BB = BB->getBasicBlock();

  MachineFunction::iterator I;
  for (I = BB->getIterator(); I != F->end() && &(*I) != BB; ++I);
  if (I != F->end()) ++I;

  // Create loop block.
  MachineBasicBlock *LoopBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *RemBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, LoopBB);
  F->insert(I, RemBB);

  // Update machine-CFG edges by transferring all successors of the current
  // block to the block containing instructions after shift.
  RemBB->splice(RemBB->begin(), BB, std::next(MachineBasicBlock::iterator(MI)),
                BB->end());
  RemBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add edges BB => LoopBB => RemBB, BB => RemBB, LoopBB => LoopBB.
  BB->addSuccessor(LoopBB);
  BB->addSuccessor(RemBB);
  LoopBB->addSuccessor(RemBB);
  LoopBB->addSuccessor(LoopBB);

  unsigned ShiftAmtReg = RI.createVirtualRegister(&AVR::LD8RegClass);
  unsigned ShiftAmtReg2 = RI.createVirtualRegister(&AVR::LD8RegClass);
  unsigned ShiftReg = RI.createVirtualRegister(RC);
  unsigned ShiftReg2 = RI.createVirtualRegister(RC);
  unsigned ShiftAmtSrcReg = MI.getOperand(2).getReg();
  unsigned SrcReg = MI.getOperand(1).getReg();
  unsigned DstReg = MI.getOperand(0).getReg();

  // BB:
  // cpi N, 0
  // breq RemBB
  BuildMI(BB, dl, TII.get(AVR::CPIRdK)).addReg(ShiftAmtSrcReg).addImm(0);
  BuildMI(BB, dl, TII.get(AVR::BREQk)).addMBB(RemBB);

  // LoopBB:
  // ShiftReg = phi [%SrcReg, BB], [%ShiftReg2, LoopBB]
  // ShiftAmt = phi [%N, BB], [%ShiftAmt2, LoopBB]
  // ShiftReg2 = shift ShiftReg
  // ShiftAmt2 = ShiftAmt - 1;
  BuildMI(LoopBB, dl, TII.get(AVR::PHI), ShiftReg)
      .addReg(SrcReg)
      .addMBB(BB)
      .addReg(ShiftReg2)
      .addMBB(LoopBB);
  BuildMI(LoopBB, dl, TII.get(AVR::PHI), ShiftAmtReg)
      .addReg(ShiftAmtSrcReg)
      .addMBB(BB)
      .addReg(ShiftAmtReg2)
      .addMBB(LoopBB);

  auto ShiftMI = BuildMI(LoopBB, dl, TII.get(Opc), ShiftReg2).addReg(ShiftReg);
  if (HasRepeatedOperand)
    ShiftMI.addReg(ShiftReg);

  BuildMI(LoopBB, dl, TII.get(AVR::SUBIRdK), ShiftAmtReg2)
      .addReg(ShiftAmtReg)
      .addImm(1);
  BuildMI(LoopBB, dl, TII.get(AVR::BRNEk)).addMBB(LoopBB);

  // RemBB:
  // DestReg = phi [%SrcReg, BB], [%ShiftReg, LoopBB]
  BuildMI(*RemBB, RemBB->begin(), dl, TII.get(AVR::PHI), DstReg)
      .addReg(SrcReg)
      .addMBB(BB)
      .addReg(ShiftReg2)
      .addMBB(LoopBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return RemBB;
}
static bool isCopyMulResult(MachineBasicBlock::iterator const &I) {
  if (I->getOpcode() == AVR::COPY) {
    unsigned SrcReg = I->getOperand(1).getReg();
    return (SrcReg == AVR::R0 || SrcReg == AVR::R1);
  }

  return false;
}

// The mul instructions wreak havoc on our zero_reg R1. We need to clear it
// after the result has been evacuated. This is probably not the best way to do
// it, but it works for now.
MachineBasicBlock *AVRTargetLowering::insertMul(MachineInstr &MI,
                                                MachineBasicBlock *BB) const {
  const AVRTargetMachine &TM = (const AVRTargetMachine &)getTargetMachine();
  const TargetInstrInfo &TII = *TM.getSubtargetImpl()->getInstrInfo();
  MachineBasicBlock::iterator I(MI);
  ++I; // in any case insert *after* the mul instruction
  if (isCopyMulResult(I))
    ++I;
  if (isCopyMulResult(I))
    ++I;
  BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::EORRdRr), AVR::R1)
      .addReg(AVR::R1)
      .addReg(AVR::R1);
  return BB;
}
MachineBasicBlock *
AVRTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *MBB) const {
  int Opc = MI.getOpcode();

  // Pseudo shift instructions with a non constant shift amount are expanded
  // into a loop.
  switch (Opc) {
  case AVR::Lsl8:
  case AVR::Lsl16:
  case AVR::Lsr8:
  case AVR::Lsr16:
  case AVR::Rol8:
  case AVR::Rol16:
  case AVR::Ror8:
  case AVR::Ror16:
  case AVR::Asr8:
  case AVR::Asr16:
    return insertShift(MI, MBB);
  case AVR::MULRdRr:
  case AVR::MULSRdRr:
    return insertMul(MI, MBB);
  }

  assert((Opc == AVR::Select16 || Opc == AVR::Select8) &&
         "Unexpected instr type to insert");

  const AVRInstrInfo &TII = (const AVRInstrInfo &)*MI.getParent()
                                ->getParent()
                                ->getSubtarget()
                                .getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();

  // To "insert" a SELECT instruction, we insert the diamond
  // control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch
  // on, the true/false values to select between, and a branch opcode
  // to use.

  MachineFunction *MF = MBB->getParent();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineBasicBlock *trueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *falseMBB = MF->CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator I;
  for (I = MF->begin(); I != MF->end() && &(*I) != MBB; ++I);
  if (I != MF->end()) ++I;
  MF->insert(I, trueMBB);
  MF->insert(I, falseMBB);

  // Transfer remaining instructions and all successors of the current
  // block to the block which will contain the Phi node for the
  // select.
  trueMBB->splice(trueMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  trueMBB->transferSuccessorsAndUpdatePHIs(MBB);

  AVRCC::CondCodes CC = (AVRCC::CondCodes)MI.getOperand(3).getImm();
  BuildMI(MBB, dl, TII.getBrCond(CC)).addMBB(trueMBB);
  BuildMI(MBB, dl, TII.get(AVR::RJMPk)).addMBB(falseMBB);
  MBB->addSuccessor(falseMBB);
  MBB->addSuccessor(trueMBB);

  // Unconditionally flow back to the true block
  BuildMI(falseMBB, dl, TII.get(AVR::RJMPk)).addMBB(trueMBB);
  falseMBB->addSuccessor(trueMBB);

  // Set up the Phi node to determine where we came from
  BuildMI(*trueMBB, trueMBB->begin(), dl, TII.get(AVR::PHI),
          MI.getOperand(0).getReg())
      .addReg(MI.getOperand(1).getReg())
      .addMBB(MBB)
      .addReg(MI.getOperand(2).getReg())
      .addMBB(falseMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return trueMBB;
}
//===----------------------------------------------------------------------===//
//  Inline Asm Support
//===----------------------------------------------------------------------===//

AVRTargetLowering::ConstraintType
AVRTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    // See http://www.nongnu.org/avr-libc/user-manual/inline_asm.html
    switch (Constraint[0]) {
    case 'a': // Simple upper registers
    case 'b': // Base pointer registers pairs
    case 'd': // Upper register
    case 'l': // Lower registers
    case 'e': // Pointer register pairs
    case 'q': // Stack pointer register
    case 'r': // Any register
    case 'w': // Special upper register pairs
      return C_RegisterClass;
    case 't': // Temporary register
    case 'x': case 'X': // Pointer register pair X
    case 'y': case 'Y': // Pointer register pair Y
    case 'z': case 'Z': // Pointer register pair Z
      return C_Register;
    case 'Q': // A memory address based on Y or Z pointer with displacement.
      return C_Memory;
    case 'G': // Floating point constant
    case 'I': // 6-bit positive integer constant
    case 'J': // 6-bit negative integer constant
    case 'K': // Integer constant (Range: 2)
    case 'L': // Integer constant (Range: 0)
    case 'M': // 8-bit integer constant
    case 'N': // Integer constant (Range: -1)
    case 'O': // Integer constant (Range: 8, 16, 24)
    case 'P': // Integer constant (Range: 1)
    case 'R': // Integer constant (Range: -6 to 5)
      return C_Other;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}
unsigned
AVRTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
  // Not sure if this is actually the right thing to do, but we got to do
  // *something* [agnat]
  switch (ConstraintCode[0]) {
  case 'Q':
    return InlineAsm::Constraint_Q;
  }
  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
}
AVRTargetLowering::ConstraintWeight
AVRTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;

  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  // (this behaviour has been copied from the ARM backend)
  if (!CallOperandVal) {
    return CW_Default;
  }

  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'd':
  case 'r':
  case 'l':
    weight = CW_Register;
    break;
  case 'a':
  case 'b':
  case 'e':
  case 'q':
  case 't':
  case 'w':
  case 'x': case 'X':
  case 'y': case 'Y':
  case 'z': case 'Z':
    weight = CW_SpecificReg;
    break;
  case 'G':
    if (const ConstantFP *C = dyn_cast<ConstantFP>(CallOperandVal)) {
      if (C->isZero()) {
        weight = CW_Constant;
      }
    }
    break;
  case 'I':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (isUInt<6>(C->getZExtValue())) {
        weight = CW_Constant;
      }
    }
    break;
  case 'J':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -63) && (C->getSExtValue() <= 0)) {
        weight = CW_Constant;
      }
    }
    break;
  case 'K':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() == 2) {
        weight = CW_Constant;
      }
    }
    break;
  case 'L':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() == 0) {
        weight = CW_Constant;
      }
    }
    break;
  case 'M':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (isUInt<8>(C->getZExtValue())) {
        weight = CW_Constant;
      }
    }
    break;
  case 'N':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getSExtValue() == -1) {
        weight = CW_Constant;
      }
    }
    break;
  case 'O':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 8) || (C->getZExtValue() == 16) ||
          (C->getZExtValue() == 24)) {
        weight = CW_Constant;
      }
    }
    break;
  case 'P':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() == 1) {
        weight = CW_Constant;
      }
    }
    break;
  case 'R':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -6) && (C->getSExtValue() <= 5)) {
        weight = CW_Constant;
      }
    }
    break;
  case 'Q':
    weight = CW_Memory;
    break;
  }

  return weight;
}
std::pair<unsigned, const TargetRegisterClass *>
AVRTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  auto STI = static_cast<const AVRTargetMachine &>(this->getTargetMachine())
                 .getSubtargetImpl();

  // We only support i8 and i16.
  //
  //:FIXME: remove this assert for now since it gets sometimes executed
  // assert((VT == MVT::i16 || VT == MVT::i8) && "Wrong operand type.");

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Simple upper registers r16..r23.
      return std::make_pair(0U, &AVR::LD8loRegClass);
    case 'b': // Base pointer registers: y, z.
      return std::make_pair(0U, &AVR::PTRDISPREGSRegClass);
    case 'd': // Upper registers r16..r31.
      return std::make_pair(0U, &AVR::LD8RegClass);
    case 'l': // Lower registers r0..r15.
      return std::make_pair(0U, &AVR::GPR8loRegClass);
    case 'e': // Pointer register pairs: x, y, z.
      return std::make_pair(0U, &AVR::PTRREGSRegClass);
    case 'q': // Stack pointer register: SPH:SPL.
      return std::make_pair(0U, &AVR::GPRSPRegClass);
    case 'r': // Any register: r0..r31.
      if (VT == MVT::i8)
        return std::make_pair(0U, &AVR::GPR8RegClass);

      assert(VT == MVT::i16 && "inline asm constraint too large");
      return std::make_pair(0U, &AVR::DREGSRegClass);
    case 't': // Temporary register: r0.
      return std::make_pair(unsigned(AVR::R0), &AVR::GPR8RegClass);
    case 'w': // Special upper register pairs: r24, r26, r28, r30.
      return std::make_pair(0U, &AVR::IWREGSRegClass);
    case 'x': // Pointer register pair X: r27:r26.
    case 'X':
      return std::make_pair(unsigned(AVR::R27R26), &AVR::PTRREGSRegClass);
    case 'y': // Pointer register pair Y: r29:r28.
    case 'Y':
      return std::make_pair(unsigned(AVR::R29R28), &AVR::PTRREGSRegClass);
    case 'z': // Pointer register pair Z: r31:r30.
    case 'Z':
      return std::make_pair(unsigned(AVR::R31R30), &AVR::PTRREGSRegClass);
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(STI->getRegisterInfo(),
                                                      Constraint, VT);
}
void AVRTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();

  // Currently only support length 1 constraints.
  if (Constraint.length() != 1) {
    return;
  }

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default:
    break;
  // Deal with integers first:
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P':
  case 'R': {
    const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C) {
      return;
    }

    int64_t CVal64 = C->getSExtValue();
    uint64_t CUVal64 = C->getZExtValue();
    switch (ConstraintLetter) {
    case 'I': // 0..63
      if (!isUInt<6>(CUVal64))
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'J': // -63..0
      if (CVal64 < -63 || CVal64 > 0)
        return;
      Result = DAG.getTargetConstant(CVal64, DL, Ty);
      break;
    case 'K': // 2
      if (CUVal64 != 2)
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'L': // 0
      if (CUVal64 != 0)
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'M': // 0..255
      if (!isUInt<8>(CUVal64))
        return;
      // i8 type may be printed as a negative number,
      // e.g. 254 would be printed as -2,
      // so we force it to i16 at least.
      if (Ty.getSimpleVT() == MVT::i8) {
        Ty = MVT::i16;
      }
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'N': // -1
      if (CVal64 != -1)
        return;
      Result = DAG.getTargetConstant(CVal64, DL, Ty);
      break;
    case 'O': // 8, 16, 24
      if (CUVal64 != 8 && CUVal64 != 16 && CUVal64 != 24)
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'P': // 1
      if (CUVal64 != 1)
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'R': // -6..5
      if (CVal64 < -6 || CVal64 > 5)
        return;
      Result = DAG.getTargetConstant(CVal64, DL, Ty);
      break;
    }

    break;
  }
  case 'G': {
    const ConstantFPSDNode *FC = dyn_cast<ConstantFPSDNode>(Op);
    if (!FC || !FC->isZero())
      return;
    // Soften float to i8 0
    Result = DAG.getTargetConstant(0, DL, MVT::i8);
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
unsigned AVRTargetLowering::getRegisterByName(const char *RegName, EVT VT,
                                              SelectionDAG &DAG) const {
  unsigned Reg;

  if (VT == MVT::i8) {
    Reg = StringSwitch<unsigned>(RegName)
      .Case("r0", AVR::R0).Case("r1", AVR::R1).Case("r2", AVR::R2)
      .Case("r3", AVR::R3).Case("r4", AVR::R4).Case("r5", AVR::R5)
      .Case("r6", AVR::R6).Case("r7", AVR::R7).Case("r8", AVR::R8)
      .Case("r9", AVR::R9).Case("r10", AVR::R10).Case("r11", AVR::R11)
      .Case("r12", AVR::R12).Case("r13", AVR::R13).Case("r14", AVR::R14)
      .Case("r15", AVR::R15).Case("r16", AVR::R16).Case("r17", AVR::R17)
      .Case("r18", AVR::R18).Case("r19", AVR::R19).Case("r20", AVR::R20)
      .Case("r21", AVR::R21).Case("r22", AVR::R22).Case("r23", AVR::R23)
      .Case("r24", AVR::R24).Case("r25", AVR::R25).Case("r26", AVR::R26)
      .Case("r27", AVR::R27).Case("r28", AVR::R28).Case("r29", AVR::R29)
      .Case("r30", AVR::R30).Case("r31", AVR::R31)
      .Case("X", AVR::R27R26).Case("Y", AVR::R29R28).Case("Z", AVR::R31R30)
      .Default(0);
  } else if (VT == MVT::i16) {
    Reg = StringSwitch<unsigned>(RegName)
      .Case("r0", AVR::R1R0).Case("r2", AVR::R3R2)
      .Case("r4", AVR::R5R4).Case("r6", AVR::R7R6)
      .Case("r8", AVR::R9R8).Case("r10", AVR::R11R10)
      .Case("r12", AVR::R13R12).Case("r14", AVR::R15R14)
      .Case("r16", AVR::R17R16).Case("r18", AVR::R19R18)
      .Case("r20", AVR::R21R20).Case("r22", AVR::R23R22)
      .Case("r24", AVR::R25R24).Case("r26", AVR::R27R26)
      .Case("r28", AVR::R29R28).Case("r30", AVR::R31R30)
      .Case("X", AVR::R27R26).Case("Y", AVR::R29R28).Case("Z", AVR::R31R30)
      .Default(0);
  }

  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}

} // end of namespace llvm