1 //===- llvm/lib/Target/ARM/ARMCallLowering.cpp - Call lowering ------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
14 //===----------------------------------------------------------------------===//
16 #include "ARMCallLowering.h"
17 #include "ARMBaseInstrInfo.h"
18 #include "ARMISelLowering.h"
19 #include "ARMSubtarget.h"
20 #include "Utils/ARMBaseInfo.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/CodeGen/Analysis.h"
23 #include "llvm/CodeGen/CallingConvLower.h"
24 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
25 #include "llvm/CodeGen/GlobalISel/Utils.h"
26 #include "llvm/CodeGen/LowLevelType.h"
27 #include "llvm/CodeGen/MachineBasicBlock.h"
28 #include "llvm/CodeGen/MachineFrameInfo.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstrBuilder.h"
31 #include "llvm/CodeGen/MachineMemOperand.h"
32 #include "llvm/CodeGen/MachineOperand.h"
33 #include "llvm/CodeGen/MachineRegisterInfo.h"
34 #include "llvm/CodeGen/TargetRegisterInfo.h"
35 #include "llvm/CodeGen/TargetSubtargetInfo.h"
36 #include "llvm/CodeGen/ValueTypes.h"
37 #include "llvm/IR/Attributes.h"
38 #include "llvm/IR/DataLayout.h"
39 #include "llvm/IR/DerivedTypes.h"
40 #include "llvm/IR/Function.h"
41 #include "llvm/IR/Type.h"
42 #include "llvm/IR/Value.h"
43 #include "llvm/Support/Casting.h"
44 #include "llvm/Support/LowLevelTypeImpl.h"
45 #include "llvm/Support/MachineValueType.h"
53 ARMCallLowering::ARMCallLowering(const ARMTargetLowering
&TLI
)
54 : CallLowering(&TLI
) {}
// NOTE(review): this region lost lines in extraction (embedded original line
// numbers skip, e.g. 56 -> 61, 66 -> 71); the `Type *T` parameter line,
// several `return` statements and closing braces are not visible. Code is
// kept byte-identical below; only comments were added.
/// Decide whether this type can be lowered by the GlobalISel path. The
/// visible logic accepts homogeneous structs, and scalar integer/FP values;
/// the exact accepted sizes depend on missing lines -- TODO confirm.
56 static bool isSupportedType(const DataLayout
&DL
, const ARMTargetLowering
&TLI
,
61 if (T
->isStructTy()) {
62 // For now we only allow homogeneous structs that we can manipulate with
63 // G_MERGE_VALUES and G_UNMERGE_VALUES
64 auto StructT
= cast
<StructType
>(T
);
// Element 0 is the reference; any mismatching element type rejects the
// struct (the rejecting `return` itself is in a missing line).
65 for (unsigned i
= 1, e
= StructT
->getNumElements(); i
!= e
; ++i
)
66 if (StructT
->getElementType(i
) != StructT
->getElementType(0))
// Non-struct path: query the EVT for the type. Only simple, non-vector,
// integer or floating-point types pass the next check.
71 EVT VT
= TLI
.getValueType(DL
, T
, true);
72 if (!VT
.isSimple() || VT
.isVector() ||
73 !(VT
.isInteger() || VT
.isFloatingPoint()))
76 unsigned VTSize
= VT
.getSimpleVT().getSizeInBits();
// Presumably this branch handles 64-bit values (guard line missing):
// only FP (f64) is allowed there.
79 // FIXME: Support i64 too
80 return VT
.isFloatingPoint();
// Otherwise accept the small scalar sizes ARM can pass directly.
82 return VTSize
== 1 || VTSize
== 8 || VTSize
== 16 || VTSize
== 32;
// NOTE(review): this struct lives in an anonymous namespace whose opening
// line is missing from this chunk, and several statement/brace lines were
// dropped by extraction (embedded numbering skips). Code is byte-identical;
// comments only.
87 /// Helper class for values going out through an ABI boundary (used for handling
88 /// function return values and call parameters).
89 struct OutgoingValueHandler
: public CallLowering::ValueHandler
{
90 OutgoingValueHandler(MachineIRBuilder
&MIRBuilder
, MachineRegisterInfo
&MRI
,
91 MachineInstrBuilder
&MIB
, CCAssignFn
*AssignFn
)
92 : ValueHandler(MIRBuilder
, MRI
, AssignFn
), MIB(MIB
) {}
/// Materialize the address of an outgoing stack slot as SP + Offset and
/// fill in MPO with the matching stack MachinePointerInfo. Presumably
/// returns AddrReg (the return line is missing) -- TODO confirm.
94 unsigned getStackAddress(uint64_t Size
, int64_t Offset
,
95 MachinePointerInfo
&MPO
) override
{
96 assert((Size
== 1 || Size
== 2 || Size
== 4 || Size
== 8) &&
// 32-bit pointers / offsets: p0 is a 32-bit address, s32 the offset type.
99 LLT p0
= LLT::pointer(0, 32);
100 LLT s32
= LLT::scalar(32);
101 unsigned SPReg
= MRI
.createGenericVirtualRegister(p0
);
102 MIRBuilder
.buildCopy(SPReg
, ARM::SP
);
104 unsigned OffsetReg
= MRI
.createGenericVirtualRegister(s32
);
105 MIRBuilder
.buildConstant(OffsetReg
, Offset
);
107 unsigned AddrReg
= MRI
.createGenericVirtualRegister(p0
);
108 MIRBuilder
.buildGEP(AddrReg
, SPReg
, OffsetReg
);
110 MPO
= MachinePointerInfo::getStack(MIRBuilder
.getMF(), Offset
);
/// Copy (possibly after extension) a value into its assigned physical
/// register and record the register as an implicit use on the call/return
/// instruction so the register allocator keeps it live.
114 void assignValueToReg(unsigned ValVReg
, unsigned PhysReg
,
115 CCValAssign
&VA
) override
{
116 assert(VA
.isRegLoc() && "Value shouldn't be assigned to reg");
117 assert(VA
.getLocReg() == PhysReg
&& "Assigning to the wrong reg?");
119 assert(VA
.getValVT().getSizeInBits() <= 64 && "Unsupported value size");
120 assert(VA
.getLocVT().getSizeInBits() <= 64 && "Unsupported location size");
122 unsigned ExtReg
= extendRegister(ValVReg
, VA
);
123 MIRBuilder
.buildCopy(PhysReg
, ExtReg
);
// Implicit use: the value travels to the callee in PhysReg.
124 MIB
.addUse(PhysReg
, RegState::Implicit
);
/// Store an outgoing stack-passed value through the computed address,
/// extending it first if the calling convention requires.
127 void assignValueToAddress(unsigned ValVReg
, unsigned Addr
, uint64_t Size
,
128 MachinePointerInfo
&MPO
, CCValAssign
&VA
) override
{
129 assert((Size
== 1 || Size
== 2 || Size
== 4 || Size
== 8) &&
132 unsigned ExtReg
= extendRegister(ValVReg
, VA
);
133 auto MMO
= MIRBuilder
.getMF().getMachineMemOperand(
134 MPO
, MachineMemOperand::MOStore
, VA
.getLocVT().getStoreSize(),
136 MIRBuilder
.buildStore(ExtReg
, Addr
, *MMO
);
/// Custom handling for an outgoing f64: unmerge it into two s32 halves and
/// assign each half to its own register location (two CCValAssigns).
139 unsigned assignCustomValue(const CallLowering::ArgInfo
&Arg
,
140 ArrayRef
<CCValAssign
> VAs
) override
{
141 CCValAssign VA
= VAs
[0];
142 assert(VA
.needsCustom() && "Value doesn't need custom handling");
143 assert(VA
.getValVT() == MVT::f64
&& "Unsupported type");
145 CCValAssign NextVA
= VAs
[1];
146 assert(NextVA
.needsCustom() && "Value doesn't need custom handling");
147 assert(NextVA
.getValVT() == MVT::f64
&& "Unsupported type");
149 assert(VA
.getValNo() == NextVA
.getValNo() &&
150 "Values belong to different arguments");
152 assert(VA
.isRegLoc() && "Value should be in reg");
153 assert(NextVA
.isRegLoc() && "Value should be in reg");
155 unsigned NewRegs
[] = {MRI
.createGenericVirtualRegister(LLT::scalar(32)),
156 MRI
.createGenericVirtualRegister(LLT::scalar(32))};
157 MIRBuilder
.buildUnmerge(NewRegs
, Arg
.Reg
);
// The swap below is presumably guarded by !IsLittle on a missing line:
// big-endian targets exchange the two halves -- TODO confirm.
159 bool IsLittle
= MIRBuilder
.getMF().getSubtarget
<ARMSubtarget
>().isLittle();
161 std::swap(NewRegs
[0], NewRegs
[1]);
163 assignValueToReg(NewRegs
[0], VA
.getLocReg(), VA
);
164 assignValueToReg(NewRegs
[1], NextVA
.getLocReg(), NextVA
);
/// Run the calling-convention assignment function and track the maximum
/// stack offset seen so the caller can size the call frame.
169 bool assignArg(unsigned ValNo
, MVT ValVT
, MVT LocVT
,
170 CCValAssign::LocInfo LocInfo
,
171 const CallLowering::ArgInfo
&Info
, CCState
&State
) override
{
172 if (AssignFn(ValNo
, ValVT
, LocVT
, LocInfo
, Info
.Flags
, State
))
// StackSize accumulates the high-water mark of stack usage; its
// assignment target line is missing from this view.
176 std::max(StackSize
, static_cast<uint64_t>(State
.getNextStackOffset()));
// The call/return instruction that receives implicit register uses.
180 MachineInstrBuilder
&MIB
;
// Bytes of stack used by outgoing arguments (see assignArg above).
181 uint64_t StackSize
= 0;
184 } // end anonymous namespace
// NOTE(review): extraction dropped lines inside this function (embedded
// numbering skips, e.g. 205 -> 210); code below is byte-identical, comments
// only.
/// Split OrigArg into one ArgInfo per legal value type (per ComputeValueVTs)
/// and append them to SplitArgs; PerformArgSplit is invoked with each new
/// virtual register and its bit offset inside the original value.
186 void ARMCallLowering::splitToValueTypes(
187 const ArgInfo
&OrigArg
, SmallVectorImpl
<ArgInfo
> &SplitArgs
,
188 MachineFunction
&MF
, const SplitArgTy
&PerformArgSplit
) const {
189 const ARMTargetLowering
&TLI
= *getTLI
<ARMTargetLowering
>();
190 LLVMContext
&Ctx
= OrigArg
.Ty
->getContext();
191 const DataLayout
&DL
= MF
.getDataLayout();
192 MachineRegisterInfo
&MRI
= MF
.getRegInfo();
193 const Function
&F
= MF
.getFunction();
195 SmallVector
<EVT
, 4> SplitVTs
;
196 SmallVector
<uint64_t, 4> Offsets
;
197 ComputeValueVTs(TLI
, DL
, OrigArg
.Ty
, SplitVTs
, &Offsets
, 0);
199 if (SplitVTs
.size() == 1) {
200 // Even if there is no splitting to do, we still want to replace the
201 // original type (e.g. pointer type -> integer).
202 auto Flags
= OrigArg
.Flags
;
203 unsigned OriginalAlignment
= DL
.getABITypeAlignment(OrigArg
.Ty
);
204 Flags
.setOrigAlign(OriginalAlignment
);
// Single-VT case reuses OrigArg.Reg directly; the trailing argument(s) of
// this emplace_back and the early return are on missing lines.
205 SplitArgs
.emplace_back(OrigArg
.Reg
, SplitVTs
[0].getTypeForEVT(Ctx
), Flags
,
// Multi-VT case: create a fresh virtual register per split piece.
210 unsigned FirstRegIdx
= SplitArgs
.size();
211 for (unsigned i
= 0, e
= SplitVTs
.size(); i
!= e
; ++i
) {
212 EVT SplitVT
= SplitVTs
[i
];
213 Type
*SplitTy
= SplitVT
.getTypeForEVT(Ctx
);
214 auto Flags
= OrigArg
.Flags
;
216 unsigned OriginalAlignment
= DL
.getABITypeAlignment(SplitTy
);
217 Flags
.setOrigAlign(OriginalAlignment
);
// HA/HFA-style arguments must occupy consecutive registers; mark the
// pieces accordingly (the condition marking the *last* piece sits on a
// missing line).
219 bool NeedsConsecutiveRegisters
=
220 TLI
.functionArgumentNeedsConsecutiveRegisters(
221 SplitTy
, F
.getCallingConv(), F
.isVarArg());
222 if (NeedsConsecutiveRegisters
) {
223 Flags
.setInConsecutiveRegs();
225 Flags
.setInConsecutiveRegsLast();
229 ArgInfo
{MRI
.createGenericVirtualRegister(getLLTForType(*SplitTy
, DL
)),
230 SplitTy
, Flags
, OrigArg
.IsFixed
});
// Report each piece's register and its offset in bits to the callback.
233 for (unsigned i
= 0; i
< Offsets
.size(); ++i
)
234 PerformArgSplit(SplitArgs
[FirstRegIdx
+ i
].Reg
, Offsets
[i
] * 8);
// NOTE(review): lines are missing from this function in extraction (e.g. the
// void-return early-exit guard before "Nothing to do here" and the failure
// returns). Code below is byte-identical; comments only.
237 /// Lower the return value for the already existing \p Ret. This assumes that
238 /// \p MIRBuilder's insertion point is correct.
239 bool ARMCallLowering::lowerReturnVal(MachineIRBuilder
&MIRBuilder
,
240 const Value
*Val
, unsigned VReg
,
241 MachineInstrBuilder
&Ret
) const {
// Presumably guarded by `if (!Val)` on a missing line -- TODO confirm.
243 // Nothing to do here.
246 auto &MF
= MIRBuilder
.getMF();
247 const auto &F
= MF
.getFunction();
249 auto DL
= MF
.getDataLayout();
250 auto &TLI
= *getTLI
<ARMTargetLowering
>();
// Bail out (fall back on SelectionDAG) for types we cannot handle yet.
251 if (!isSupportedType(DL
, TLI
, Val
->getType()))
254 SmallVector
<ArgInfo
, 4> SplitVTs
;
255 SmallVector
<unsigned, 4> Regs
;
256 ArgInfo
RetInfo(VReg
, Val
->getType());
257 setArgFlags(RetInfo
, AttributeList::ReturnIndex
, DL
, F
);
// Split the return value; the lambda body collecting Regs is on missing
// lines, then the pieces are produced via G_UNMERGE_VALUES.
258 splitToValueTypes(RetInfo
, SplitVTs
, MF
, [&](unsigned Reg
, uint64_t Offset
) {
263 MIRBuilder
.buildUnmerge(Regs
, VReg
);
265 CCAssignFn
*AssignFn
=
266 TLI
.CCAssignFnForReturn(F
.getCallingConv(), F
.isVarArg());
// Route the split pieces into return registers / implicit uses on Ret.
268 OutgoingValueHandler
RetHandler(MIRBuilder
, MF
.getRegInfo(), Ret
, AssignFn
);
269 return handleAssignments(MIRBuilder
, SplitVTs
, RetHandler
);
// NOTE(review): the `return false;` / `return true;` lines and closing brace
// of this function are missing from this extracted view; code below is
// byte-identical, comments only.
/// Build the return instruction for the current function: create it
/// uninserted, let lowerReturnVal attach the return-value copies and
/// implicit uses, then insert it at the builder's current point.
272 bool ARMCallLowering::lowerReturn(MachineIRBuilder
&MIRBuilder
,
273 const Value
*Val
, unsigned VReg
) const {
274 assert(!Val
== !VReg
&& "Return value without a vreg");
276 auto const &ST
= MIRBuilder
.getMF().getSubtarget
<ARMSubtarget
>();
// The subtarget picks the correct return opcode; it is predicable, hence
// the AL (always) predicate operands.
277 unsigned Opcode
= ST
.getReturnOpcode();
278 auto Ret
= MIRBuilder
.buildInstrNoInsert(Opcode
).add(predOps(ARMCC::AL
));
280 if (!lowerReturnVal(MIRBuilder
, Val
, VReg
, Ret
))
283 MIRBuilder
.insertInstr(Ret
);
// NOTE(review): this struct sits in an anonymous namespace whose opening is
// outside this view, and extraction dropped several lines (constructor
// parameter line, some returns/braces). Code is byte-identical; comments
// only.
289 /// Helper class for values coming in through an ABI boundary (used for handling
290 /// formal arguments and call return values).
291 struct IncomingValueHandler
: public CallLowering::ValueHandler
{
292 IncomingValueHandler(MachineIRBuilder
&MIRBuilder
, MachineRegisterInfo
&MRI
,
294 : ValueHandler(MIRBuilder
, MRI
, AssignFn
) {}
/// Create a fixed stack object for an incoming stack-passed value and
/// return (presumably, the return line is missing) a pointer vreg holding
/// its frame-index address.
296 unsigned getStackAddress(uint64_t Size
, int64_t Offset
,
297 MachinePointerInfo
&MPO
) override
{
298 assert((Size
== 1 || Size
== 2 || Size
== 4 || Size
== 8) &&
301 auto &MFI
= MIRBuilder
.getMF().getFrameInfo();
// Fixed (immutable) object at the caller-determined offset.
303 int FI
= MFI
.CreateFixedObject(Size
, Offset
, true);
304 MPO
= MachinePointerInfo::getFixedStack(MIRBuilder
.getMF(), FI
);
// The declaration of AddrReg is on a missing line above this create call.
307 MRI
.createGenericVirtualRegister(LLT::pointer(MPO
.getAddrSpace(), 32));
308 MIRBuilder
.buildFrameIndex(AddrReg
, FI
);
/// Load an incoming stack-passed value. Extended values occupy a full
/// 32-bit slot, so load s32 and truncate; otherwise load directly.
313 void assignValueToAddress(unsigned ValVReg
, unsigned Addr
, uint64_t Size
,
314 MachinePointerInfo
&MPO
, CCValAssign
&VA
) override
{
315 assert((Size
== 1 || Size
== 2 || Size
== 4 || Size
== 8) &&
318 if (VA
.getLocInfo() == CCValAssign::SExt
||
319 VA
.getLocInfo() == CCValAssign::ZExt
) {
320 // If the value is zero- or sign-extended, its size becomes 4 bytes, so
321 // that's what we should load.
323 assert(MRI
.getType(ValVReg
).isScalar() && "Only scalars supported atm");
325 auto LoadVReg
= MRI
.createGenericVirtualRegister(LLT::scalar(32));
326 buildLoad(LoadVReg
, Addr
, Size
, /* Alignment */ 0, MPO
);
327 MIRBuilder
.buildTrunc(ValVReg
, LoadVReg
);
329 // If the value is not extended, a simple load will suffice.
330 buildLoad(ValVReg
, Addr
, Size
, /* Alignment */ 0, MPO
);
/// Emit a G_LOAD of Size bytes from Addr into Val with a matching
/// MachineMemOperand.
334 void buildLoad(unsigned Val
, unsigned Addr
, uint64_t Size
, unsigned Alignment
,
335 MachinePointerInfo
&MPO
) {
336 auto MMO
= MIRBuilder
.getMF().getMachineMemOperand(
337 MPO
, MachineMemOperand::MOLoad
, Size
, Alignment
);
338 MIRBuilder
.buildLoad(Val
, Addr
, *MMO
);
/// Copy an incoming physical register into its value vreg, truncating when
/// the location type is wider than the value type.
341 void assignValueToReg(unsigned ValVReg
, unsigned PhysReg
,
342 CCValAssign
&VA
) override
{
343 assert(VA
.isRegLoc() && "Value shouldn't be assigned to reg");
344 assert(VA
.getLocReg() == PhysReg
&& "Assigning to the wrong reg?");
346 auto ValSize
= VA
.getValVT().getSizeInBits();
347 auto LocSize
= VA
.getLocVT().getSizeInBits();
349 assert(ValSize
<= 64 && "Unsupported value size");
350 assert(LocSize
<= 64 && "Unsupported location size");
// Subclasses record the physreg as a live-in or an implicit def.
352 markPhysRegUsed(PhysReg
);
353 if (ValSize
== LocSize
) {
354 MIRBuilder
.buildCopy(ValVReg
, PhysReg
);
356 assert(ValSize
< LocSize
&& "Extensions not supported");
358 // We cannot create a truncating copy, nor a trunc of a physical register.
359 // Therefore, we need to copy the content of the physical register into a
360 // virtual one and then truncate that.
// PhysRegToVReg's declaration is on a missing line above this call.
362 MRI
.createGenericVirtualRegister(LLT::scalar(LocSize
));
363 MIRBuilder
.buildCopy(PhysRegToVReg
, PhysReg
);
364 MIRBuilder
.buildTrunc(ValVReg
, PhysRegToVReg
);
/// Custom handling for an incoming f64: read two s32 register halves and
/// merge them back into the f64 vreg.
368 unsigned assignCustomValue(const ARMCallLowering::ArgInfo
&Arg
,
369 ArrayRef
<CCValAssign
> VAs
) override
{
370 CCValAssign VA
= VAs
[0];
371 assert(VA
.needsCustom() && "Value doesn't need custom handling");
372 assert(VA
.getValVT() == MVT::f64
&& "Unsupported type");
374 CCValAssign NextVA
= VAs
[1];
375 assert(NextVA
.needsCustom() && "Value doesn't need custom handling");
376 assert(NextVA
.getValVT() == MVT::f64
&& "Unsupported type");
378 assert(VA
.getValNo() == NextVA
.getValNo() &&
379 "Values belong to different arguments");
381 assert(VA
.isRegLoc() && "Value should be in reg");
382 assert(NextVA
.isRegLoc() && "Value should be in reg");
384 unsigned NewRegs
[] = {MRI
.createGenericVirtualRegister(LLT::scalar(32)),
385 MRI
.createGenericVirtualRegister(LLT::scalar(32))};
387 assignValueToReg(NewRegs
[0], VA
.getLocReg(), VA
);
388 assignValueToReg(NewRegs
[1], NextVA
.getLocReg(), NextVA
);
// The swap is presumably guarded by !IsLittle on a missing line (halves
// are exchanged on big-endian) -- TODO confirm.
390 bool IsLittle
= MIRBuilder
.getMF().getSubtarget
<ARMSubtarget
>().isLittle();
392 std::swap(NewRegs
[0], NewRegs
[1]);
394 MIRBuilder
.buildMerge(Arg
.Reg
, NewRegs
);
399 /// Marking a physical register as used is different between formal
400 /// parameters, where it's a basic block live-in, and call returns, where it's
401 /// an implicit-def of the call instruction.
402 virtual void markPhysRegUsed(unsigned PhysReg
) = 0;
// NOTE(review): the constructor's AssignFn parameter line and the struct's
// closing `};` are missing from this extracted view. Code is byte-identical;
// comments only.
/// IncomingValueHandler specialization for formal arguments: an incoming
/// physreg is recorded as a live-in of the entry basic block.
405 struct FormalArgHandler
: public IncomingValueHandler
{
406 FormalArgHandler(MachineIRBuilder
&MIRBuilder
, MachineRegisterInfo
&MRI
,
408 : IncomingValueHandler(MIRBuilder
, MRI
, AssignFn
) {}
410 void markPhysRegUsed(unsigned PhysReg
) override
{
411 MIRBuilder
.getMBB().addLiveIn(PhysReg
);
415 } // end anonymous namespace
// NOTE(review): extraction dropped lines here (the `const Function &F`
// parameter, early returns, Idx bookkeeping, final `return true;`). Code is
// byte-identical; comments only.
/// Lower the function's formal arguments into vregs: split each argument by
/// value type, assign locations via the calling convention, then merge the
/// pieces back into the caller-visible VRegs.
417 bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder
&MIRBuilder
,
419 ArrayRef
<unsigned> VRegs
) const {
420 auto &TLI
= *getTLI
<ARMTargetLowering
>();
421 auto Subtarget
= TLI
.getSubtarget();
// Thumb is not supported by this path (the bail-out return is on a
// missing line).
423 if (Subtarget
->isThumb())
426 // Quick exit if there aren't any args
433 auto &MF
= MIRBuilder
.getMF();
434 auto &MBB
= MIRBuilder
.getMBB();
435 auto DL
= MF
.getDataLayout();
// First pass: reject unsupported argument types / attributes.
437 for (auto &Arg
: F
.args()) {
438 if (!isSupportedType(DL
, TLI
, Arg
.getType()))
440 if (Arg
.hasByValOrInAllocaAttr())
444 CCAssignFn
*AssignFn
=
445 TLI
.CCAssignFnForCall(F
.getCallingConv(), F
.isVarArg());
447 FormalArgHandler
ArgHandler(MIRBuilder
, MIRBuilder
.getMF().getRegInfo(),
450 SmallVector
<ArgInfo
, 8> ArgInfos
;
451 SmallVector
<unsigned, 4> SplitRegs
;
// Second pass: split each argument; collected pieces are merged back into
// the original VRegs entry below. Idx presumably advances per argument on
// missing lines -- TODO confirm.
453 for (auto &Arg
: F
.args()) {
454 ArgInfo
AInfo(VRegs
[Idx
], Arg
.getType());
455 setArgFlags(AInfo
, Idx
+ AttributeList::FirstArgIndex
, DL
, F
);
459 splitToValueTypes(AInfo
, ArgInfos
, MF
, [&](unsigned Reg
, uint64_t Offset
) {
460 SplitRegs
.push_back(Reg
);
463 if (!SplitRegs
.empty())
464 MIRBuilder
.buildMerge(VRegs
[Idx
], SplitRegs
);
// Argument copies must precede any already-emitted instructions in the
// entry block, so assign at the block's begin.
470 MIRBuilder
.setInstr(*MBB
.begin());
472 if (!handleAssignments(MIRBuilder
, ArgInfos
, ArgHandler
))
475 // Move back to the end of the basic block.
476 MIRBuilder
.setMBB(MBB
);
// NOTE(review): the anonymous-namespace opening and the struct's closing
// `};` are missing from this extracted view. Code is byte-identical;
// comments only.
/// IncomingValueHandler specialization for values returned by a call: an
/// incoming physreg is recorded as an implicit def on the call instruction.
482 struct CallReturnHandler
: public IncomingValueHandler
{
483 CallReturnHandler(MachineIRBuilder
&MIRBuilder
, MachineRegisterInfo
&MRI
,
484 MachineInstrBuilder MIB
, CCAssignFn
*AssignFn
)
485 : IncomingValueHandler(MIRBuilder
, MRI
, AssignFn
), MIB(MIB
) {}
487 void markPhysRegUsed(unsigned PhysReg
) override
{
488 MIB
.addDef(PhysReg
, RegState::Implicit
);
// The call instruction receiving the implicit defs.
491 MachineInstrBuilder MIB
;
494 } // end anonymous namespace
// NOTE(review): many lines are missing from this function in extraction
// (e.g. the direct-call opcode selection before BX_CALL/BMOVPCRX_CALL, the
// callee operand add, several returns and the final `return true;`). Code is
// byte-identical; comments only.
/// Lower a call: wrap it in ADJCALLSTACKDOWN/UP, build the (uninserted)
/// call instruction, assign outgoing arguments, insert the call, then
/// assign and merge the returned value.
496 bool ARMCallLowering::lowerCall(MachineIRBuilder
&MIRBuilder
,
497 CallingConv::ID CallConv
,
498 const MachineOperand
&Callee
,
499 const ArgInfo
&OrigRet
,
500 ArrayRef
<ArgInfo
> OrigArgs
) const {
501 MachineFunction
&MF
= MIRBuilder
.getMF();
502 const auto &TLI
= *getTLI
<ARMTargetLowering
>();
503 const auto &DL
= MF
.getDataLayout();
504 const auto &STI
= MF
.getSubtarget
<ARMSubtarget
>();
505 const TargetRegisterInfo
*TRI
= STI
.getRegisterInfo();
506 MachineRegisterInfo
&MRI
= MF
.getRegInfo();
// Long-call sequences are not supported here (bail-out on missing line).
508 if (STI
.genLongCalls())
// Call-frame setup; its stack-size immediates are filled in at the end
// once ArgHandler.StackSize is known.
511 auto CallSeqStart
= MIRBuilder
.buildInstr(ARM::ADJCALLSTACKDOWN
);
513 // Create the call instruction so we can add the implicit uses of arg
514 // registers, but don't insert it yet.
515 bool isDirect
= !Callee
.isReg();
// Indirect-call opcode choice: BX_CALL needs v4T; the direct-call arm of
// this conditional is on missing lines.
520 : STI
.hasV4TOps() ? ARM::BX_CALL
: ARM::BMOVPCRX_CALL
;
521 auto MIB
= MIRBuilder
.buildInstrNoInsert(CallOpcode
)
523 .addRegMask(TRI
->getCallPreservedMask(MF
, CallConv
));
// An indirect callee in a generic vreg must be constrained to a concrete
// register class before instruction selection.
524 if (Callee
.isReg()) {
525 auto CalleeReg
= Callee
.getReg();
526 if (CalleeReg
&& !TRI
->isPhysicalRegister(CalleeReg
))
527 MIB
->getOperand(0).setReg(constrainOperandRegClass(
528 MF
, *TRI
, MRI
, *STI
.getInstrInfo(), *STI
.getRegBankInfo(),
529 *MIB
.getInstr(), MIB
->getDesc(), Callee
, 0));
// Split every outgoing argument; byval and unsupported types bail out
// (returns on missing lines).
532 SmallVector
<ArgInfo
, 8> ArgInfos
;
533 for (auto Arg
: OrigArgs
) {
534 if (!isSupportedType(DL
, TLI
, Arg
.Ty
))
540 if (Arg
.Flags
.isByVal())
543 SmallVector
<unsigned, 8> Regs
;
544 splitToValueTypes(Arg
, ArgInfos
, MF
, [&](unsigned Reg
, uint64_t Offset
) {
549 MIRBuilder
.buildUnmerge(Regs
, Arg
.Reg
);
552 auto ArgAssignFn
= TLI
.CCAssignFnForCall(CallConv
, /*IsVarArg=*/false);
553 OutgoingValueHandler
ArgHandler(MIRBuilder
, MRI
, MIB
, ArgAssignFn
);
554 if (!handleAssignments(MIRBuilder
, ArgInfos
, ArgHandler
))
557 // Now we can add the actual call instruction to the correct basic block.
558 MIRBuilder
.insertInstr(MIB
);
// Handle the returned value, if any: split, assign from return registers,
// then merge the pieces into OrigRet.Reg.
560 if (!OrigRet
.Ty
->isVoidTy()) {
561 if (!isSupportedType(DL
, TLI
, OrigRet
.Ty
))
565 SmallVector
<unsigned, 8> SplitRegs
;
566 splitToValueTypes(OrigRet
, ArgInfos
, MF
,
567 [&](unsigned Reg
, uint64_t Offset
) {
568 SplitRegs
.push_back(Reg
);
571 auto RetAssignFn
= TLI
.CCAssignFnForReturn(CallConv
, /*IsVarArg=*/false);
572 CallReturnHandler
RetHandler(MIRBuilder
, MRI
, MIB
, RetAssignFn
);
573 if (!handleAssignments(MIRBuilder
, ArgInfos
, RetHandler
))
576 if (!SplitRegs
.empty()) {
577 // We have split the value and allocated each individual piece, now build
579 MIRBuilder
.buildMerge(OrigRet
.Reg
, SplitRegs
);
583 // We now know the size of the stack - update the ADJCALLSTACKDOWN
585 CallSeqStart
.addImm(ArgHandler
.StackSize
).addImm(0).add(predOps(ARMCC::AL
));
// Matching call-frame teardown with the same stack size.
587 MIRBuilder
.buildInstr(ARM::ADJCALLSTACKUP
)
588 .addImm(ArgHandler
.StackSize
)
590 .add(predOps(ARMCC::AL
));