//===- llvm/lib/Target/ARM/ARMCallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//
#include "ARMCallLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMISelLowering.h"
#include "ARMSubtarget.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MachineValueType.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;
ARMCallLowering::ARMCallLowering(const ARMTargetLowering &TLI)
    : CallLowering(&TLI) {}

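// Returns true if this is a type the ARM GlobalISel call lowering can handle:
// arrays and homogeneous structs recurse into their element type; otherwise
// only simple scalar integer and floating-point types of the sizes checked
// below are accepted (e.g. i32 and double pass, while i64, vectors and
// mixed-element structs are rejected for now).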
static bool isSupportedType(const DataLayout &DL, const ARMTargetLowering &TLI,
                            Type *T) {
  if (T->isArrayTy())
    return isSupportedType(DL, TLI, T->getArrayElementType());

  if (T->isStructTy()) {
    // For now we only allow homogeneous structs that we can manipulate with
    // G_MERGE_VALUES and G_UNMERGE_VALUES.
    auto StructT = cast<StructType>(T);
    for (unsigned i = 1, e = StructT->getNumElements(); i != e; ++i)
      if (StructT->getElementType(i) != StructT->getElementType(0))
        return false;
    return isSupportedType(DL, TLI, StructT->getElementType(0));
  }

  EVT VT = TLI.getValueType(DL, T, true);
  if (!VT.isSimple() || VT.isVector() ||
      !(VT.isInteger() || VT.isFloatingPoint()))
    return false;

  unsigned VTSize = VT.getSimpleVT().getSizeInBits();

  if (VTSize == 64)
    // FIXME: Support i64 too
    return VT.isFloatingPoint();

  return VTSize == 1 || VTSize == 8 || VTSize == 16 || VTSize == 32;
}

namespace {

/// Helper class for values going out through an ABI boundary (used for handling
/// function return values and call parameters).
struct OutgoingValueHandler : public CallLowering::ValueHandler {
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB, CCAssignFn *AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

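  // Compute the address of an outgoing stack argument as SP + Offset, by
  // copying the physical stack pointer into a virtual register and offsetting
  // it with a pointer-arithmetic (G_GEP) instruction.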
  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    LLT p0 = LLT::pointer(0, 32);
    LLT s32 = LLT::scalar(32);
    Register SPReg = MRI.createGenericVirtualRegister(p0);
    MIRBuilder.buildCopy(SPReg, Register(ARM::SP));

    Register OffsetReg = MRI.createGenericVirtualRegister(s32);
    MIRBuilder.buildConstant(OffsetReg, Offset);

    Register AddrReg = MRI.createGenericVirtualRegister(p0);
    MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
    return AddrReg;
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    assert(VA.isRegLoc() && "Value should be in reg");
    assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");

    assert(VA.getValVT().getSizeInBits() <= 64 && "Unsupported value size");
    assert(VA.getLocVT().getSizeInBits() <= 64 && "Unsupported location size");

    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    Register ExtReg = extendRegister(ValVReg, VA);
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOStore, VA.getLocVT().getStoreSize(),
        /* Alignment */ 1);
    MIRBuilder.buildStore(ExtReg, Addr, *MMO);
  }

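  // Custom assignment is currently used only for f64 values that the calling
  // convention places in a pair of GPRs: the f64 is unmerged into two s32
  // halves, which are swapped on big-endian targets before being copied into
  // the two registers chosen by the calling convention.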
  unsigned assignCustomValue(const CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs) override {
    assert(Arg.Regs.size() == 1 && "Can't handle multiple regs yet");

    CCValAssign VA = VAs[0];
    assert(VA.needsCustom() && "Value doesn't need custom handling");
    assert(VA.getValVT() == MVT::f64 && "Unsupported type");

    CCValAssign NextVA = VAs[1];
    assert(NextVA.needsCustom() && "Value doesn't need custom handling");
    assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");

    assert(VA.getValNo() == NextVA.getValNo() &&
           "Values belong to different arguments");

    assert(VA.isRegLoc() && "Value should be in reg");
    assert(NextVA.isRegLoc() && "Value should be in reg");

    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};
    MIRBuilder.buildUnmerge(NewRegs, Arg.Regs[0]);

    bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
    if (!IsLittle)
      std::swap(NewRegs[0], NewRegs[1]);

    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
    assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);

    return 1;
  }

  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, CCState &State) override {
    if (AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State))
      return true;

    StackSize =
        std::max(StackSize, static_cast<uint64_t>(State.getNextStackOffset()));
    return false;
  }

  MachineInstrBuilder &MIB;
  uint64_t StackSize = 0;
};

} // end anonymous namespace

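// For example, a homogeneous struct {i32, i32} arrives as one ArgInfo with
// two virtual registers; ComputeValueVTs yields two i32 parts, and the loop
// below pushes one ArgInfo per part into SplitArgs.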
void ARMCallLowering::splitToValueTypes(const ArgInfo &OrigArg,
                                        SmallVectorImpl<ArgInfo> &SplitArgs,
                                        MachineFunction &MF) const {
  const ARMTargetLowering &TLI = *getTLI<ARMTargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();
  const DataLayout &DL = MF.getDataLayout();
  const Function &F = MF.getFunction();

  SmallVector<EVT, 4> SplitVTs;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, nullptr, nullptr, 0);
  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");

  if (SplitVTs.size() == 1) {
    // Even if there is no splitting to do, we still want to replace the
    // original type (e.g. pointer type -> integer).
    auto Flags = OrigArg.Flags;
    unsigned OriginalAlignment = DL.getABITypeAlignment(OrigArg.Ty);
    Flags.setOrigAlign(OriginalAlignment);
    SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
                           Flags, OrigArg.IsFixed);
    return;
  }

  // Create one ArgInfo for each virtual register.
  for (unsigned i = 0, e = SplitVTs.size(); i != e; ++i) {
    EVT SplitVT = SplitVTs[i];
    Type *SplitTy = SplitVT.getTypeForEVT(Ctx);
    auto Flags = OrigArg.Flags;

    unsigned OriginalAlignment = DL.getABITypeAlignment(SplitTy);
    Flags.setOrigAlign(OriginalAlignment);

    bool NeedsConsecutiveRegisters =
        TLI.functionArgumentNeedsConsecutiveRegisters(
            SplitTy, F.getCallingConv(), F.isVarArg());
    if (NeedsConsecutiveRegisters) {
      Flags.setInConsecutiveRegs();
      if (i == e - 1)
        Flags.setInConsecutiveRegsLast();
    }

    // FIXME: We also want to split SplitTy further.
    Register PartReg = OrigArg.Regs[i];
    SplitArgs.emplace_back(PartReg, SplitTy, Flags, OrigArg.IsFixed);
  }
}

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p MIRBuilder's insertion point is correct.
bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
                                     const Value *Val, ArrayRef<Register> VRegs,
                                     MachineInstrBuilder &Ret) const {
  if (!Val)
    // Nothing to do here.
    return true;

  auto &MF = MIRBuilder.getMF();
  const auto &F = MF.getFunction();

  auto DL = MF.getDataLayout();
  auto &TLI = *getTLI<ARMTargetLowering>();
  if (!isSupportedType(DL, TLI, Val->getType()))
    return false;

  ArgInfo OrigRetInfo(VRegs, Val->getType());
  setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);

  SmallVector<ArgInfo, 4> SplitRetInfos;
  splitToValueTypes(OrigRetInfo, SplitRetInfos, MF);

  CCAssignFn *AssignFn =
      TLI.CCAssignFnForReturn(F.getCallingConv(), F.isVarArg());

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret, AssignFn);
  return handleAssignments(MIRBuilder, SplitRetInfos, RetHandler);
}

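// Lower a return: build the subtarget's return instruction without inserting
// it, let lowerReturnVal attach the return-value copies and implicit uses,
// then insert it at the end of the block.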
bool ARMCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                  const Value *Val,
                                  ArrayRef<Register> VRegs) const {
  assert(!Val == VRegs.empty() && "Return value without a vreg");

  auto const &ST = MIRBuilder.getMF().getSubtarget<ARMSubtarget>();
  unsigned Opcode = ST.getReturnOpcode();
  auto Ret = MIRBuilder.buildInstrNoInsert(Opcode).add(predOps(ARMCC::AL));

  if (!lowerReturnVal(MIRBuilder, Val, VRegs, Ret))
    return false;

  MIRBuilder.insertInstr(Ret);
  return true;
}

namespace {

/// Helper class for values coming in through an ABI boundary (used for handling
/// formal arguments and call return values).
struct IncomingValueHandler : public CallLowering::ValueHandler {
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       CCAssignFn AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn) {}

  bool isArgumentHandler() const override { return true; }

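  // Incoming stack arguments live at fixed offsets from the incoming stack
  // pointer; represent each one as a fixed stack object and materialize its
  // address with a frame-index instruction.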
  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

    Register AddrReg =
        MRI.createGenericVirtualRegister(LLT::pointer(MPO.getAddrSpace(), 32));
    MIRBuilder.buildFrameIndex(AddrReg, FI);

    return AddrReg;
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    if (VA.getLocInfo() == CCValAssign::SExt ||
        VA.getLocInfo() == CCValAssign::ZExt) {
      // If the value is zero- or sign-extended, its size becomes 4 bytes, so
      // that's what we should load.
      Size = 4;
      assert(MRI.getType(ValVReg).isScalar() && "Only scalars supported atm");

      auto LoadVReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
      buildLoad(LoadVReg, Addr, Size, /* Alignment */ 1, MPO);
      MIRBuilder.buildTrunc(ValVReg, LoadVReg);
    } else {
      // If the value is not extended, a simple load will suffice.
      buildLoad(ValVReg, Addr, Size, /* Alignment */ 1, MPO);
    }
  }

  void buildLoad(Register Val, Register Addr, uint64_t Size, unsigned Alignment,
                 MachinePointerInfo &MPO) {
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad, Size, Alignment);
    MIRBuilder.buildLoad(Val, Addr, *MMO);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    assert(VA.isRegLoc() && "Value should be in reg");
    assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");

    auto ValSize = VA.getValVT().getSizeInBits();
    auto LocSize = VA.getLocVT().getSizeInBits();

    assert(ValSize <= 64 && "Unsupported value size");
    assert(LocSize <= 64 && "Unsupported location size");

    markPhysRegUsed(PhysReg);
    if (ValSize == LocSize) {
      MIRBuilder.buildCopy(ValVReg, PhysReg);
    } else {
      assert(ValSize < LocSize && "Extensions not supported");

      // We cannot create a truncating copy, nor a trunc of a physical register.
      // Therefore, we need to copy the content of the physical register into a
      // virtual one and then truncate that.
      Register PhysRegToVReg =
          MRI.createGenericVirtualRegister(LLT::scalar(LocSize));
      MIRBuilder.buildCopy(PhysRegToVReg, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, PhysRegToVReg);
    }
  }

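  // Mirror image of the outgoing f64 handling: receive the two s32 halves in
  // their GPRs, swap them back on big-endian targets, and merge them into the
  // original f64 virtual register.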
  unsigned assignCustomValue(const ARMCallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs) override {
    assert(Arg.Regs.size() == 1 && "Can't handle multiple regs yet");

    CCValAssign VA = VAs[0];
    assert(VA.needsCustom() && "Value doesn't need custom handling");
    assert(VA.getValVT() == MVT::f64 && "Unsupported type");

    CCValAssign NextVA = VAs[1];
    assert(NextVA.needsCustom() && "Value doesn't need custom handling");
    assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");

    assert(VA.getValNo() == NextVA.getValNo() &&
           "Values belong to different arguments");

    assert(VA.isRegLoc() && "Value should be in reg");
    assert(NextVA.isRegLoc() && "Value should be in reg");

    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};

    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
    assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);

    bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
    if (!IsLittle)
      std::swap(NewRegs[0], NewRegs[1]);

    MIRBuilder.buildMerge(Arg.Regs[0], NewRegs);

    return 1;
  }

  /// Marking a physical register as used is different between formal
  /// parameters, where it's a basic block live-in, and call returns, where it's
  /// an implicit-def of the call instruction.
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
};

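// Formal arguments arrive in physical registers, which must be recorded as
// live-ins of both the function and the block receiving them.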
struct FormalArgHandler : public IncomingValueHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                   CCAssignFn AssignFn)
      : IncomingValueHandler(MIRBuilder, MRI, AssignFn) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

} // end anonymous namespace

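// For a simple function like 'define i32 @f(i32 %x)', lowering the formal
// argument typically produces MIR along these lines (a sketch; the exact
// output depends on the subtarget and calling convention):
//   liveins: $r0
//   %0:_(s32) = COPY $r0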
bool ARMCallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  auto &TLI = *getTLI<ARMTargetLowering>();
  auto Subtarget = TLI.getSubtarget();

  if (Subtarget->isThumb1Only())
    return false;

  // Quick exit if there aren't any args
  if (F.arg_empty())
    return true;

  if (F.isVarArg())
    return false;

  auto &MF = MIRBuilder.getMF();
  auto &MBB = MIRBuilder.getMBB();
  auto DL = MF.getDataLayout();

  for (auto &Arg : F.args()) {
    if (!isSupportedType(DL, TLI, Arg.getType()))
      return false;
    if (Arg.hasByValOrInAllocaAttr())
      return false;
  }

  CCAssignFn *AssignFn =
      TLI.CCAssignFnForCall(F.getCallingConv(), F.isVarArg());

  FormalArgHandler ArgHandler(MIRBuilder, MIRBuilder.getMF().getRegInfo(),
                              AssignFn);

  SmallVector<ArgInfo, 8> SplitArgInfos;
  unsigned Idx = 0;
  for (auto &Arg : F.args()) {
    ArgInfo OrigArgInfo(VRegs[Idx], Arg.getType());

    setArgFlags(OrigArgInfo, Idx + AttributeList::FirstArgIndex, DL, F);
    splitToValueTypes(OrigArgInfo, SplitArgInfos, MF);

    Idx++;
  }

  if (!MBB.empty())
    MIRBuilder.setInstr(*MBB.begin());

  if (!handleAssignments(MIRBuilder, SplitArgInfos, ArgHandler))
    return false;

  // Move back to the end of the basic block.
  MIRBuilder.setMBB(MBB);
  return true;
}

namespace {

struct CallReturnHandler : public IncomingValueHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : IncomingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

// FIXME: This should move to the ARMSubtarget when it supports all the opcodes.
unsigned getCallOpcode(const ARMSubtarget &STI, bool isDirect) {
  if (isDirect)
    return STI.isThumb() ? ARM::tBL : ARM::BL;

  if (STI.isThumb())
    return ARM::tBLXr;

  if (STI.hasV5TOps())
    return ARM::BLX;

  if (STI.hasV4TOps())
    return ARM::BX_CALL;

  return ARM::BMOVPCRX_CALL;
}
} // end anonymous namespace

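// Lowering a call emits a fixed sequence: an ADJCALLSTACKDOWN marker, copies
// of the arguments into their registers or stack slots, the call instruction
// itself (carrying implicit uses of the argument registers and implicit defs
// of the return registers), and a matching ADJCALLSTACKUP once the final
// stack size is known.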
bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                CallingConv::ID CallConv,
                                const MachineOperand &Callee,
                                const ArgInfo &OrigRet,
                                ArrayRef<ArgInfo> OrigArgs,
                                const MDNode *KnownCallees) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const auto &TLI = *getTLI<ARMTargetLowering>();
  const auto &DL = MF.getDataLayout();
  const auto &STI = MF.getSubtarget<ARMSubtarget>();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (STI.genLongCalls())
    return false;

  if (STI.isThumb1Only())
    return false;

  auto CallSeqStart = MIRBuilder.buildInstr(ARM::ADJCALLSTACKDOWN);

  // Create the call instruction so we can add the implicit uses of arg
  // registers, but don't insert it yet.
  bool IsDirect = !Callee.isReg();
  auto CallOpcode = getCallOpcode(STI, IsDirect);
  auto MIB = MIRBuilder.buildInstrNoInsert(CallOpcode);

  bool IsThumb = STI.isThumb();
  if (IsThumb)
    MIB.add(predOps(ARMCC::AL));

  MIB.add(Callee);
  if (!IsDirect) {
    auto CalleeReg = Callee.getReg();
    if (CalleeReg && !Register::isPhysicalRegister(CalleeReg)) {
      unsigned CalleeIdx = IsThumb ? 2 : 0;
      MIB->getOperand(CalleeIdx).setReg(constrainOperandRegClass(
          MF, *TRI, MRI, *STI.getInstrInfo(), *STI.getRegBankInfo(),
          *MIB.getInstr(), MIB->getDesc(), Callee, CalleeIdx));
    }
  }

  MIB.addRegMask(TRI->getCallPreservedMask(MF, CallConv));

  bool IsVarArg = false;
  SmallVector<ArgInfo, 8> ArgInfos;
  for (auto Arg : OrigArgs) {
    if (!isSupportedType(DL, TLI, Arg.Ty))
      return false;

    if (!Arg.IsFixed)
      IsVarArg = true;

    if (Arg.Flags.isByVal())
      return false;

    splitToValueTypes(Arg, ArgInfos, MF);
  }

  auto ArgAssignFn = TLI.CCAssignFnForCall(CallConv, IsVarArg);
  OutgoingValueHandler ArgHandler(MIRBuilder, MRI, MIB, ArgAssignFn);
  if (!handleAssignments(MIRBuilder, ArgInfos, ArgHandler))
    return false;

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  if (!OrigRet.Ty->isVoidTy()) {
    if (!isSupportedType(DL, TLI, OrigRet.Ty))
      return false;

    ArgInfos.clear();
    splitToValueTypes(OrigRet, ArgInfos, MF);
    auto RetAssignFn = TLI.CCAssignFnForReturn(CallConv, IsVarArg);
    CallReturnHandler RetHandler(MIRBuilder, MRI, MIB, RetAssignFn);
    if (!handleAssignments(MIRBuilder, ArgInfos, RetHandler))
      return false;
  }

  // We now know the size of the stack - update the ADJCALLSTACKDOWN
  // accordingly.
  CallSeqStart.addImm(ArgHandler.StackSize).addImm(0).add(predOps(ARMCC::AL));

  MIRBuilder.buildInstr(ARM::ADJCALLSTACKUP)
      .addImm(ArgHandler.StackSize)
      .addImm(0)
      .add(predOps(ARMCC::AL));

  return true;
}