//===- llvm/lib/Target/ARM/ARMCallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "ARMCallLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMISelLowering.h"
#include "ARMSubtarget.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MachineValueType.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

ARMCallLowering::ARMCallLowering(const ARMTargetLowering &TLI)
    : CallLowering(&TLI) {}

static bool isSupportedType(const DataLayout &DL, const ARMTargetLowering &TLI,
                            Type *T) {
  if (T->isArrayTy())
    return isSupportedType(DL, TLI, T->getArrayElementType());

  if (T->isStructTy()) {
    // For now we only allow homogeneous structs that we can manipulate with
    // G_MERGE_VALUES and G_UNMERGE_VALUES.
    auto StructT = cast<StructType>(T);
    for (unsigned i = 1, e = StructT->getNumElements(); i != e; ++i)
      if (StructT->getElementType(i) != StructT->getElementType(0))
        return false;
    return isSupportedType(DL, TLI, StructT->getElementType(0));
  }

  EVT VT = TLI.getValueType(DL, T, true);
  if (!VT.isSimple() || VT.isVector() ||
      !(VT.isInteger() || VT.isFloatingPoint()))
    return false;

  unsigned VTSize = VT.getSimpleVT().getSizeInBits();

  if (VTSize == 64)
    // FIXME: Support i64 too
    return VT.isFloatingPoint();

  return VTSize == 1 || VTSize == 8 || VTSize == 16 || VTSize == 32;
}
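
// For illustration: i1/i8/i16/i32 integers, f32/f64 floats, and homogeneous
// aggregates of those (e.g. {i32, i32} or [4 x float]) pass the check above,
// while vector types and 64-bit integers are rejected for now.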

namespace {

/// Helper class for values going out through an ABI boundary (used for
/// handling function return values and call parameters).
struct OutgoingValueHandler : public CallLowering::ValueHandler {
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB, CCAssignFn *AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
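
  // Outgoing values that live on the stack are addressed relative to SP: we
  // copy SP into a virtual register and offset it with a G_GEP.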
  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    LLT p0 = LLT::pointer(0, 32);
    LLT s32 = LLT::scalar(32);
    Register SPReg = MRI.createGenericVirtualRegister(p0);
    MIRBuilder.buildCopy(SPReg, Register(ARM::SP));

    Register OffsetReg = MRI.createGenericVirtualRegister(s32);
    MIRBuilder.buildConstant(OffsetReg, Offset);

    Register AddrReg = MRI.createGenericVirtualRegister(p0);
    MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
    return AddrReg;
  }
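
  // Values assigned to registers are extended to the location type first and
  // then copied into the physical register, which is also attached to the
  // call/return instruction as an implicit use.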
  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    assert(VA.isRegLoc() && "Value should be assigned to a register");
    assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");

    assert(VA.getValVT().getSizeInBits() <= 64 && "Unsupported value size");
    assert(VA.getLocVT().getSizeInBits() <= 64 && "Unsupported location size");

    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    Register ExtReg = extendRegister(ValVReg, VA);
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOStore, VA.getLocVT().getStoreSize(),
        /* Alignment */ 1);
    MIRBuilder.buildStore(ExtReg, Addr, *MMO);
  }
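
  // Custom assignment handles f64 values that the calling convention puts in
  // a pair of 32-bit GPRs (e.g. under the soft-float AAPCS): the 64-bit value
  // is unmerged into two s32 halves, which are swapped on big-endian targets
  // before being assigned to their registers.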
  unsigned assignCustomValue(const CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs) override {
    assert(Arg.Regs.size() == 1 && "Can't handle multiple regs yet");

    CCValAssign VA = VAs[0];
    assert(VA.needsCustom() && "Value doesn't need custom handling");
    assert(VA.getValVT() == MVT::f64 && "Unsupported type");

    CCValAssign NextVA = VAs[1];
    assert(NextVA.needsCustom() && "Value doesn't need custom handling");
    assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");

    assert(VA.getValNo() == NextVA.getValNo() &&
           "Values belong to different arguments");

    assert(VA.isRegLoc() && "Value should be in reg");
    assert(NextVA.isRegLoc() && "Value should be in reg");

    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};
    MIRBuilder.buildUnmerge(NewRegs, Arg.Regs[0]);

    bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
    if (!IsLittle)
      std::swap(NewRegs[0], NewRegs[1]);

    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
    assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);

    return 1;
  }
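
  // Keep track of the largest stack offset seen during assignment, so the
  // total outgoing stack size is known when emitting ADJCALLSTACKDOWN/UP.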
  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    if (AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State))
      return true;

    StackSize =
        std::max(StackSize, static_cast<uint64_t>(State.getNextStackOffset()));
    return false;
  }

  MachineInstrBuilder &MIB;
  uint64_t StackSize = 0;
};

} // end anonymous namespace

void ARMCallLowering::splitToValueTypes(const ArgInfo &OrigArg,
                                        SmallVectorImpl<ArgInfo> &SplitArgs,
                                        MachineFunction &MF) const {
  const ARMTargetLowering &TLI = *getTLI<ARMTargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();
  const DataLayout &DL = MF.getDataLayout();
  const Function &F = MF.getFunction();

  SmallVector<EVT, 4> SplitVTs;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, nullptr, nullptr, 0);
  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");

  if (SplitVTs.size() == 1) {
    // Even if there is no splitting to do, we still want to replace the
    // original type (e.g. pointer type -> integer).
    auto Flags = OrigArg.Flags[0];
    unsigned OriginalAlignment = DL.getABITypeAlignment(OrigArg.Ty);
    Flags.setOrigAlign(OriginalAlignment);
    SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
                           Flags, OrigArg.IsFixed);
    return;
  }

  // Create one ArgInfo for each virtual register.
  for (unsigned i = 0, e = SplitVTs.size(); i != e; ++i) {
    EVT SplitVT = SplitVTs[i];
    Type *SplitTy = SplitVT.getTypeForEVT(Ctx);
    auto Flags = OrigArg.Flags[0];

    unsigned OriginalAlignment = DL.getABITypeAlignment(SplitTy);
    Flags.setOrigAlign(OriginalAlignment);

    bool NeedsConsecutiveRegisters =
        TLI.functionArgumentNeedsConsecutiveRegisters(
            SplitTy, F.getCallingConv(), F.isVarArg());
    if (NeedsConsecutiveRegisters) {
      Flags.setInConsecutiveRegs();
      if (i == e - 1)
        Flags.setInConsecutiveRegsLast();
    }

    // FIXME: We also want to split SplitTy further.
    Register PartReg = OrigArg.Regs[i];
    SplitArgs.emplace_back(PartReg, SplitTy, Flags, OrigArg.IsFixed);
  }
}
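
// For illustration: a homogeneous struct such as {i32, i32} is split into two
// i32 ArgInfos, one per virtual register, while a plain pointer argument
// produces a single ArgInfo with its type rewritten to i32.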

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p MIRBuilder's insertion point is correct.
bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
                                     const Value *Val, ArrayRef<Register> VRegs,
                                     MachineInstrBuilder &Ret) const {
  if (!Val)
    // Nothing to do here.
    return true;

  auto &MF = MIRBuilder.getMF();
  const auto &F = MF.getFunction();

  auto DL = MF.getDataLayout();
  auto &TLI = *getTLI<ARMTargetLowering>();
  if (!isSupportedType(DL, TLI, Val->getType()))
    return false;

  ArgInfo OrigRetInfo(VRegs, Val->getType());
  setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);

  SmallVector<ArgInfo, 4> SplitRetInfos;
  splitToValueTypes(OrigRetInfo, SplitRetInfos, MF);

  CCAssignFn *AssignFn =
      TLI.CCAssignFnForReturn(F.getCallingConv(), F.isVarArg());

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret, AssignFn);
  return handleAssignments(MIRBuilder, SplitRetInfos, RetHandler);
}
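
// The return instruction is built first (without inserting it) so that
// lowerReturnVal can attach implicit uses of the return registers to it; it is
// only inserted into the block once lowering has succeeded.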

bool ARMCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                  const Value *Val,
                                  ArrayRef<Register> VRegs) const {
  assert(!Val == VRegs.empty() && "Return value without a vreg");

  auto const &ST = MIRBuilder.getMF().getSubtarget<ARMSubtarget>();
  unsigned Opcode = ST.getReturnOpcode();
  auto Ret = MIRBuilder.buildInstrNoInsert(Opcode).add(predOps(ARMCC::AL));

  if (!lowerReturnVal(MIRBuilder, Val, VRegs, Ret))
    return false;

  MIRBuilder.insertInstr(Ret);
  return true;
}

namespace {

/// Helper class for values coming in through an ABI boundary (used for
/// handling formal arguments and call return values).
struct IncomingValueHandler : public CallLowering::ValueHandler {
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       CCAssignFn *AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn) {}
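
  // Unlike outgoing values, incoming stack values are addressed through fixed
  // frame objects (G_FRAME_INDEX) rather than SP-relative arithmetic.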

  bool isIncomingArgumentHandler() const override { return true; }

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

    Register AddrReg =
        MRI.createGenericVirtualRegister(LLT::pointer(MPO.getAddrSpace(), 32));
    MIRBuilder.buildFrameIndex(AddrReg, FI);

    return AddrReg;
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    if (VA.getLocInfo() == CCValAssign::SExt ||
        VA.getLocInfo() == CCValAssign::ZExt) {
      // If the value is zero- or sign-extended, its size becomes 4 bytes, so
      // that's what we should load.
      Size = 4;
      assert(MRI.getType(ValVReg).isScalar() && "Only scalars supported atm");

      auto LoadVReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
      buildLoad(LoadVReg, Addr, Size, /* Alignment */ 1, MPO);
      MIRBuilder.buildTrunc(ValVReg, LoadVReg);
    } else {
      // If the value is not extended, a simple load will suffice.
      buildLoad(ValVReg, Addr, Size, /* Alignment */ 1, MPO);
    }
  }

  void buildLoad(Register Val, Register Addr, uint64_t Size,
                 unsigned Alignment, MachinePointerInfo &MPO) {
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad, Size, Alignment);
    MIRBuilder.buildLoad(Val, Addr, *MMO);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    assert(VA.isRegLoc() && "Value should be assigned to a register");
    assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");

    auto ValSize = VA.getValVT().getSizeInBits();
    auto LocSize = VA.getLocVT().getSizeInBits();

    assert(ValSize <= 64 && "Unsupported value size");
    assert(LocSize <= 64 && "Unsupported location size");

    markPhysRegUsed(PhysReg);
    if (ValSize == LocSize) {
      MIRBuilder.buildCopy(ValVReg, PhysReg);
    } else {
      assert(ValSize < LocSize && "Extensions not supported");

      // We cannot create a truncating copy, nor a trunc of a physical
      // register. Therefore, we need to copy the content of the physical
      // register into a virtual one and then truncate that.
      auto PhysRegToVReg =
          MRI.createGenericVirtualRegister(LLT::scalar(LocSize));
      MIRBuilder.buildCopy(PhysRegToVReg, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, PhysRegToVReg);
    }
  }

  unsigned assignCustomValue(const ARMCallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs) override {
    assert(Arg.Regs.size() == 1 && "Can't handle multiple regs yet");

    CCValAssign VA = VAs[0];
    assert(VA.needsCustom() && "Value doesn't need custom handling");
    assert(VA.getValVT() == MVT::f64 && "Unsupported type");

    CCValAssign NextVA = VAs[1];
    assert(NextVA.needsCustom() && "Value doesn't need custom handling");
    assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");

    assert(VA.getValNo() == NextVA.getValNo() &&
           "Values belong to different arguments");

    assert(VA.isRegLoc() && "Value should be in reg");
    assert(NextVA.isRegLoc() && "Value should be in reg");

    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};

    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
    assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);

    bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
    if (!IsLittle)
      std::swap(NewRegs[0], NewRegs[1]);

    MIRBuilder.buildMerge(Arg.Regs[0], NewRegs);

    return 1;
  }

  /// Marking a physical register as used is different between formal
  /// parameters, where it's a basic block live-in, and call returns, where
  /// it's an implicit-def of the call instruction.
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
};

struct FormalArgHandler : public IncomingValueHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                   CCAssignFn *AssignFn)
      : IncomingValueHandler(MIRBuilder, MRI, AssignFn) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

} // end anonymous namespace
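
// Note: formal argument lowering below bails out (returns false, so the
// function falls back to SelectionDAG/FastISel) for Thumb1, varargs, byval
// arguments, and any type rejected by isSupportedType.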

bool ARMCallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  auto &TLI = *getTLI<ARMTargetLowering>();
  auto Subtarget = TLI.getSubtarget();

  if (Subtarget->isThumb1Only())
    return false;

  // Quick exit if there aren't any args
  if (F.arg_empty())
    return true;

  if (F.isVarArg())
    return false;

  auto &MF = MIRBuilder.getMF();
  auto &MBB = MIRBuilder.getMBB();
  auto DL = MF.getDataLayout();

  for (auto &Arg : F.args()) {
    if (!isSupportedType(DL, TLI, Arg.getType()))
      return false;
    if (Arg.hasByValOrInAllocaAttr())
      return false;
  }

  CCAssignFn *AssignFn =
      TLI.CCAssignFnForCall(F.getCallingConv(), F.isVarArg());

  FormalArgHandler ArgHandler(MIRBuilder, MIRBuilder.getMF().getRegInfo(),
                              AssignFn);

  SmallVector<ArgInfo, 8> SplitArgInfos;
  unsigned Idx = 0;
  for (auto &Arg : F.args()) {
    ArgInfo OrigArgInfo(VRegs[Idx], Arg.getType());

    setArgFlags(OrigArgInfo, Idx + AttributeList::FirstArgIndex, DL, F);
    splitToValueTypes(OrigArgInfo, SplitArgInfos, MF);

    Idx++;
  }

  if (!MBB.empty())
    MIRBuilder.setInstr(*MBB.begin());

  if (!handleAssignments(MIRBuilder, SplitArgInfos, ArgHandler))
    return false;

  // Move back to the end of the basic block.
  MIRBuilder.setMBB(MBB);
  return true;
}

namespace {

struct CallReturnHandler : public IncomingValueHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : IncomingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

// FIXME: This should move to the ARMSubtarget when it supports all the
// opcodes.
unsigned getCallOpcode(const ARMSubtarget &STI, bool isDirect) {
  if (isDirect)
    return STI.isThumb() ? ARM::tBL : ARM::BL;

  // Indirect calls: pick the best opcode the subtarget supports.
  if (STI.isThumb())
    return ARM::tBLXr;

  if (STI.hasV5TOps())
    return ARM::BLX;

  if (STI.hasV4TOps())
    return ARM::BX_CALL;

  return ARM::BMOVPCRX_CALL;
}

} // end anonymous namespace
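
// Call lowering emits the standard call frame sequence: ADJCALLSTACKDOWN,
// copies for the outgoing arguments, the call itself (carrying implicit uses
// of the argument registers and implicit defs of the return registers),
// copies for the returned values, and finally ADJCALLSTACKUP with the
// now-known stack size.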

bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const auto &TLI = *getTLI<ARMTargetLowering>();
  const auto &DL = MF.getDataLayout();
  const auto &STI = MF.getSubtarget<ARMSubtarget>();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (STI.genLongCalls())
    return false;

  if (STI.isThumb1Only())
    return false;

  auto CallSeqStart = MIRBuilder.buildInstr(ARM::ADJCALLSTACKDOWN);

  // Create the call instruction so we can add the implicit uses of arg
  // registers, but don't insert it yet.
  bool IsDirect = !Info.Callee.isReg();
  auto CallOpcode = getCallOpcode(STI, IsDirect);
  auto MIB = MIRBuilder.buildInstrNoInsert(CallOpcode);

  bool IsThumb = STI.isThumb();
  if (IsThumb)
    MIB.add(predOps(ARMCC::AL));

  MIB.add(Info.Callee);
  if (!IsDirect) {
    auto CalleeReg = Info.Callee.getReg();
    if (CalleeReg && !Register::isPhysicalRegister(CalleeReg)) {
      unsigned CalleeIdx = IsThumb ? 2 : 0;
      MIB->getOperand(CalleeIdx).setReg(constrainOperandRegClass(
          MF, *TRI, MRI, *STI.getInstrInfo(), *STI.getRegBankInfo(),
          *MIB.getInstr(), MIB->getDesc(), Info.Callee, CalleeIdx));
    }
  }

  MIB.addRegMask(TRI->getCallPreservedMask(MF, Info.CallConv));

  bool IsVarArg = false;
  SmallVector<ArgInfo, 8> ArgInfos;
  for (auto Arg : Info.OrigArgs) {
    if (!isSupportedType(DL, TLI, Arg.Ty))
      return false;

    if (!Arg.IsFixed)
      return false;

    if (Arg.Flags[0].isByVal())
      return false;

    splitToValueTypes(Arg, ArgInfos, MF);
  }

  auto ArgAssignFn = TLI.CCAssignFnForCall(Info.CallConv, IsVarArg);
  OutgoingValueHandler ArgHandler(MIRBuilder, MRI, MIB, ArgAssignFn);
  if (!handleAssignments(MIRBuilder, ArgInfos, ArgHandler))
    return false;

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  if (!Info.OrigRet.Ty->isVoidTy()) {
    if (!isSupportedType(DL, TLI, Info.OrigRet.Ty))
      return false;

    ArgInfos.clear();
    splitToValueTypes(Info.OrigRet, ArgInfos, MF);
    auto RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv, IsVarArg);
    CallReturnHandler RetHandler(MIRBuilder, MRI, MIB, RetAssignFn);
    if (!handleAssignments(MIRBuilder, ArgInfos, RetHandler))
      return false;
  }

  // We now know the size of the stack - update the ADJCALLSTACKDOWN
  // accordingly.
  CallSeqStart.addImm(ArgHandler.StackSize).addImm(0).add(predOps(ARMCC::AL));

  MIRBuilder.buildInstr(ARM::ADJCALLSTACKUP)
      .addImm(ArgHandler.StackSize)
      .addImm(0)
      .add(predOps(ARMCC::AL));

  return true;
}