//===-- lib/CodeGen/GlobalISel/InlineAsmLowering.cpp ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering from LLVM IR inline asm to MIR INLINEASM
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

#define DEBUG_TYPE "inline-asm-lowering"

using namespace llvm;

void InlineAsmLowering::anchor() {}
namespace {

/// GISelAsmOperandInfo - This contains information for each constraint that we
/// are lowering.
class GISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
public:
  /// Regs - If this is a register or register class operand, this
  /// contains the set of assigned registers corresponding to the operand.
  SmallVector<Register, 1> Regs;

  explicit GISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &Info)
      : TargetLowering::AsmOperandInfo(Info) {}
};

using GISelAsmOperandInfoVector = SmallVector<GISelAsmOperandInfo, 16>;
class ExtraFlags {
  unsigned Flags = 0;

public:
  explicit ExtraFlags(const CallBase &CB) {
    const InlineAsm *IA = cast<InlineAsm>(CB.getCalledOperand());
    if (IA->hasSideEffects())
      Flags |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      Flags |= InlineAsm::Extra_IsAlignStack;
    if (CB.isConvergent())
      Flags |= InlineAsm::Extra_IsConvergent;
    Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
  }

  void update(const TargetLowering::AsmOperandInfo &OpInfo) {
    // Ideally, we would only check against memory constraints. However, the
    // meaning of an Other constraint can be target-specific and we can't easily
    // reason about it. Therefore, be conservative and set MayLoad/MayStore
    // for Other constraints as well.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
        OpInfo.ConstraintType == TargetLowering::C_Other) {
      if (OpInfo.Type == InlineAsm::isInput)
        Flags |= InlineAsm::Extra_MayLoad;
      else if (OpInfo.Type == InlineAsm::isOutput)
        Flags |= InlineAsm::Extra_MayStore;
      else if (OpInfo.Type == InlineAsm::isClobber)
        Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
    }
  }

  unsigned get() const { return Flags; }
};

} // namespace
/// Assign virtual/physical registers for the specified register operand.
static void getRegistersForValue(MachineFunction &MF,
                                 MachineIRBuilder &MIRBuilder,
                                 GISelAsmOperandInfo &OpInfo,
                                 GISelAsmOperandInfo &RefOpInfo) {

  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();

  // No work to do for memory operations.
  if (OpInfo.ConstraintType == TargetLowering::C_Memory)
    return;

  // If this is a constraint for a single physreg, or a constraint for a
  // register class, find it.
  Register AssignedReg;
  const TargetRegisterClass *RC;
  std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
      &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
  // RC is unset only on failure. Return immediately.
  if (!RC)
    return;

  // No need to allocate a matching input constraint since the constraint it's
  // matching to has already been allocated.
  if (OpInfo.isMatchingInputConstraint())
    return;

  // Initialize NumRegs.
  unsigned NumRegs = 1;
  if (OpInfo.ConstraintVT != MVT::Other)
    NumRegs =
        TLI.getNumRegisters(MF.getFunction().getContext(), OpInfo.ConstraintVT);

  // If this is a constraint for a specific physical register, but the type of
  // the operand requires more than one register to be passed, we allocate the
  // required amount of physical registers, starting from the selected physical
  // register.
  // For this, first retrieve a register iterator for the given register class
  TargetRegisterClass::iterator I = RC->begin();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Advance the iterator to the assigned register (if set)
  if (AssignedReg) {
    for (; *I != AssignedReg; ++I)
      assert(I != RC->end() && "AssignedReg should be a member of provided RC");
  }

  // Finally, assign the registers. If the AssignedReg isn't set, create virtual
  // registers with the provided register class
  for (; NumRegs; --NumRegs, ++I) {
    assert(I != RC->end() && "Ran out of registers to allocate!");
    Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
    OpInfo.Regs.push_back(R);
  }
}
/// Return an integer indicating how general CT is.
static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
  switch (CT) {
  case TargetLowering::C_Immediate:
  case TargetLowering::C_Other:
  case TargetLowering::C_Unknown:
    return 0;
  case TargetLowering::C_Register:
    return 1;
  case TargetLowering::C_RegisterClass:
    return 2;
  case TargetLowering::C_Memory:
    return 3;
  }
  llvm_unreachable("Invalid constraint type");
}
static void chooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
                             const TargetLowering *TLI) {
  assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
  unsigned BestIdx = 0;
  TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
  int BestGenerality = -1;

  // Loop over the options, keeping track of the most general one.
  for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
    TargetLowering::ConstraintType CType =
        TLI->getConstraintType(OpInfo.Codes[i]);

    // Indirect 'other' or 'immediate' constraints are not allowed.
    if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory ||
                               CType == TargetLowering::C_Register ||
                               CType == TargetLowering::C_RegisterClass))
      continue;

    // If this is an 'other' or 'immediate' constraint, see if the operand is
    // valid for it. For example, on X86 we might have an 'rI' constraint. If
    // the operand is an integer in the range [0..31] we want to use I (saving a
    // load of a register), otherwise we must use 'r'.
    if (CType == TargetLowering::C_Other ||
        CType == TargetLowering::C_Immediate) {
      assert(OpInfo.Codes[i].size() == 1 &&
             "Unhandled multi-letter 'other' constraint");
      // FIXME: prefer immediate constraints if the target allows it
    }

    // Things with matching constraints can only be registers, per gcc
    // documentation. This mainly affects "g" constraints.
    if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
      continue;

    // This constraint letter is more general than the previous one, use it.
    int Generality = getConstraintGenerality(CType);
    if (Generality > BestGenerality) {
      BestType = CType;
      BestIdx = i;
      BestGenerality = Generality;
    }
  }

  OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
  OpInfo.ConstraintType = BestType;
}
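// Illustrative note (not from the original file): given a multi-alternative
// constraint such as "Ir" on x86, 'r' (C_RegisterClass, generality 2) is
// currently chosen over 'I' (generality 0), matching the FIXME above about
// not yet preferring immediate alternatives.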
static void computeConstraintToUse(const TargetLowering *TLI,
                                   TargetLowering::AsmOperandInfo &OpInfo) {
  assert(!OpInfo.Codes.empty() && "Must have at least one constraint");

  // Single-letter constraints ('r') are very common.
  if (OpInfo.Codes.size() == 1) {
    OpInfo.ConstraintCode = OpInfo.Codes[0];
    OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
  } else {
    chooseConstraint(OpInfo, TLI);
  }

  // 'X' matches anything.
  if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
    // Labels and constants are handled elsewhere ('X' is the only thing
    // that matches labels). For Functions, the type here is the type of
    // the result, which is not what we want to look at; leave them alone.
    Value *Val = OpInfo.CallOperandVal;
    if (isa<BasicBlock>(Val) || isa<ConstantInt>(Val) || isa<Function>(Val))
      return;

    // Otherwise, try to resolve it to something we know about by looking at
    // the actual operand type.
    if (const char *Repl = TLI->LowerXConstraint(OpInfo.ConstraintVT)) {
      OpInfo.ConstraintCode = Repl;
      OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
    }
  }
}
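// Illustrative note (not from the original file): for an "X" constraint on an
// i32 operand, the default TargetLowering::LowerXConstraint() typically
// rewrites the code to "r" and the constraint type is then recomputed, while
// basic block, constant, and function operands are left untouched above.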
static unsigned getNumOpRegs(const MachineInstr &I, unsigned OpIdx) {
  unsigned Flag = I.getOperand(OpIdx).getImm();
  return InlineAsm::getNumOperandRegisters(Flag);
}
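// Illustrative note (not from the original file): each INLINEASM operand group
// starts with a flag immediate produced by InlineAsm::getFlagWord(Kind,
// NumRegs); getNumOpRegs() above reads that immediate back so the matching
// code in lowerInlineAsm() can skip over a whole group (flag plus its
// registers) in one step.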
static bool buildAnyextOrCopy(Register Dst, Register Src,
                              MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI =
      MIRBuilder.getMF().getSubtarget().getRegisterInfo();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  auto SrcTy = MRI->getType(Src);
  if (!SrcTy.isValid()) {
    LLVM_DEBUG(dbgs() << "Source type for copy is not valid\n");
    return false;
  }
  unsigned SrcSize = TRI->getRegSizeInBits(Src, *MRI);
  unsigned DstSize = TRI->getRegSizeInBits(Dst, *MRI);

  if (DstSize < SrcSize) {
    LLVM_DEBUG(dbgs() << "Input can't fit in destination reg class\n");
    return false;
  }

  // Attempt to anyext small scalar sources.
  if (DstSize > SrcSize) {
    if (!SrcTy.isScalar()) {
      LLVM_DEBUG(dbgs() << "Can't extend non-scalar input to size of "
                           "destination register class\n");
      return false;
    }
    Src = MIRBuilder.buildAnyExt(LLT::scalar(DstSize), Src).getReg(0);
  }

  MIRBuilder.buildCopy(Dst, Src);
  return true;
}
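// Illustrative note (not from the original file): copying an s8 virtual
// register into a register class whose registers are 32 bits wide would take
// the G_ANYEXT path above, producing roughly:
//
//   %ext:_(s32) = G_ANYEXT %src(s8)
//   %dst = COPY %ext(s32)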
bool InlineAsmLowering::lowerInlineAsm(
    MachineIRBuilder &MIRBuilder, const CallBase &Call,
    std::function<ArrayRef<Register>(const Value &Val)> GetOrCreateVRegs)
    const {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
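  // Illustrative example (not from the original file): an IR call such as
  //
  //   %res = call i32 asm "add $0, $1, 1", "=r,r"(i32 %x)
  //
  // reaches this point with one "=r" output and one "r" input constraint and
  // is lowered to a single INLINEASM machine instruction whose operands are
  // built up below.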
  /// ConstraintOperands - Information about all of the constraints.
  GISelAsmOperandInfoVector ConstraintOperands;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(DL, TRI, Call);

  ExtraFlags ExtraInfo(Call);
  unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
  unsigned ResNo = 0; // ResNo - The result number of the next output.
  for (auto &T : TargetConstraints) {
    ConstraintOperands.push_back(GISelAsmOperandInfo(T));
    GISelAsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Compute the value type for each operand.
    if (OpInfo.Type == InlineAsm::isInput ||
        (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {

      OpInfo.CallOperandVal = const_cast<Value *>(Call.getArgOperand(ArgNo++));

      if (isa<BasicBlock>(OpInfo.CallOperandVal)) {
        LLVM_DEBUG(dbgs() << "Basic block input operands not supported yet\n");
        return false;
      }

      Type *OpTy = OpInfo.CallOperandVal->getType();

      // If this is an indirect operand, the operand is a pointer to the
      // accessed type.
      if (OpInfo.isIndirect) {
        PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
        if (!PtrTy)
          report_fatal_error("Indirect operand for inline asm not a pointer!");
        OpTy = PtrTy->getElementType();
      }

      // FIXME: Support aggregate input operands
      if (!OpTy->isSingleValueType()) {
        LLVM_DEBUG(
            dbgs() << "Aggregate input operands are not supported yet\n");
        return false;
      }

      OpInfo.ConstraintVT =
          TLI->getAsmOperandValueType(DL, OpTy, true).getSimpleVT();

    } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
      assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
      if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
        OpInfo.ConstraintVT =
            TLI->getSimpleValueType(DL, STy->getElementType(ResNo));
      } else {
        assert(ResNo == 0 && "Asm only has one result!");
        OpInfo.ConstraintVT =
            TLI->getAsmOperandValueType(DL, Call.getType()).getSimpleVT();
      }
      ++ResNo;
    } else {
      OpInfo.ConstraintVT = MVT::Other;
    }

    if (OpInfo.ConstraintVT == MVT::i64x8)
      return false;

    // Compute the constraint code and ConstraintType to use.
    computeConstraintToUse(TLI, OpInfo);

    // The selected constraint type might expose new sideeffects
    ExtraInfo.update(OpInfo);
  }

  // At this point, all operand types are decided.
  // Create the MachineInstr, but don't insert it yet since input
  // operands still need to insert instructions before this one
  auto Inst = MIRBuilder.buildInstrNoInsert(TargetOpcode::INLINEASM)
                  .addExternalSymbol(IA->getAsmString().c_str())
                  .addImm(ExtraInfo.get());

  // Starting from this operand: flag followed by register(s) will be added as
  // operands to Inst for each constraint. Used for matching input constraints.
  unsigned StartIdx = Inst->getNumOperands();

  // Collects the output operands for later processing
  GISelAsmOperandInfoVector OutputOperands;
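  // Illustrative note (not from the original file): the instruction being
  // built currently holds only the asm string and the extra-info immediate;
  // the loop below then appends, per constraint, a flag immediate followed by
  // its registers (or immediates), e.g. roughly
  //
  //   INLINEASM &"..." <extra-info> <flag> <reg(s)> <flag> <reg(s)> ...
  //
  // which is the layout the StartIdx-based matching relies on.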
  for (auto &OpInfo : ConstraintOperands) {
    GISelAsmOperandInfo &RefOpInfo =
        OpInfo.isMatchingInputConstraint()
            ? ConstraintOperands[OpInfo.getMatchedOperand()]
            : OpInfo;

    // Assign registers for register operands
    getRegistersForValue(MF, MIRBuilder, OpInfo, RefOpInfo);

    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        unsigned ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::Constraint_Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM instruction to know about this
        // output.
        unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
        Inst.addImm(OpFlags);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(
            SourceRegs.size() == 1 &&
            "Expected the memory output to fit into a single virtual register");
        Inst.addReg(SourceRegs[0]);
      } else {

        // Otherwise, this outputs to a register (directly for C_Register /
        // C_RegisterClass. Find a register that we can use.
        assert(OpInfo.ConstraintType == TargetLowering::C_Register ||
               OpInfo.ConstraintType == TargetLowering::C_RegisterClass);

        if (OpInfo.Regs.empty()) {
          LLVM_DEBUG(dbgs()
                     << "Couldn't allocate output register for constraint\n");
          return false;
        }

        // Add information to the INLINEASM instruction to know that this
        // register is set.
        unsigned Flag = InlineAsm::getFlagWord(
            OpInfo.isEarlyClobber ? InlineAsm::Kind_RegDefEarlyClobber
                                  : InlineAsm::Kind_RegDef,
            OpInfo.Regs.size());
        if (OpInfo.Regs.front().isVirtual()) {
          // Put the register class of the virtual registers in the flag word.
          // That way, later passes can recompute register class constraints for
          // inline assembly as well as normal instructions. Don't do this for
          // tied operands that can use the regclass information from the def.
          const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front());
          Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
        }

        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg,
                      RegState::Define | getImplRegState(Reg.isPhysical()) |
                          (OpInfo.isEarlyClobber ? RegState::EarlyClobber : 0));
        }

        // Remember this output operand for later processing
        OutputOperands.push_back(OpInfo);
      }

      break;
    case InlineAsm::isInput: {
      if (OpInfo.isMatchingInputConstraint()) {
        unsigned DefIdx = OpInfo.getMatchedOperand();
        // Find operand with register def that corresponds to DefIdx.
        unsigned InstFlagIdx = StartIdx;
        for (unsigned i = 0; i < DefIdx; ++i)
          InstFlagIdx += getNumOpRegs(*Inst, InstFlagIdx) + 1;
        assert(getNumOpRegs(*Inst, InstFlagIdx) == 1 && "Wrong flag");

        unsigned MatchedOperandFlag = Inst->getOperand(InstFlagIdx).getImm();
        if (InlineAsm::isMemKind(MatchedOperandFlag)) {
          LLVM_DEBUG(dbgs() << "Matching input constraint to mem operand not "
                               "supported. This should be target specific.\n");
          return false;
        }
        if (!InlineAsm::isRegDefKind(MatchedOperandFlag) &&
            !InlineAsm::isRegDefEarlyClobberKind(MatchedOperandFlag)) {
          LLVM_DEBUG(dbgs() << "Unknown matching constraint\n");
          return false;
        }

        // We want to tie input to register in next operand.
        unsigned DefRegIdx = InstFlagIdx + 1;
        Register Def = Inst->getOperand(DefRegIdx).getReg();

        ArrayRef<Register> SrcRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(SrcRegs.size() == 1 && "Single register is expected here");

        // When Def is physreg: use given input.
        Register In = SrcRegs[0];
        // When Def is vreg: copy input to new vreg with same reg class as Def.
        if (Def.isVirtual()) {
          In = MRI->createVirtualRegister(MRI->getRegClass(Def));
          if (!buildAnyextOrCopy(In, SrcRegs[0], MIRBuilder))
            return false;
        }

        // Add Flag and input register operand (In) to Inst. Tie In to Def.
        unsigned UseFlag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, 1);
        unsigned Flag = InlineAsm::getFlagWordForMatchingOp(UseFlag, DefIdx);
        Inst.addImm(Flag);
        Inst.addReg(In);
        Inst->tieOperands(DefRegIdx, Inst->getNumOperands() - 1);
        break;
      }
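      // Illustrative note (not from the original file): a matching constraint
      // arises from IR such as
      //
      //   %r = call i32 asm "...", "=r,0"(i32 %x)
      //
      // where the "0" input reuses operand 0's register; the tieOperands()
      // call above records that def/use pairing on the INLINEASM instruction.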
      if (OpInfo.ConstraintType == TargetLowering::C_Other &&
          OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Indirect input operands with unknown constraint "
                             "not supported yet\n");
        return false;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
          OpInfo.ConstraintType == TargetLowering::C_Other) {

        std::vector<MachineOperand> Ops;
        if (!lowerAsmOperandForConstraint(OpInfo.CallOperandVal,
                                          OpInfo.ConstraintCode, Ops,
                                          MIRBuilder)) {
          LLVM_DEBUG(dbgs() << "Don't support constraint: "
                            << OpInfo.ConstraintCode << " yet\n");
          return false;
        }

        assert(Ops.size() > 0 &&
               "Expected constraint to be lowered to at least one operand");

        // Add information to the INLINEASM node to know about this input.
        unsigned OpFlags =
            InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
        Inst.addImm(OpFlags);
        Inst.add(Ops);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {

        if (!OpInfo.isIndirect) {
          LLVM_DEBUG(dbgs()
                     << "Cannot indirectify memory input operands yet\n");
          return false;
        }

        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");

        unsigned ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
        Inst.addImm(OpFlags);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(
            SourceRegs.size() == 1 &&
            "Expected the memory input to fit into a single virtual register");
        Inst.addReg(SourceRegs[0]);
        break;
      }

      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
              OpInfo.ConstraintType == TargetLowering::C_Register) &&
             "Unknown constraint type!");

      if (OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Can't handle indirect register inputs yet "
                             "for constraint '"
                          << OpInfo.ConstraintCode << "'\n");
        return false;
      }

      // Copy the input into the appropriate registers.
      if (OpInfo.Regs.empty()) {
        LLVM_DEBUG(
            dbgs()
            << "Couldn't allocate input register for register constraint\n");
        return false;
      }

      unsigned NumRegs = OpInfo.Regs.size();
      ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
      assert(NumRegs == SourceRegs.size() &&
             "Expected the number of input registers to match the number of "
             "source registers");

      if (NumRegs > 1) {
        LLVM_DEBUG(dbgs() << "Input operands with multiple input registers are "
                             "not supported yet\n");
        return false;
      }

      unsigned Flag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, NumRegs);
      if (OpInfo.Regs.front().isVirtual()) {
        // Put the register class of the virtual registers in the flag word.
        const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front());
        Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
      }
      Inst.addImm(Flag);
      if (!buildAnyextOrCopy(OpInfo.Regs[0], SourceRegs[0], MIRBuilder))
        return false;
      Inst.addReg(OpInfo.Regs[0]);
      break;
    }

    case InlineAsm::isClobber: {

      unsigned NumRegs = OpInfo.Regs.size();
      if (NumRegs > 0) {
        unsigned Flag =
            InlineAsm::getFlagWord(InlineAsm::Kind_Clobber, NumRegs);
        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg, RegState::Define | RegState::EarlyClobber |
                               getImplRegState(Reg.isPhysical()));
        }
      }
      break;
    }
    }
  }

  if (const MDNode *SrcLoc = Call.getMetadata("srcloc"))
    Inst.addMetadata(SrcLoc);

  // All inputs are handled, insert the instruction now
  MIRBuilder.insertInstr(Inst);

  // Finally, copy the output operands into the output registers
  ArrayRef<Register> ResRegs = GetOrCreateVRegs(Call);
  if (ResRegs.size() != OutputOperands.size()) {
    LLVM_DEBUG(dbgs() << "Expected the number of output registers to match the "
                         "number of destination registers\n");
    return false;
  }
  for (unsigned int i = 0, e = ResRegs.size(); i < e; i++) {
    GISelAsmOperandInfo &OpInfo = OutputOperands[i];

    if (OpInfo.Regs.empty())
      continue;

    switch (OpInfo.ConstraintType) {
    case TargetLowering::C_Register:
    case TargetLowering::C_RegisterClass: {
      if (OpInfo.Regs.size() > 1) {
        LLVM_DEBUG(dbgs() << "Output operands with multiple defining "
                             "registers are not supported yet\n");
        return false;
      }

      Register SrcReg = OpInfo.Regs[0];
      unsigned SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
      if (MRI->getType(ResRegs[i]).getSizeInBits() < SrcSize) {
        // First copy the non-typed virtual register into a generic virtual
        // register
        Register Tmp1Reg =
            MRI->createGenericVirtualRegister(LLT::scalar(SrcSize));
        MIRBuilder.buildCopy(Tmp1Reg, SrcReg);
        // Need to truncate the result of the register
        MIRBuilder.buildTrunc(ResRegs[i], Tmp1Reg);
      } else {
        MIRBuilder.buildCopy(ResRegs[i], SrcReg);
      }
      break;
    }
    case TargetLowering::C_Immediate:
    case TargetLowering::C_Other:
      LLVM_DEBUG(
          dbgs() << "Cannot lower target specific output constraints yet\n");
      return false;
    case TargetLowering::C_Memory:
      break; // Already handled.
    case TargetLowering::C_Unknown:
      LLVM_DEBUG(dbgs() << "Unexpected unknown constraint\n");
      return false;
    }
  }

  return true;
}
bool InlineAsmLowering::lowerAsmOperandForConstraint(
    Value *Val, StringRef Constraint, std::vector<MachineOperand> &Ops,
    MachineIRBuilder &MIRBuilder) const {
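  // Illustrative note (not from the original file): for a constraint such as
  // "n" with a ConstantInt operand of value 42, the code below appends a
  // single immediate machine operand 42; targets can override this hook for
  // constraints the generic code does not understand.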
  if (Constraint.size() > 1)
    return false;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default:
    return false;
  case 'i': // Simple Integer or Relocatable Constant
  case 'n': // immediate integer with a known value.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
      assert(CI->getBitWidth() <= 64 &&
             "expected immediate to fit into 64-bits");
      // Boolean constants should be zero-extended, others are sign-extended
      bool IsBool = CI->getBitWidth() == 1;
      int64_t ExtVal = IsBool ? CI->getZExtValue() : CI->getSExtValue();
      Ops.push_back(MachineOperand::CreateImm(ExtVal));