//===-- lib/CodeGen/GlobalISel/InlineAsmLowering.cpp ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering from LLVM IR inline asm to MIR INLINEASM
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Module.h"

#define DEBUG_TYPE "inline-asm-lowering"

using namespace llvm;

void InlineAsmLowering::anchor() {}

namespace {

/// GISelAsmOperandInfo - This contains information for each constraint that we
/// are lowering.
class GISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
public:
  /// Regs - If this is a register or register class operand, this
  /// contains the set of assigned registers corresponding to the operand.
  SmallVector<Register, 1> Regs;

  explicit GISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &Info)
      : TargetLowering::AsmOperandInfo(Info) {}
};

using GISelAsmOperandInfoVector = SmallVector<GISelAsmOperandInfo, 16>;

class ExtraFlags {
  unsigned Flags = 0;

public:
  explicit ExtraFlags(const CallBase &CB) {
    const InlineAsm *IA = cast<InlineAsm>(CB.getCalledOperand());
    if (IA->hasSideEffects())
      Flags |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      Flags |= InlineAsm::Extra_IsAlignStack;
    if (CB.isConvergent())
      Flags |= InlineAsm::Extra_IsConvergent;
    Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
  }

  void update(const TargetLowering::AsmOperandInfo &OpInfo) {
    // Ideally, we would only check against memory constraints. However, the
    // meaning of an Other constraint can be target-specific and we can't easily
    // reason about it. Therefore, be conservative and set MayLoad/MayStore
    // for Other constraints as well.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
        OpInfo.ConstraintType == TargetLowering::C_Other) {
      if (OpInfo.Type == InlineAsm::isInput)
        Flags |= InlineAsm::Extra_MayLoad;
      else if (OpInfo.Type == InlineAsm::isOutput)
        Flags |= InlineAsm::Extra_MayStore;
      else if (OpInfo.Type == InlineAsm::isClobber)
        Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
    }
  }

  unsigned get() const { return Flags; }
};

} // namespace

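// Note on ExtraFlags: the value returned by ExtraFlags::get() becomes the
// extra-info immediate right after the asm-string operand of the INLINEASM
// instruction built in lowerInlineAsm() below. For example, a volatile asm
// written in Intel dialect would carry roughly
//   Extra_HasSideEffects | (InlineAsm::AD_Intel * Extra_AsmDialect)
// plus any MayLoad/MayStore bits accumulated by update().
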
/// Assign virtual/physical registers for the specified register operand.
static void getRegistersForValue(MachineFunction &MF,
                                 MachineIRBuilder &MIRBuilder,
                                 GISelAsmOperandInfo &OpInfo,
                                 GISelAsmOperandInfo &RefOpInfo) {

  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();

  // No work to do for memory operations.
  if (OpInfo.ConstraintType == TargetLowering::C_Memory)
    return;

  // If this is a constraint for a single physreg, or a constraint for a
  // register class, find it.
  Register AssignedReg;
  const TargetRegisterClass *RC;
  std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
      &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
  // RC is unset only on failure. Return immediately.
  if (!RC)
    return;

  // No need to allocate a matching input constraint since the constraint it's
  // matching to has already been allocated.
  if (OpInfo.isMatchingInputConstraint())
    return;

  // Initialize NumRegs.
  unsigned NumRegs = 1;
  if (OpInfo.ConstraintVT != MVT::Other)
    NumRegs =
        TLI.getNumRegisters(MF.getFunction().getContext(), OpInfo.ConstraintVT);

  // If this is a constraint for a specific physical register, but the type of
  // the operand requires more than one register to be passed, we allocate the
  // required amount of physical registers, starting from the selected physical
  // register.
  // For this, first retrieve a register iterator for the given register class.
  TargetRegisterClass::iterator I = RC->begin();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Advance the iterator to the assigned register (if set)
  if (AssignedReg) {
    for (; *I != AssignedReg; ++I)
      assert(I != RC->end() && "AssignedReg should be a member of provided RC");
  }

  // Finally, assign the registers. If the AssignedReg isn't set, create virtual
  // registers with the provided register class
  for (; NumRegs; --NumRegs, ++I) {
    assert(I != RC->end() && "Ran out of registers to allocate!");
    Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
    OpInfo.Regs.push_back(R);
  }
}

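// For example, a single-physreg constraint such as "={eax}" on X86 makes
// getRegForInlineAsmConstraint() return that physical register together with
// its class, and the loop above hands out consecutive members of the class
// starting at the assigned register; a plain "r" constraint leaves AssignedReg
// invalid, so fresh virtual registers of the returned class are created
// instead.
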
/// Return an integer indicating how general CT is.
static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
  switch (CT) {
  case TargetLowering::C_Immediate:
  case TargetLowering::C_Other:
  case TargetLowering::C_Unknown:
    return 0;
  case TargetLowering::C_Register:
    return 1;
  case TargetLowering::C_RegisterClass:
    return 2;
  case TargetLowering::C_Memory:
  case TargetLowering::C_Address:
    return 3;
  }
  llvm_unreachable("Invalid constraint type");
}

static void chooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
                             const TargetLowering *TLI) {
  assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
  unsigned BestIdx = 0;
  TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
  int BestGenerality = -1;

  // Loop over the options, keeping track of the most general one.
  for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
    TargetLowering::ConstraintType CType =
        TLI->getConstraintType(OpInfo.Codes[i]);

    // Indirect 'other' or 'immediate' constraints are not allowed.
    if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory ||
                               CType == TargetLowering::C_Register ||
                               CType == TargetLowering::C_RegisterClass))
      continue;

    // If this is an 'other' or 'immediate' constraint, see if the operand is
    // valid for it. For example, on X86 we might have an 'rI' constraint. If
    // the operand is an integer in the range [0..31] we want to use I (saving a
    // load of a register), otherwise we must use 'r'.
    if (CType == TargetLowering::C_Other ||
        CType == TargetLowering::C_Immediate) {
      assert(OpInfo.Codes[i].size() == 1 &&
             "Unhandled multi-letter 'other' constraint");
      // FIXME: prefer immediate constraints if the target allows it
    }

    // Things with matching constraints can only be registers, per gcc
    // documentation. This mainly affects "g" constraints.
    if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
      continue;

    // This constraint letter is more general than the previous one, use it.
    int Generality = getConstraintGenerality(CType);
    if (Generality > BestGenerality) {
      BestType = CType;
      BestIdx = i;
      BestGenerality = Generality;
    }
  }

  OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
  OpInfo.ConstraintType = BestType;
}

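// For example, for a multi-alternative constraint like "imr" the loop above
// prefers the memory alternative (generality 3) over the register one
// (generality 2) and the immediate one (generality 0), unless the operand has
// a matching input constraint, in which case the memory alternative is
// rejected and the register alternative wins.
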
static void computeConstraintToUse(const TargetLowering *TLI,
                                   TargetLowering::AsmOperandInfo &OpInfo) {
  assert(!OpInfo.Codes.empty() && "Must have at least one constraint");

  // Single-letter constraints ('r') are very common.
  if (OpInfo.Codes.size() == 1) {
    OpInfo.ConstraintCode = OpInfo.Codes[0];
    OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
  } else {
    chooseConstraint(OpInfo, TLI);
  }

  // 'X' matches anything.
  if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
    // Labels and constants are handled elsewhere ('X' is the only thing
    // that matches labels). For Functions, the type here is the type of
    // the result, which is not what we want to look at; leave them alone.
    Value *Val = OpInfo.CallOperandVal;
    if (isa<BasicBlock>(Val) || isa<ConstantInt>(Val) || isa<Function>(Val))
      return;

    // Otherwise, try to resolve it to something we know about by looking at
    // the actual operand type.
    if (const char *Repl = TLI->LowerXConstraint(OpInfo.ConstraintVT)) {
      OpInfo.ConstraintCode = Repl;
      OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
    }
  }
}

static unsigned getNumOpRegs(const MachineInstr &I, unsigned OpIdx) {
  unsigned Flag = I.getOperand(OpIdx).getImm();
  return InlineAsm::getNumOperandRegisters(Flag);
}

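// Each per-constraint flag immediate encodes the operand kind together with
// the number of register operands that follow it (see the flag-word helpers
// in llvm/IR/InlineAsm.h for the exact layout). getNumOpRegs() only decodes
// that register count, which is enough to hop from one constraint's flag
// operand to the next in the INLINEASM instruction built below.
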
static bool buildAnyextOrCopy(Register Dst, Register Src,
                              MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI =
      MIRBuilder.getMF().getSubtarget().getRegisterInfo();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  auto SrcTy = MRI->getType(Src);
  if (!SrcTy.isValid()) {
    LLVM_DEBUG(dbgs() << "Source type for copy is not valid\n");
    return false;
  }
  unsigned SrcSize = TRI->getRegSizeInBits(Src, *MRI);
  unsigned DstSize = TRI->getRegSizeInBits(Dst, *MRI);

  if (DstSize < SrcSize) {
    LLVM_DEBUG(dbgs() << "Input can't fit in destination reg class\n");
    return false;
  }

  // Attempt to anyext small scalar sources.
  if (DstSize > SrcSize) {
    if (!SrcTy.isScalar()) {
      LLVM_DEBUG(dbgs() << "Can't extend non-scalar input to size of "
                           "destination register class\n");
      return false;
    }
    Src = MIRBuilder.buildAnyExt(LLT::scalar(DstSize), Src).getReg(0);
  }

  MIRBuilder.buildCopy(Dst, Src);
  return true;
}

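// For example, feeding an s16 value into a register class whose registers are
// 32 bits wide yields roughly
//   %ext:_(s32) = G_ANYEXT %src:_(s16)
//   %dst = COPY %ext:_(s32)
// while matching sizes collapse to a single COPY.
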
bool InlineAsmLowering::lowerInlineAsm(
    MachineIRBuilder &MIRBuilder, const CallBase &Call,
    std::function<ArrayRef<Register>(const Value &Val)> GetOrCreateVRegs)
    const {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());

  /// ConstraintOperands - Information about all of the constraints.
  GISelAsmOperandInfoVector ConstraintOperands;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(DL, TRI, Call);

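  // There is now one AsmOperandInfo per constraint code. For example, for
  //   %res = call i32 asm "mov $0, $1", "=r,r,~{memory}"(i32 %x)
  // TargetConstraints holds three entries: the "=r" output, the "r" input and
  // the "~{memory}" clobber.
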
  ExtraFlags ExtraInfo(Call);
  unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
  unsigned ResNo = 0; // ResNo - The result number of the next output.
  for (auto &T : TargetConstraints) {
    ConstraintOperands.push_back(GISelAsmOperandInfo(T));
    GISelAsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Compute the value type for each operand.
    if (OpInfo.hasArg()) {
      OpInfo.CallOperandVal = const_cast<Value *>(Call.getArgOperand(ArgNo));

      if (isa<BasicBlock>(OpInfo.CallOperandVal)) {
        LLVM_DEBUG(dbgs() << "Basic block input operands not supported yet\n");
        return false;
      }

      Type *OpTy = OpInfo.CallOperandVal->getType();

      // If this is an indirect operand, the operand is a pointer to the
      // accessed type.
      if (OpInfo.isIndirect) {
        OpTy = Call.getParamElementType(ArgNo);
        assert(OpTy && "Indirect operand must have elementtype attribute");
      }

      // FIXME: Support aggregate input operands
      if (!OpTy->isSingleValueType()) {
        LLVM_DEBUG(
            dbgs() << "Aggregate input operands are not supported yet\n");
        return false;
      }

      OpInfo.ConstraintVT =
          TLI->getAsmOperandValueType(DL, OpTy, true).getSimpleVT();
      ++ArgNo;
    } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
      assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
      if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
        OpInfo.ConstraintVT =
            TLI->getSimpleValueType(DL, STy->getElementType(ResNo));
      } else {
        assert(ResNo == 0 && "Asm only has one result!");
        OpInfo.ConstraintVT =
            TLI->getAsmOperandValueType(DL, Call.getType()).getSimpleVT();
      }
      ++ResNo;
    } else {
      assert(OpInfo.Type != InlineAsm::isLabel &&
             "GlobalISel currently doesn't support callbr");
      OpInfo.ConstraintVT = MVT::Other;
    }

    if (OpInfo.ConstraintVT == MVT::i64x8)
      return false;

    // Compute the constraint code and ConstraintType to use.
    computeConstraintToUse(TLI, OpInfo);

    // The selected constraint type might expose new sideeffects
    ExtraInfo.update(OpInfo);
  }

  // At this point, all operand types are decided.
  // Create the MachineInstr, but don't insert it yet since input
  // operands still need to insert instructions before this one
  auto Inst = MIRBuilder.buildInstrNoInsert(TargetOpcode::INLINEASM)
                  .addExternalSymbol(IA->getAsmString().c_str())
                  .addImm(ExtraInfo.get());

  // Starting from this operand: flag followed by register(s) will be added as
  // operands to Inst for each constraint. Used for matching input constraints.
  unsigned StartIdx = Inst->getNumOperands();

  // Collects the output operands for later processing
  GISelAsmOperandInfoVector OutputOperands;

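  // From here on the instruction being built has the shape
  //   INLINEASM <asm string>, <extra-info imm>,
  //             <flag imm>, <operand>..., <flag imm>, <operand>..., ...
  // i.e. every constraint contributes one flag immediate followed by its
  // register (or immediate) operands. StartIdx is the index of the first such
  // flag, so matching input constraints below can walk flag-by-flag to locate
  // the def they are tied to.
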
  for (auto &OpInfo : ConstraintOperands) {
    GISelAsmOperandInfo &RefOpInfo =
        OpInfo.isMatchingInputConstraint()
            ? ConstraintOperands[OpInfo.getMatchedOperand()]
            : OpInfo;

    // Assign registers for register operands
    getRegistersForValue(MF, MIRBuilder, OpInfo, RefOpInfo);

    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        unsigned ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::Constraint_Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM instruction to know about this
        // output.
        unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
        Inst.addImm(OpFlags);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(
            SourceRegs.size() == 1 &&
            "Expected the memory output to fit into a single virtual register");
        Inst.addReg(SourceRegs[0]);
      } else {
        // Otherwise, this outputs to a register (directly for C_Register /
        // C_RegisterClass / C_Other).
        assert(OpInfo.ConstraintType == TargetLowering::C_Register ||
               OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
               OpInfo.ConstraintType == TargetLowering::C_Other);

        // Find a register that we can use.
        if (OpInfo.Regs.empty()) {
          LLVM_DEBUG(dbgs()
                     << "Couldn't allocate output register for constraint\n");
          return false;
        }

        // Add information to the INLINEASM instruction to know that this
        // output operand is a register.
        unsigned Flag = InlineAsm::getFlagWord(
            OpInfo.isEarlyClobber ? InlineAsm::Kind_RegDefEarlyClobber
                                  : InlineAsm::Kind_RegDef,
            OpInfo.Regs.size());
        if (OpInfo.Regs.front().isVirtual()) {
          // Put the register class of the virtual registers in the flag word.
          // That way, later passes can recompute register class constraints for
          // inline assembly as well as normal instructions. Don't do this for
          // tied operands that can use the regclass information from the def.
          const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front());
          Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
        }

        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg,
                      RegState::Define | getImplRegState(Reg.isPhysical()) |
                          (OpInfo.isEarlyClobber ? RegState::EarlyClobber : 0));
        }

        // Remember this output operand for later processing
        OutputOperands.push_back(OpInfo);
      }

      break;
    case InlineAsm::isInput:
    case InlineAsm::isLabel: {
      if (OpInfo.isMatchingInputConstraint()) {
        unsigned DefIdx = OpInfo.getMatchedOperand();
        // Find operand with register def that corresponds to DefIdx.
        unsigned InstFlagIdx = StartIdx;
        for (unsigned i = 0; i < DefIdx; ++i)
          InstFlagIdx += getNumOpRegs(*Inst, InstFlagIdx) + 1;
        assert(getNumOpRegs(*Inst, InstFlagIdx) == 1 && "Wrong flag");

        unsigned MatchedOperandFlag = Inst->getOperand(InstFlagIdx).getImm();
        if (InlineAsm::isMemKind(MatchedOperandFlag)) {
          LLVM_DEBUG(dbgs() << "Matching input constraint to mem operand not "
                               "supported. This should be target specific.\n");
          return false;
        }
        if (!InlineAsm::isRegDefKind(MatchedOperandFlag) &&
            !InlineAsm::isRegDefEarlyClobberKind(MatchedOperandFlag)) {
          LLVM_DEBUG(dbgs() << "Unknown matching constraint\n");
          return false;
        }

        // We want to tie input to register in next operand.
        unsigned DefRegIdx = InstFlagIdx + 1;
        Register Def = Inst->getOperand(DefRegIdx).getReg();

        ArrayRef<Register> SrcRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(SrcRegs.size() == 1 && "Single register is expected here");

        // When Def is physreg: use given input.
        Register In = SrcRegs[0];
        // When Def is vreg: copy input to new vreg with same reg class as Def.
        if (Def.isVirtual()) {
          In = MRI->createVirtualRegister(MRI->getRegClass(Def));
          if (!buildAnyextOrCopy(In, SrcRegs[0], MIRBuilder))
            return false;
        }

        // Add Flag and input register operand (In) to Inst. Tie In to Def.
        unsigned UseFlag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, 1);
        unsigned Flag = InlineAsm::getFlagWordForMatchingOp(UseFlag, DefIdx);
        Inst.addImm(Flag);
        Inst.addReg(In);
        Inst->tieOperands(DefRegIdx, Inst->getNumOperands() - 1);
        break;
      }

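      // For example, with a constraint string like "=r,0" the input's "0"
      // names output operand 0: DefIdx is 0, the input's flag word carries the
      // matching-operand marker for that index, and tying In to Def makes the
      // register allocator assign both the same register.
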
      if (OpInfo.ConstraintType == TargetLowering::C_Other &&
          OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Indirect input operands with unknown constraint "
                             "not supported yet\n");
        return false;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
          OpInfo.ConstraintType == TargetLowering::C_Other) {

        std::vector<MachineOperand> Ops;
        if (!lowerAsmOperandForConstraint(OpInfo.CallOperandVal,
                                          OpInfo.ConstraintCode, Ops,
                                          MIRBuilder)) {
          LLVM_DEBUG(dbgs() << "Don't support constraint: "
                            << OpInfo.ConstraintCode << " yet\n");
          return false;
        }

        assert(Ops.size() > 0 &&
               "Expected constraint to be lowered to at least one operand");

        // Add information to the INLINEASM node to know about this input.
        unsigned OpFlags =
            InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
        Inst.addImm(OpFlags);
        Inst.add(Ops);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {

        if (!OpInfo.isIndirect) {
          LLVM_DEBUG(dbgs()
                     << "Cannot indirectify memory input operands yet\n");
          return false;
        }

        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");

        unsigned ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
        Inst.addImm(OpFlags);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(
            SourceRegs.size() == 1 &&
            "Expected the memory input to fit into a single virtual register");
        Inst.addReg(SourceRegs[0]);
        break;
      }

      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
              OpInfo.ConstraintType == TargetLowering::C_Register) &&
             "Unknown constraint type!");

      if (OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Can't handle indirect register inputs yet "
                             "for constraint '"
                          << OpInfo.ConstraintCode << "'\n");
        return false;
      }

      // Copy the input into the appropriate registers.
      if (OpInfo.Regs.empty()) {
        LLVM_DEBUG(
            dbgs()
            << "Couldn't allocate input register for register constraint\n");
        return false;
      }

      unsigned NumRegs = OpInfo.Regs.size();
      ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
      assert(NumRegs == SourceRegs.size() &&
             "Expected the number of input registers to match the number of "
             "source registers");

      if (NumRegs > 1) {
        LLVM_DEBUG(dbgs() << "Input operands with multiple input registers are "
                             "not supported yet\n");
        return false;
      }

      unsigned Flag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, NumRegs);
      if (OpInfo.Regs.front().isVirtual()) {
        // Put the register class of the virtual registers in the flag word.
        const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front());
        Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
      }
      Inst.addImm(Flag);
      if (!buildAnyextOrCopy(OpInfo.Regs[0], SourceRegs[0], MIRBuilder))
        return false;
      Inst.addReg(OpInfo.Regs[0]);
      break;
    }

    case InlineAsm::isClobber: {

      unsigned NumRegs = OpInfo.Regs.size();
      if (NumRegs > 0) {
        unsigned Flag =
            InlineAsm::getFlagWord(InlineAsm::Kind_Clobber, NumRegs);
        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg, RegState::Define | RegState::EarlyClobber |
                               getImplRegState(Reg.isPhysical()));
        }
      }
      break;
    }
    }
  }

  if (const MDNode *SrcLoc = Call.getMetadata("srcloc"))
    Inst.addMetadata(SrcLoc);

  // All inputs are handled, insert the instruction now
  MIRBuilder.insertInstr(Inst);

  // Finally, copy the output operands into the output registers
  ArrayRef<Register> ResRegs = GetOrCreateVRegs(Call);
  if (ResRegs.size() != OutputOperands.size()) {
    LLVM_DEBUG(dbgs() << "Expected the number of output registers to match the "
                         "number of destination registers\n");
    return false;
  }
  for (unsigned int i = 0, e = ResRegs.size(); i < e; i++) {
    GISelAsmOperandInfo &OpInfo = OutputOperands[i];

    if (OpInfo.Regs.empty())
      continue;

    switch (OpInfo.ConstraintType) {
    case TargetLowering::C_Register:
    case TargetLowering::C_RegisterClass: {
      if (OpInfo.Regs.size() > 1) {
        LLVM_DEBUG(dbgs() << "Output operands with multiple defining "
                             "registers are not supported yet\n");
        return false;
      }

      Register SrcReg = OpInfo.Regs[0];
      unsigned SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
      LLT ResTy = MRI->getType(ResRegs[i]);
      if (ResTy.isScalar() && ResTy.getSizeInBits() < SrcSize) {
        // First copy the non-typed virtual register into a generic virtual
        // register
        Register Tmp1Reg =
            MRI->createGenericVirtualRegister(LLT::scalar(SrcSize));
        MIRBuilder.buildCopy(Tmp1Reg, SrcReg);
        // Need to truncate the result of the register
        MIRBuilder.buildTrunc(ResRegs[i], Tmp1Reg);
      } else if (ResTy.getSizeInBits() == SrcSize) {
        MIRBuilder.buildCopy(ResRegs[i], SrcReg);
      } else {
        LLVM_DEBUG(dbgs() << "Unhandled output operand with "
                             "mismatched register size\n");
        return false;
      }
      break;
    }
    case TargetLowering::C_Immediate:
    case TargetLowering::C_Other:
      LLVM_DEBUG(
          dbgs() << "Cannot lower target specific output constraints yet\n");
      return false;
    case TargetLowering::C_Memory:
      break; // Already handled.
    case TargetLowering::C_Address:
      break; // Silence warning.
    case TargetLowering::C_Unknown:
      LLVM_DEBUG(dbgs() << "Unexpected unknown constraint\n");
      return false;
    }
  }

  return true;
}

bool InlineAsmLowering::lowerAsmOperandForConstraint(
    Value *Val, StringRef Constraint, std::vector<MachineOperand> &Ops,
    MachineIRBuilder &MIRBuilder) const {
  if (Constraint.size() > 1)
    return false;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default:
    return false;
  case 'i': // Simple Integer or Relocatable Constant
  case 'n': // immediate integer with a known value.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
      assert(CI->getBitWidth() <= 64 &&
             "expected immediate to fit into 64-bits");
      // Boolean constants should be zero-extended, others are sign-extended
      bool IsBool = CI->getBitWidth() == 1;
      int64_t ExtVal = IsBool ? CI->getZExtValue() : CI->getSExtValue();
      Ops.push_back(MachineOperand::CreateImm(ExtVal));