1 //===-- NVPTXAsmPrinter.cpp - NVPTX LLVM assembly writer ------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains a printer that converts from our internal representation
11 // of machine-dependent LLVM code to NVPTX assembly language.
13 //===----------------------------------------------------------------------===//
15 #include "NVPTXAsmPrinter.h"
16 #include "InstPrinter/NVPTXInstPrinter.h"
17 #include "MCTargetDesc/NVPTXBaseInfo.h"
18 #include "MCTargetDesc/NVPTXMCAsmInfo.h"
20 #include "NVPTXMCExpr.h"
21 #include "NVPTXMachineFunctionInfo.h"
22 #include "NVPTXRegisterInfo.h"
23 #include "NVPTXSubtarget.h"
24 #include "NVPTXTargetMachine.h"
25 #include "NVPTXUtilities.h"
26 #include "cl_common_defines.h"
27 #include "llvm/ADT/APFloat.h"
28 #include "llvm/ADT/APInt.h"
29 #include "llvm/ADT/DenseMap.h"
30 #include "llvm/ADT/DenseSet.h"
31 #include "llvm/ADT/SmallString.h"
32 #include "llvm/ADT/SmallVector.h"
33 #include "llvm/ADT/StringExtras.h"
34 #include "llvm/ADT/StringRef.h"
35 #include "llvm/ADT/Triple.h"
36 #include "llvm/ADT/Twine.h"
37 #include "llvm/Analysis/ConstantFolding.h"
38 #include "llvm/CodeGen/Analysis.h"
39 #include "llvm/CodeGen/MachineBasicBlock.h"
40 #include "llvm/CodeGen/MachineFrameInfo.h"
41 #include "llvm/CodeGen/MachineFunction.h"
42 #include "llvm/CodeGen/MachineInstr.h"
43 #include "llvm/CodeGen/MachineLoopInfo.h"
44 #include "llvm/CodeGen/MachineModuleInfo.h"
45 #include "llvm/CodeGen/MachineOperand.h"
46 #include "llvm/CodeGen/MachineRegisterInfo.h"
47 #include "llvm/CodeGen/TargetLowering.h"
48 #include "llvm/CodeGen/TargetRegisterInfo.h"
49 #include "llvm/CodeGen/ValueTypes.h"
50 #include "llvm/IR/Attributes.h"
51 #include "llvm/IR/BasicBlock.h"
52 #include "llvm/IR/Constant.h"
53 #include "llvm/IR/Constants.h"
54 #include "llvm/IR/DataLayout.h"
55 #include "llvm/IR/DebugInfo.h"
56 #include "llvm/IR/DebugInfoMetadata.h"
57 #include "llvm/IR/DebugLoc.h"
58 #include "llvm/IR/DerivedTypes.h"
59 #include "llvm/IR/Function.h"
60 #include "llvm/IR/GlobalValue.h"
61 #include "llvm/IR/GlobalVariable.h"
62 #include "llvm/IR/Instruction.h"
63 #include "llvm/IR/LLVMContext.h"
64 #include "llvm/IR/Module.h"
65 #include "llvm/IR/Operator.h"
66 #include "llvm/IR/Type.h"
67 #include "llvm/IR/User.h"
68 #include "llvm/MC/MCExpr.h"
69 #include "llvm/MC/MCInst.h"
70 #include "llvm/MC/MCInstrDesc.h"
71 #include "llvm/MC/MCStreamer.h"
72 #include "llvm/MC/MCSymbol.h"
73 #include "llvm/Support/Casting.h"
74 #include "llvm/Support/CommandLine.h"
75 #include "llvm/Support/ErrorHandling.h"
76 #include "llvm/Support/MachineValueType.h"
77 #include "llvm/Support/Path.h"
78 #include "llvm/Support/TargetRegistry.h"
79 #include "llvm/Support/raw_ostream.h"
80 #include "llvm/Target/TargetLoweringObjectFile.h"
81 #include "llvm/Target/TargetMachine.h"
82 #include "llvm/Transforms/Utils/UnrollLoop.h"
93 #define DEPOTNAME "__local_depot"
95 /// DiscoverDependentGlobals - Return a set of GlobalVariables on which \p V
98 DiscoverDependentGlobals(const Value
*V
,
99 DenseSet
<const GlobalVariable
*> &Globals
) {
100 if (const GlobalVariable
*GV
= dyn_cast
<GlobalVariable
>(V
))
103 if (const User
*U
= dyn_cast
<User
>(V
)) {
104 for (unsigned i
= 0, e
= U
->getNumOperands(); i
!= e
; ++i
) {
105 DiscoverDependentGlobals(U
->getOperand(i
), Globals
);
111 /// VisitGlobalVariableForEmission - Add \p GV to the list of GlobalVariable
112 /// instances to be emitted, but only after any dependents have been added
115 VisitGlobalVariableForEmission(const GlobalVariable
*GV
,
116 SmallVectorImpl
<const GlobalVariable
*> &Order
,
117 DenseSet
<const GlobalVariable
*> &Visited
,
118 DenseSet
<const GlobalVariable
*> &Visiting
) {
119 // Have we already visited this one?
120 if (Visited
.count(GV
))
123 // Do we have a circular dependency?
124 if (!Visiting
.insert(GV
).second
)
125 report_fatal_error("Circular dependency found in global variable set");
127 // Make sure we visit all dependents first
128 DenseSet
<const GlobalVariable
*> Others
;
129 for (unsigned i
= 0, e
= GV
->getNumOperands(); i
!= e
; ++i
)
130 DiscoverDependentGlobals(GV
->getOperand(i
), Others
);
132 for (DenseSet
<const GlobalVariable
*>::iterator I
= Others
.begin(),
135 VisitGlobalVariableForEmission(*I
, Order
, Visited
, Visiting
);
137 // Now we can visit ourself
143 void NVPTXAsmPrinter::EmitInstruction(const MachineInstr
*MI
) {
145 lowerToMCInst(MI
, Inst
);
146 EmitToStreamer(*OutStreamer
, Inst
);
149 // Handle symbol backtracking for targets that do not support image handles
150 bool NVPTXAsmPrinter::lowerImageHandleOperand(const MachineInstr
*MI
,
151 unsigned OpNo
, MCOperand
&MCOp
) {
152 const MachineOperand
&MO
= MI
->getOperand(OpNo
);
153 const MCInstrDesc
&MCID
= MI
->getDesc();
155 if (MCID
.TSFlags
& NVPTXII::IsTexFlag
) {
156 // This is a texture fetch, so operand 4 is a texref and operand 5 is
158 if (OpNo
== 4 && MO
.isImm()) {
159 lowerImageHandleSymbol(MO
.getImm(), MCOp
);
162 if (OpNo
== 5 && MO
.isImm() && !(MCID
.TSFlags
& NVPTXII::IsTexModeUnifiedFlag
)) {
163 lowerImageHandleSymbol(MO
.getImm(), MCOp
);
168 } else if (MCID
.TSFlags
& NVPTXII::IsSuldMask
) {
170 1 << (((MCID
.TSFlags
& NVPTXII::IsSuldMask
) >> NVPTXII::IsSuldShift
) - 1);
172 // For a surface load of vector size N, the Nth operand will be the surfref
173 if (OpNo
== VecSize
&& MO
.isImm()) {
174 lowerImageHandleSymbol(MO
.getImm(), MCOp
);
179 } else if (MCID
.TSFlags
& NVPTXII::IsSustFlag
) {
180 // This is a surface store, so operand 0 is a surfref
181 if (OpNo
== 0 && MO
.isImm()) {
182 lowerImageHandleSymbol(MO
.getImm(), MCOp
);
187 } else if (MCID
.TSFlags
& NVPTXII::IsSurfTexQueryFlag
) {
188 // This is a query, so operand 1 is a surfref/texref
189 if (OpNo
== 1 && MO
.isImm()) {
190 lowerImageHandleSymbol(MO
.getImm(), MCOp
);
200 void NVPTXAsmPrinter::lowerImageHandleSymbol(unsigned Index
, MCOperand
&MCOp
) {
202 TargetMachine
&TM
= const_cast<TargetMachine
&>(MF
->getTarget());
203 NVPTXTargetMachine
&nvTM
= static_cast<NVPTXTargetMachine
&>(TM
);
204 const NVPTXMachineFunctionInfo
*MFI
= MF
->getInfo
<NVPTXMachineFunctionInfo
>();
205 const char *Sym
= MFI
->getImageHandleSymbol(Index
);
206 std::string
*SymNamePtr
=
207 nvTM
.getManagedStrPool()->getManagedString(Sym
);
208 MCOp
= GetSymbolRef(OutContext
.getOrCreateSymbol(StringRef(*SymNamePtr
)));
211 void NVPTXAsmPrinter::lowerToMCInst(const MachineInstr
*MI
, MCInst
&OutMI
) {
212 OutMI
.setOpcode(MI
->getOpcode());
213 // Special: Do not mangle symbol operand of CALL_PROTOTYPE
214 if (MI
->getOpcode() == NVPTX::CALL_PROTOTYPE
) {
215 const MachineOperand
&MO
= MI
->getOperand(0);
216 OutMI
.addOperand(GetSymbolRef(
217 OutContext
.getOrCreateSymbol(Twine(MO
.getSymbolName()))));
221 for (unsigned i
= 0, e
= MI
->getNumOperands(); i
!= e
; ++i
) {
222 const MachineOperand
&MO
= MI
->getOperand(i
);
225 if (!nvptxSubtarget
->hasImageHandles()) {
226 if (lowerImageHandleOperand(MI
, i
, MCOp
)) {
227 OutMI
.addOperand(MCOp
);
232 if (lowerOperand(MO
, MCOp
))
233 OutMI
.addOperand(MCOp
);
237 bool NVPTXAsmPrinter::lowerOperand(const MachineOperand
&MO
,
239 switch (MO
.getType()) {
240 default: llvm_unreachable("unknown operand type");
241 case MachineOperand::MO_Register
:
242 MCOp
= MCOperand::createReg(encodeVirtualRegister(MO
.getReg()));
244 case MachineOperand::MO_Immediate
:
245 MCOp
= MCOperand::createImm(MO
.getImm());
247 case MachineOperand::MO_MachineBasicBlock
:
248 MCOp
= MCOperand::createExpr(MCSymbolRefExpr::create(
249 MO
.getMBB()->getSymbol(), OutContext
));
251 case MachineOperand::MO_ExternalSymbol
:
252 MCOp
= GetSymbolRef(GetExternalSymbolSymbol(MO
.getSymbolName()));
254 case MachineOperand::MO_GlobalAddress
:
255 MCOp
= GetSymbolRef(getSymbol(MO
.getGlobal()));
257 case MachineOperand::MO_FPImmediate
: {
258 const ConstantFP
*Cnt
= MO
.getFPImm();
259 const APFloat
&Val
= Cnt
->getValueAPF();
261 switch (Cnt
->getType()->getTypeID()) {
262 default: report_fatal_error("Unsupported FP type"); break;
264 MCOp
= MCOperand::createExpr(
265 NVPTXFloatMCExpr::createConstantFPHalf(Val
, OutContext
));
267 case Type::FloatTyID
:
268 MCOp
= MCOperand::createExpr(
269 NVPTXFloatMCExpr::createConstantFPSingle(Val
, OutContext
));
271 case Type::DoubleTyID
:
272 MCOp
= MCOperand::createExpr(
273 NVPTXFloatMCExpr::createConstantFPDouble(Val
, OutContext
));
282 unsigned NVPTXAsmPrinter::encodeVirtualRegister(unsigned Reg
) {
283 if (TargetRegisterInfo::isVirtualRegister(Reg
)) {
284 const TargetRegisterClass
*RC
= MRI
->getRegClass(Reg
);
286 DenseMap
<unsigned, unsigned> &RegMap
= VRegMapping
[RC
];
287 unsigned RegNum
= RegMap
[Reg
];
289 // Encode the register class in the upper 4 bits
290 // Must be kept in sync with NVPTXInstPrinter::printRegName
292 if (RC
== &NVPTX::Int1RegsRegClass
) {
294 } else if (RC
== &NVPTX::Int16RegsRegClass
) {
296 } else if (RC
== &NVPTX::Int32RegsRegClass
) {
298 } else if (RC
== &NVPTX::Int64RegsRegClass
) {
300 } else if (RC
== &NVPTX::Float32RegsRegClass
) {
302 } else if (RC
== &NVPTX::Float64RegsRegClass
) {
304 } else if (RC
== &NVPTX::Float16RegsRegClass
) {
306 } else if (RC
== &NVPTX::Float16x2RegsRegClass
) {
309 report_fatal_error("Bad register class");
312 // Insert the vreg number
313 Ret
|= (RegNum
& 0x0FFFFFFF);
316 // Some special-use registers are actually physical registers.
317 // Encode this as the register class ID of 0 and the real register ID.
318 return Reg
& 0x0FFFFFFF;
322 MCOperand
NVPTXAsmPrinter::GetSymbolRef(const MCSymbol
*Symbol
) {
324 Expr
= MCSymbolRefExpr::create(Symbol
, MCSymbolRefExpr::VK_None
,
326 return MCOperand::createExpr(Expr
);
329 void NVPTXAsmPrinter::printReturnValStr(const Function
*F
, raw_ostream
&O
) {
330 const DataLayout
&DL
= getDataLayout();
331 const TargetLowering
*TLI
= nvptxSubtarget
->getTargetLowering();
333 Type
*Ty
= F
->getReturnType();
335 bool isABI
= (nvptxSubtarget
->getSmVersion() >= 20);
337 if (Ty
->getTypeID() == Type::VoidTyID
)
343 if (Ty
->isFloatingPointTy() || (Ty
->isIntegerTy() && !Ty
->isIntegerTy(128))) {
345 if (auto *ITy
= dyn_cast
<IntegerType
>(Ty
)) {
346 size
= ITy
->getBitWidth();
348 assert(Ty
->isFloatingPointTy() && "Floating point type expected here");
349 size
= Ty
->getPrimitiveSizeInBits();
351 // PTX ABI requires all scalar return values to be at least 32
352 // bits in size. fp16 normally uses .b16 as its storage type in
353 // PTX, so its size must be adjusted here, too.
357 O
<< ".param .b" << size
<< " func_retval0";
358 } else if (isa
<PointerType
>(Ty
)) {
359 O
<< ".param .b" << TLI
->getPointerTy(DL
).getSizeInBits()
361 } else if (Ty
->isAggregateType() || Ty
->isVectorTy() || Ty
->isIntegerTy(128)) {
362 unsigned totalsz
= DL
.getTypeAllocSize(Ty
);
363 unsigned retAlignment
= 0;
364 if (!getAlign(*F
, 0, retAlignment
))
365 retAlignment
= DL
.getABITypeAlignment(Ty
);
366 O
<< ".param .align " << retAlignment
<< " .b8 func_retval0[" << totalsz
369 llvm_unreachable("Unknown return type");
371 SmallVector
<EVT
, 16> vtparts
;
372 ComputeValueVTs(*TLI
, DL
, Ty
, vtparts
);
374 for (unsigned i
= 0, e
= vtparts
.size(); i
!= e
; ++i
) {
376 EVT elemtype
= vtparts
[i
];
377 if (vtparts
[i
].isVector()) {
378 elems
= vtparts
[i
].getVectorNumElements();
379 elemtype
= vtparts
[i
].getVectorElementType();
382 for (unsigned j
= 0, je
= elems
; j
!= je
; ++j
) {
383 unsigned sz
= elemtype
.getSizeInBits();
384 if (elemtype
.isInteger() && (sz
< 32))
386 O
<< ".reg .b" << sz
<< " func_retval" << idx
;
398 void NVPTXAsmPrinter::printReturnValStr(const MachineFunction
&MF
,
400 const Function
&F
= MF
.getFunction();
401 printReturnValStr(&F
, O
);
404 // Return true if MBB is the header of a loop marked with
405 // llvm.loop.unroll.disable.
406 // TODO: consider "#pragma unroll 1" which is equivalent to "#pragma nounroll".
407 bool NVPTXAsmPrinter::isLoopHeaderOfNoUnroll(
408 const MachineBasicBlock
&MBB
) const {
409 MachineLoopInfo
&LI
= getAnalysis
<MachineLoopInfo
>();
410 // We insert .pragma "nounroll" only to the loop header.
411 if (!LI
.isLoopHeader(&MBB
))
414 // llvm.loop.unroll.disable is marked on the back edges of a loop. Therefore,
415 // we iterate through each back edge of the loop with header MBB, and check
416 // whether its metadata contains llvm.loop.unroll.disable.
417 for (auto I
= MBB
.pred_begin(); I
!= MBB
.pred_end(); ++I
) {
418 const MachineBasicBlock
*PMBB
= *I
;
419 if (LI
.getLoopFor(PMBB
) != LI
.getLoopFor(&MBB
)) {
420 // Edges from other loops to MBB are not back edges.
423 if (const BasicBlock
*PBB
= PMBB
->getBasicBlock()) {
425 PBB
->getTerminator()->getMetadata(LLVMContext::MD_loop
)) {
426 if (GetUnrollMetadata(LoopID
, "llvm.loop.unroll.disable"))
434 void NVPTXAsmPrinter::EmitBasicBlockStart(const MachineBasicBlock
&MBB
) const {
435 AsmPrinter::EmitBasicBlockStart(MBB
);
436 if (isLoopHeaderOfNoUnroll(MBB
))
437 OutStreamer
->EmitRawText(StringRef("\t.pragma \"nounroll\";\n"));
440 void NVPTXAsmPrinter::EmitFunctionEntryLabel() {
441 SmallString
<128> Str
;
442 raw_svector_ostream
O(Str
);
444 if (!GlobalsEmitted
) {
445 emitGlobals(*MF
->getFunction().getParent());
446 GlobalsEmitted
= true;
450 MRI
= &MF
->getRegInfo();
451 F
= &MF
->getFunction();
452 emitLinkageDirective(F
, O
);
453 if (isKernelFunction(*F
))
457 printReturnValStr(*MF
, O
);
460 CurrentFnSym
->print(O
, MAI
);
462 emitFunctionParamList(*MF
, O
);
464 if (isKernelFunction(*F
))
465 emitKernelFunctionDirectives(*F
, O
);
467 OutStreamer
->EmitRawText(O
.str());
470 // Emit open brace for function body.
471 OutStreamer
->EmitRawText(StringRef("{\n"));
472 setAndEmitFunctionVirtualRegisters(*MF
);
475 bool NVPTXAsmPrinter::runOnMachineFunction(MachineFunction
&F
) {
476 nvptxSubtarget
= &F
.getSubtarget
<NVPTXSubtarget
>();
477 bool Result
= AsmPrinter::runOnMachineFunction(F
);
478 // Emit closing brace for the body of function F.
479 // The closing brace must be emitted here because we need to emit additional
480 // debug labels/data after the last basic block.
481 // We need to emit the closing brace here because we don't have function that
482 // finished emission of the function body.
483 OutStreamer
->EmitRawText(StringRef("}\n"));
487 void NVPTXAsmPrinter::EmitFunctionBodyStart() {
488 SmallString
<128> Str
;
489 raw_svector_ostream
O(Str
);
490 emitDemotedVars(&MF
->getFunction(), O
);
491 OutStreamer
->EmitRawText(O
.str());
494 void NVPTXAsmPrinter::EmitFunctionBodyEnd() {
498 const MCSymbol
*NVPTXAsmPrinter::getFunctionFrameSymbol() const {
499 SmallString
<128> Str
;
500 raw_svector_ostream(Str
) << DEPOTNAME
<< getFunctionNumber();
501 return OutContext
.getOrCreateSymbol(Str
);
504 void NVPTXAsmPrinter::emitImplicitDef(const MachineInstr
*MI
) const {
505 unsigned RegNo
= MI
->getOperand(0).getReg();
506 if (TargetRegisterInfo::isVirtualRegister(RegNo
)) {
507 OutStreamer
->AddComment(Twine("implicit-def: ") +
508 getVirtualRegisterName(RegNo
));
510 OutStreamer
->AddComment(Twine("implicit-def: ") +
511 nvptxSubtarget
->getRegisterInfo()->getName(RegNo
));
513 OutStreamer
->AddBlankLine();
516 void NVPTXAsmPrinter::emitKernelFunctionDirectives(const Function
&F
,
517 raw_ostream
&O
) const {
518 // If the NVVM IR has some of reqntid* specified, then output
519 // the reqntid directive, and set the unspecified ones to 1.
520 // If none of reqntid* is specified, don't output reqntid directive.
521 unsigned reqntidx
, reqntidy
, reqntidz
;
522 bool specified
= false;
523 if (!getReqNTIDx(F
, reqntidx
))
527 if (!getReqNTIDy(F
, reqntidy
))
531 if (!getReqNTIDz(F
, reqntidz
))
537 O
<< ".reqntid " << reqntidx
<< ", " << reqntidy
<< ", " << reqntidz
540 // If the NVVM IR has some of maxntid* specified, then output
541 // the maxntid directive, and set the unspecified ones to 1.
542 // If none of maxntid* is specified, don't output maxntid directive.
543 unsigned maxntidx
, maxntidy
, maxntidz
;
545 if (!getMaxNTIDx(F
, maxntidx
))
549 if (!getMaxNTIDy(F
, maxntidy
))
553 if (!getMaxNTIDz(F
, maxntidz
))
559 O
<< ".maxntid " << maxntidx
<< ", " << maxntidy
<< ", " << maxntidz
563 if (getMinCTASm(F
, mincta
))
564 O
<< ".minnctapersm " << mincta
<< "\n";
567 if (getMaxNReg(F
, maxnreg
))
568 O
<< ".maxnreg " << maxnreg
<< "\n";
572 NVPTXAsmPrinter::getVirtualRegisterName(unsigned Reg
) const {
573 const TargetRegisterClass
*RC
= MRI
->getRegClass(Reg
);
576 raw_string_ostream
NameStr(Name
);
578 VRegRCMap::const_iterator I
= VRegMapping
.find(RC
);
579 assert(I
!= VRegMapping
.end() && "Bad register class");
580 const DenseMap
<unsigned, unsigned> &RegMap
= I
->second
;
582 VRegMap::const_iterator VI
= RegMap
.find(Reg
);
583 assert(VI
!= RegMap
.end() && "Bad virtual register");
584 unsigned MappedVR
= VI
->second
;
586 NameStr
<< getNVPTXRegClassStr(RC
) << MappedVR
;
592 void NVPTXAsmPrinter::emitVirtualRegister(unsigned int vr
,
594 O
<< getVirtualRegisterName(vr
);
597 void NVPTXAsmPrinter::printVecModifiedImmediate(
598 const MachineOperand
&MO
, const char *Modifier
, raw_ostream
&O
) {
599 static const char vecelem
[] = { '0', '1', '2', '3', '0', '1', '2', '3' };
600 int Imm
= (int) MO
.getImm();
601 if (0 == strcmp(Modifier
, "vecelem"))
602 O
<< "_" << vecelem
[Imm
];
603 else if (0 == strcmp(Modifier
, "vecv4comm1")) {
604 if ((Imm
< 0) || (Imm
> 3))
606 } else if (0 == strcmp(Modifier
, "vecv4comm2")) {
607 if ((Imm
< 4) || (Imm
> 7))
609 } else if (0 == strcmp(Modifier
, "vecv4pos")) {
612 O
<< "_" << vecelem
[Imm
% 4];
613 } else if (0 == strcmp(Modifier
, "vecv2comm1")) {
614 if ((Imm
< 0) || (Imm
> 1))
616 } else if (0 == strcmp(Modifier
, "vecv2comm2")) {
617 if ((Imm
< 2) || (Imm
> 3))
619 } else if (0 == strcmp(Modifier
, "vecv2pos")) {
622 O
<< "_" << vecelem
[Imm
% 2];
624 llvm_unreachable("Unknown Modifier on immediate operand");
627 void NVPTXAsmPrinter::emitDeclaration(const Function
*F
, raw_ostream
&O
) {
628 emitLinkageDirective(F
, O
);
629 if (isKernelFunction(*F
))
633 printReturnValStr(F
, O
);
634 getSymbol(F
)->print(O
, MAI
);
636 emitFunctionParamList(F
, O
);
640 static bool usedInGlobalVarDef(const Constant
*C
) {
644 if (const GlobalVariable
*GV
= dyn_cast
<GlobalVariable
>(C
)) {
645 return GV
->getName() != "llvm.used";
648 for (const User
*U
: C
->users())
649 if (const Constant
*C
= dyn_cast
<Constant
>(U
))
650 if (usedInGlobalVarDef(C
))
656 static bool usedInOneFunc(const User
*U
, Function
const *&oneFunc
) {
657 if (const GlobalVariable
*othergv
= dyn_cast
<GlobalVariable
>(U
)) {
658 if (othergv
->getName() == "llvm.used")
662 if (const Instruction
*instr
= dyn_cast
<Instruction
>(U
)) {
663 if (instr
->getParent() && instr
->getParent()->getParent()) {
664 const Function
*curFunc
= instr
->getParent()->getParent();
665 if (oneFunc
&& (curFunc
!= oneFunc
))
673 for (const User
*UU
: U
->users())
674 if (!usedInOneFunc(UU
, oneFunc
))
680 /* Find out if a global variable can be demoted to local scope.
681 * Currently, this is valid for CUDA shared variables, which have local
682 * scope and global lifetime. So the conditions to check are :
683 * 1. Is the global variable in shared address space?
684 * 2. Does it have internal linkage?
685 * 3. Is the global variable referenced only in one function?
687 static bool canDemoteGlobalVar(const GlobalVariable
*gv
, Function
const *&f
) {
688 if (!gv
->hasInternalLinkage())
690 PointerType
*Pty
= gv
->getType();
691 if (Pty
->getAddressSpace() != ADDRESS_SPACE_SHARED
)
694 const Function
*oneFunc
= nullptr;
696 bool flag
= usedInOneFunc(gv
, oneFunc
);
705 static bool useFuncSeen(const Constant
*C
,
706 DenseMap
<const Function
*, bool> &seenMap
) {
707 for (const User
*U
: C
->users()) {
708 if (const Constant
*cu
= dyn_cast
<Constant
>(U
)) {
709 if (useFuncSeen(cu
, seenMap
))
711 } else if (const Instruction
*I
= dyn_cast
<Instruction
>(U
)) {
712 const BasicBlock
*bb
= I
->getParent();
715 const Function
*caller
= bb
->getParent();
718 if (seenMap
.find(caller
) != seenMap
.end())
725 void NVPTXAsmPrinter::emitDeclarations(const Module
&M
, raw_ostream
&O
) {
726 DenseMap
<const Function
*, bool> seenMap
;
727 for (Module::const_iterator FI
= M
.begin(), FE
= M
.end(); FI
!= FE
; ++FI
) {
728 const Function
*F
= &*FI
;
730 if (F
->isDeclaration()) {
733 if (F
->getIntrinsicID())
735 emitDeclaration(F
, O
);
738 for (const User
*U
: F
->users()) {
739 if (const Constant
*C
= dyn_cast
<Constant
>(U
)) {
740 if (usedInGlobalVarDef(C
)) {
741 // The use is in the initialization of a global variable
742 // that is a function pointer, so print a declaration
743 // for the original function
744 emitDeclaration(F
, O
);
747 // Emit a declaration of this function if the function that
748 // uses this constant expr has already been seen.
749 if (useFuncSeen(C
, seenMap
)) {
750 emitDeclaration(F
, O
);
755 if (!isa
<Instruction
>(U
))
757 const Instruction
*instr
= cast
<Instruction
>(U
);
758 const BasicBlock
*bb
= instr
->getParent();
761 const Function
*caller
= bb
->getParent();
765 // If a caller has already been seen, then the caller is
766 // appearing in the module before the callee. so print out
767 // a declaration for the callee.
768 if (seenMap
.find(caller
) != seenMap
.end()) {
769 emitDeclaration(F
, O
);
777 static bool isEmptyXXStructor(GlobalVariable
*GV
) {
778 if (!GV
) return true;
779 const ConstantArray
*InitList
= dyn_cast
<ConstantArray
>(GV
->getInitializer());
780 if (!InitList
) return true; // Not an array; we don't know how to parse.
781 return InitList
->getNumOperands() == 0;
784 bool NVPTXAsmPrinter::doInitialization(Module
&M
) {
785 // Construct a default subtarget off of the TargetMachine defaults. The
786 // rest of NVPTX isn't friendly to change subtargets per function and
787 // so the default TargetMachine will have all of the options.
788 const Triple
&TT
= TM
.getTargetTriple();
789 StringRef CPU
= TM
.getTargetCPU();
790 StringRef FS
= TM
.getTargetFeatureString();
791 const NVPTXTargetMachine
&NTM
= static_cast<const NVPTXTargetMachine
&>(TM
);
792 const NVPTXSubtarget
STI(TT
, CPU
, FS
, NTM
);
794 if (M
.alias_size()) {
795 report_fatal_error("Module has aliases, which NVPTX does not support.");
796 return true; // error
798 if (!isEmptyXXStructor(M
.getNamedGlobal("llvm.global_ctors"))) {
800 "Module has a nontrivial global ctor, which NVPTX does not support.");
801 return true; // error
803 if (!isEmptyXXStructor(M
.getNamedGlobal("llvm.global_dtors"))) {
805 "Module has a nontrivial global dtor, which NVPTX does not support.");
806 return true; // error
809 SmallString
<128> Str1
;
810 raw_svector_ostream
OS1(Str1
);
812 // We need to call the parent's one explicitly.
813 bool Result
= AsmPrinter::doInitialization(M
);
815 // Emit header before any dwarf directives are emitted below.
816 emitHeader(M
, OS1
, STI
);
817 OutStreamer
->EmitRawText(OS1
.str());
819 // Emit module-level inline asm if it exists.
820 if (!M
.getModuleInlineAsm().empty()) {
821 OutStreamer
->AddComment("Start of file scope inline assembly");
822 OutStreamer
->AddBlankLine();
823 OutStreamer
->EmitRawText(StringRef(M
.getModuleInlineAsm()));
824 OutStreamer
->AddBlankLine();
825 OutStreamer
->AddComment("End of file scope inline assembly");
826 OutStreamer
->AddBlankLine();
829 GlobalsEmitted
= false;
834 void NVPTXAsmPrinter::emitGlobals(const Module
&M
) {
835 SmallString
<128> Str2
;
836 raw_svector_ostream
OS2(Str2
);
838 emitDeclarations(M
, OS2
);
840 // As ptxas does not support forward references of globals, we need to first
841 // sort the list of module-level globals in def-use order. We visit each
842 // global variable in order, and ensure that we emit it *after* its dependent
843 // globals. We use a little extra memory maintaining both a set and a list to
844 // have fast searches while maintaining a strict ordering.
845 SmallVector
<const GlobalVariable
*, 8> Globals
;
846 DenseSet
<const GlobalVariable
*> GVVisited
;
847 DenseSet
<const GlobalVariable
*> GVVisiting
;
849 // Visit each global variable, in order
850 for (const GlobalVariable
&I
: M
.globals())
851 VisitGlobalVariableForEmission(&I
, Globals
, GVVisited
, GVVisiting
);
853 assert(GVVisited
.size() == M
.getGlobalList().size() &&
854 "Missed a global variable");
855 assert(GVVisiting
.size() == 0 && "Did not fully process a global variable");
857 // Print out module-level global variables in proper order
858 for (unsigned i
= 0, e
= Globals
.size(); i
!= e
; ++i
)
859 printModuleLevelGV(Globals
[i
], OS2
);
863 OutStreamer
->EmitRawText(OS2
.str());
866 void NVPTXAsmPrinter::emitHeader(Module
&M
, raw_ostream
&O
,
867 const NVPTXSubtarget
&STI
) {
869 O
<< "// Generated by LLVM NVPTX Back-End\n";
873 unsigned PTXVersion
= STI
.getPTXVersion();
874 O
<< ".version " << (PTXVersion
/ 10) << "." << (PTXVersion
% 10) << "\n";
877 O
<< STI
.getTargetName();
879 const NVPTXTargetMachine
&NTM
= static_cast<const NVPTXTargetMachine
&>(TM
);
880 if (NTM
.getDrvInterface() == NVPTX::NVCL
)
881 O
<< ", texmode_independent";
883 // FIXME: remove comment once debug info is properly supported.
884 if (MMI
&& MMI
->hasDebugInfo())
889 O
<< ".address_size ";
899 bool NVPTXAsmPrinter::doFinalization(Module
&M
) {
900 bool HasDebugInfo
= MMI
&& MMI
->hasDebugInfo();
902 // If we did not emit any functions, then the global declarations have not
904 if (!GlobalsEmitted
) {
906 GlobalsEmitted
= true;
909 // XXX Temproarily remove global variables so that doFinalization() will not
910 // emit them again (global variables are emitted at beginning).
912 Module::GlobalListType
&global_list
= M
.getGlobalList();
913 int i
, n
= global_list
.size();
914 GlobalVariable
**gv_array
= new GlobalVariable
*[n
];
916 // first, back-up GlobalVariable in gv_array
918 for (Module::global_iterator I
= global_list
.begin(), E
= global_list
.end();
922 // second, empty global_list
923 while (!global_list
.empty())
924 global_list
.remove(global_list
.begin());
926 // call doFinalization
927 bool ret
= AsmPrinter::doFinalization(M
);
929 // now we restore global variables
930 for (i
= 0; i
< n
; i
++)
931 global_list
.insert(global_list
.end(), gv_array
[i
]);
933 clearAnnotationCache(&M
);
936 // FIXME: remove comment once debug info is properly supported.
937 // Close the last emitted section
939 OutStreamer
->EmitRawText("//\t}");
943 //bool Result = AsmPrinter::doFinalization(M);
944 // Instead of calling the parents doFinalization, we may
945 // clone parents doFinalization and customize here.
946 // Currently, we if NVISA out the EmitGlobals() in
947 // parent's doFinalization, which is too intrusive.
949 // Same for the doInitialization.
953 // This function emits appropriate linkage directives for
954 // functions and global variables.
956 // extern function declaration -> .extern
957 // extern function definition -> .visible
958 // external global variable with init -> .visible
959 // external without init -> .extern
960 // appending -> not allowed, assert.
961 // for any linkage other than
962 // internal, private, linker_private,
963 // linker_private_weak, linker_private_weak_def_auto,
966 void NVPTXAsmPrinter::emitLinkageDirective(const GlobalValue
*V
,
968 if (static_cast<NVPTXTargetMachine
&>(TM
).getDrvInterface() == NVPTX::CUDA
) {
969 if (V
->hasExternalLinkage()) {
970 if (isa
<GlobalVariable
>(V
)) {
971 const GlobalVariable
*GVar
= cast
<GlobalVariable
>(V
);
973 if (GVar
->hasInitializer())
978 } else if (V
->isDeclaration())
982 } else if (V
->hasAppendingLinkage()) {
984 msg
.append("Error: ");
985 msg
.append("Symbol ");
987 msg
.append(V
->getName());
988 msg
.append("has unsupported appending linkage type");
989 llvm_unreachable(msg
.c_str());
990 } else if (!V
->hasInternalLinkage() &&
991 !V
->hasPrivateLinkage()) {
// NOTE(review): printModuleLevelGV emits one module-scope global as a PTX
// declaration: special cases for texref/surfref/samplerref globals, demotion
// of function-local shared variables, then address space, alignment, type and
// (for global/const space) the initializer.  The extracted text below is a
// fragment — the original file's interior lines are missing wherever the
// embedded line numbers jump (e.g. the sampler addr_mode/filter_mode case
// bodies and the aggregate-initializer emission tails).  It is kept
// byte-identical; reconstruct from the complete upstream file before editing.
997 void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable
*GVar
,
999 bool processDemoted
) {
// Globals placed in the "llvm.metadata" section, or named with an
// "llvm."/"nvvm." prefix, are compiler-internal and are not emitted.
1001 if (GVar
->hasSection()) {
1002 if (GVar
->getSection() == "llvm.metadata")
1006 // Skip LLVM intrinsic global variables
1007 if (GVar
->getName().startswith("llvm.") ||
1008 GVar
->getName().startswith("nvvm."))
1011 const DataLayout
&DL
= getDataLayout();
1013 // GlobalVariables are always constant pointers themselves.
1014 PointerType
*PTy
= GVar
->getType();
1015 Type
*ETy
= GVar
->getValueType();
1017 if (GVar
->hasExternalLinkage()) {
1018 if (GVar
->hasInitializer())
1022 } else if (GVar
->hasLinkOnceLinkage() || GVar
->hasWeakLinkage() ||
1023 GVar
->hasAvailableExternallyLinkage() ||
1024 GVar
->hasCommonLinkage()) {
// Texture/surface/sampler globals get dedicated PTX opaque types.
1028 if (isTexture(*GVar
)) {
1029 O
<< ".global .texref " << getTextureName(*GVar
) << ";\n";
1033 if (isSurface(*GVar
)) {
1034 O
<< ".global .surfref " << getSurfaceName(*GVar
) << ";\n";
1038 if (GVar
->isDeclaration()) {
1039 // (extern) declarations, no definition or initializer
1040 // Currently the only known declaration is for an automatic __local
1041 // (.shared) promoted to global.
1042 emitPTXGlobalVariable(GVar
, O
);
1047 if (isSampler(*GVar
)) {
1048 O
<< ".global .samplerref " << getSamplerName(*GVar
);
// The sampler initializer is an integer bitfield; its address/filter/
// normalization fields are decoded with the __CLK_* masks below.
1050 const Constant
*Initializer
= nullptr;
1051 if (GVar
->hasInitializer())
1052 Initializer
= GVar
->getInitializer();
1053 const ConstantInt
*CI
= nullptr;
1055 CI
= dyn_cast
<ConstantInt
>(Initializer
);
1057 unsigned sample
= CI
->getZExtValue();
1062 addr
= ((sample
& __CLK_ADDRESS_MASK
) >> __CLK_ADDRESS_BASE
);
1064 O
<< "addr_mode_" << i
<< " = ";
1070 O
<< "clamp_to_border";
1073 O
<< "clamp_to_edge";
1084 O
<< "filter_mode = ";
1085 switch ((sample
& __CLK_FILTER_MASK
) >> __CLK_FILTER_BASE
) {
1093 llvm_unreachable("Anisotropic filtering is not supported");
1098 if (!((sample
& __CLK_NORMALIZED_MASK
) >> __CLK_NORMALIZED_BASE
)) {
1099 O
<< ", force_unnormalized_coords = 1";
1108 if (GVar
->hasPrivateLinkage()) {
1109 if (strncmp(GVar
->getName().data(), "unrollpragma", 12) == 0)
1112 // FIXME - need better way (e.g. Metadata) to avoid generating this global
1113 if (strncmp(GVar
->getName().data(), "filename", 8) == 0)
1115 if (GVar
->use_empty())
// Shared globals used by exactly one function are demoted into that
// function's local declarations (emitted later by emitDemotedVars).
1119 const Function
*demotedFunc
= nullptr;
1120 if (!processDemoted
&& canDemoteGlobalVar(GVar
, demotedFunc
)) {
1121 O
<< "// " << GVar
->getName() << " has been demoted\n";
1122 if (localDecls
.find(demotedFunc
) != localDecls
.end())
1123 localDecls
[demotedFunc
].push_back(GVar
);
1125 std::vector
<const GlobalVariable
*> temp
;
1126 temp
.push_back(GVar
);
1127 localDecls
[demotedFunc
] = temp
;
1133 emitPTXAddressSpace(PTy
->getAddressSpace(), O
);
1135 if (isManaged(*GVar
)) {
1136 O
<< " .attribute(.managed)";
// When the IR specifies no alignment, fall back to the data layout's
// preferred alignment for the value type.
1139 if (GVar
->getAlignment() == 0)
1140 O
<< " .align " << (int)DL
.getPrefTypeAlignment(ETy
);
1142 O
<< " .align " << GVar
->getAlignment();
1144 if (ETy
->isFloatingPointTy() || ETy
->isPointerTy() ||
1145 (ETy
->isIntegerTy() && ETy
->getScalarSizeInBits() <= 64)) {
1147 // Special case: ABI requires that we use .u8 for predicates
1148 if (ETy
->isIntegerTy(1))
1151 O
<< getPTXFundamentalTypeStr(ETy
, false);
1153 getSymbol(GVar
)->print(O
, MAI
);
1155 // Ptx allows variable initilization only for constant and global state
1157 if (GVar
->hasInitializer()) {
1158 if ((PTy
->getAddressSpace() == ADDRESS_SPACE_GLOBAL
) ||
1159 (PTy
->getAddressSpace() == ADDRESS_SPACE_CONST
)) {
1160 const Constant
*Initializer
= GVar
->getInitializer();
1161 // 'undef' is treated as there is no value specified.
1162 if (!Initializer
->isNullValue() && !isa
<UndefValue
>(Initializer
)) {
1164 printScalarConstant(Initializer
, O
);
1167 // The frontend adds zero-initializer to device and constant variables
1168 // that don't have an initial value, and UndefValue to shared
1169 // variables, so skip warning for this case.
1170 if (!GVar
->getInitializer()->isNullValue() &&
1171 !isa
<UndefValue
>(GVar
->getInitializer())) {
1172 report_fatal_error("initial value of '" + GVar
->getName() +
1173 "' is not allowed in addrspace(" +
1174 Twine(PTy
->getAddressSpace()) + ")");
1179 unsigned int ElementSize
= 0;
1181 // Although PTX has direct support for struct type and array type and
1182 // LLVM IR is very similar to PTX, the LLVM CodeGen does not support for
1183 // targets that support these high level field accesses. Structs, arrays
1184 // and vectors are lowered into arrays of bytes.
1185 switch (ETy
->getTypeID()) {
1186 case Type::IntegerTyID
: // Integers larger than 64 bits
1187 case Type::StructTyID
:
1188 case Type::ArrayTyID
:
1189 case Type::VectorTyID
:
1190 ElementSize
= DL
.getTypeStoreSize(ETy
);
1191 // Ptx allows variable initilization only for constant and
1192 // global state spaces.
1193 if (((PTy
->getAddressSpace() == ADDRESS_SPACE_GLOBAL
) ||
1194 (PTy
->getAddressSpace() == ADDRESS_SPACE_CONST
)) &&
1195 GVar
->hasInitializer()) {
1196 const Constant
*Initializer
= GVar
->getInitializer();
1197 if (!isa
<UndefValue
>(Initializer
) && !Initializer
->isNullValue()) {
// Aggregates are buffered byte-by-byte; the element count printed
// below differs for 64-bit (/8) vs 32-bit (/4) symbol buffers.
1198 AggBuffer
aggBuffer(ElementSize
, O
, *this);
1199 bufferAggregateConstant(Initializer
, &aggBuffer
);
1200 if (aggBuffer
.numSymbols
) {
1201 if (static_cast<const NVPTXTargetMachine
&>(TM
).is64Bit()) {
1203 getSymbol(GVar
)->print(O
, MAI
);
1205 O
<< ElementSize
/ 8;
1208 getSymbol(GVar
)->print(O
, MAI
);
1210 O
<< ElementSize
/ 4;
1215 getSymbol(GVar
)->print(O
, MAI
);
1225 getSymbol(GVar
)->print(O
, MAI
);
1234 getSymbol(GVar
)->print(O
, MAI
);
1243 llvm_unreachable("type not supported yet");
1249 void NVPTXAsmPrinter::emitDemotedVars(const Function
*f
, raw_ostream
&O
) {
1250 if (localDecls
.find(f
) == localDecls
.end())
1253 std::vector
<const GlobalVariable
*> &gvars
= localDecls
[f
];
1255 for (unsigned i
= 0, e
= gvars
.size(); i
!= e
; ++i
) {
1256 O
<< "\t// demoted variable\n\t";
1257 printModuleLevelGV(gvars
[i
], O
, true);
1261 void NVPTXAsmPrinter::emitPTXAddressSpace(unsigned int AddressSpace
,
1262 raw_ostream
&O
) const {
1263 switch (AddressSpace
) {
1264 case ADDRESS_SPACE_LOCAL
:
1267 case ADDRESS_SPACE_GLOBAL
:
1270 case ADDRESS_SPACE_CONST
:
1273 case ADDRESS_SPACE_SHARED
:
1277 report_fatal_error("Bad address space found while emitting PTX: " +
1278 llvm::Twine(AddressSpace
));
1284 NVPTXAsmPrinter::getPTXFundamentalTypeStr(Type
*Ty
, bool useB4PTR
) const {
1285 switch (Ty
->getTypeID()) {
1287 llvm_unreachable("unexpected type");
1289 case Type::IntegerTyID
: {
1290 unsigned NumBits
= cast
<IntegerType
>(Ty
)->getBitWidth();
1293 else if (NumBits
<= 64) {
1294 std::string name
= "u";
1295 return name
+ utostr(NumBits
);
1297 llvm_unreachable("Integer too large");
1302 case Type::HalfTyID
:
1303 // fp16 is stored as .b16 for compatibility with pre-sm_53 PTX assembly.
1305 case Type::FloatTyID
:
1307 case Type::DoubleTyID
:
1309 case Type::PointerTyID
:
1310 if (static_cast<const NVPTXTargetMachine
&>(TM
).is64Bit())
1320 llvm_unreachable("unexpected type");
1324 void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable
*GVar
,
1326 const DataLayout
&DL
= getDataLayout();
1328 // GlobalVariables are always constant pointers themselves.
1329 Type
*ETy
= GVar
->getValueType();
1332 emitPTXAddressSpace(GVar
->getType()->getAddressSpace(), O
);
1333 if (GVar
->getAlignment() == 0)
1334 O
<< " .align " << (int)DL
.getPrefTypeAlignment(ETy
);
1336 O
<< " .align " << GVar
->getAlignment();
1338 // Special case for i128
1339 if (ETy
->isIntegerTy(128)) {
1341 getSymbol(GVar
)->print(O
, MAI
);
1346 if (ETy
->isFloatingPointTy() || ETy
->isIntOrPtrTy()) {
1348 O
<< getPTXFundamentalTypeStr(ETy
);
1350 getSymbol(GVar
)->print(O
, MAI
);
1354 int64_t ElementSize
= 0;
1356 // Although PTX has direct support for struct type and array type and LLVM IR
1357 // is very similar to PTX, the LLVM CodeGen does not support for targets that
1358 // support these high level field accesses. Structs and arrays are lowered
1359 // into arrays of bytes.
1360 switch (ETy
->getTypeID()) {
1361 case Type::StructTyID
:
1362 case Type::ArrayTyID
:
1363 case Type::VectorTyID
:
1364 ElementSize
= DL
.getTypeStoreSize(ETy
);
1366 getSymbol(GVar
)->print(O
, MAI
);
1374 llvm_unreachable("type not supported yet");
1378 static unsigned int getOpenCLAlignment(const DataLayout
&DL
, Type
*Ty
) {
1379 if (Ty
->isSingleValueType())
1380 return DL
.getPrefTypeAlignment(Ty
);
1382 auto *ATy
= dyn_cast
<ArrayType
>(Ty
);
1384 return getOpenCLAlignment(DL
, ATy
->getElementType());
1386 auto *STy
= dyn_cast
<StructType
>(Ty
);
1388 unsigned int alignStruct
= 1;
1389 // Go through each element of the struct and find the
1390 // largest alignment.
1391 for (unsigned i
= 0, e
= STy
->getNumElements(); i
!= e
; i
++) {
1392 Type
*ETy
= STy
->getElementType(i
);
1393 unsigned int align
= getOpenCLAlignment(DL
, ETy
);
1394 if (align
> alignStruct
)
1395 alignStruct
= align
;
1400 auto *FTy
= dyn_cast
<FunctionType
>(Ty
);
1402 return DL
.getPointerPrefAlignment();
1403 return DL
.getPrefTypeAlignment(Ty
);
1406 void NVPTXAsmPrinter::printParamName(Function::const_arg_iterator I
,
1407 int paramIndex
, raw_ostream
&O
) {
1408 getSymbol(I
->getParent())->print(O
, MAI
);
1409 O
<< "_param_" << paramIndex
;
1412 void NVPTXAsmPrinter::emitFunctionParamList(const Function
*F
, raw_ostream
&O
) {
1413 const DataLayout
&DL
= getDataLayout();
1414 const AttributeList
&PAL
= F
->getAttributes();
1415 const TargetLowering
*TLI
= nvptxSubtarget
->getTargetLowering();
1416 Function::const_arg_iterator I
, E
;
1417 unsigned paramIndex
= 0;
1419 bool isKernelFunc
= isKernelFunction(*F
);
1420 bool isABI
= (nvptxSubtarget
->getSmVersion() >= 20);
1421 MVT thePointerTy
= TLI
->getPointerTy(DL
);
1423 if (F
->arg_empty()) {
1430 for (I
= F
->arg_begin(), E
= F
->arg_end(); I
!= E
; ++I
, paramIndex
++) {
1431 Type
*Ty
= I
->getType();
1438 // Handle image/sampler parameters
1439 if (isKernelFunction(*F
)) {
1440 if (isSampler(*I
) || isImage(*I
)) {
1442 std::string sname
= I
->getName();
1443 if (isImageWriteOnly(*I
) || isImageReadWrite(*I
)) {
1444 if (nvptxSubtarget
->hasImageHandles())
1445 O
<< "\t.param .u64 .ptr .surfref ";
1447 O
<< "\t.param .surfref ";
1448 CurrentFnSym
->print(O
, MAI
);
1449 O
<< "_param_" << paramIndex
;
1451 else { // Default image is read_only
1452 if (nvptxSubtarget
->hasImageHandles())
1453 O
<< "\t.param .u64 .ptr .texref ";
1455 O
<< "\t.param .texref ";
1456 CurrentFnSym
->print(O
, MAI
);
1457 O
<< "_param_" << paramIndex
;
1460 if (nvptxSubtarget
->hasImageHandles())
1461 O
<< "\t.param .u64 .ptr .samplerref ";
1463 O
<< "\t.param .samplerref ";
1464 CurrentFnSym
->print(O
, MAI
);
1465 O
<< "_param_" << paramIndex
;
1471 if (!PAL
.hasParamAttribute(paramIndex
, Attribute::ByVal
)) {
1472 if (Ty
->isAggregateType() || Ty
->isVectorTy() || Ty
->isIntegerTy(128)) {
1473 // Just print .param .align <a> .b8 .param[size];
1474 // <a> = PAL.getparamalignment
1475 // size = typeallocsize of element type
1476 unsigned align
= PAL
.getParamAlignment(paramIndex
);
1478 align
= DL
.getABITypeAlignment(Ty
);
1480 unsigned sz
= DL
.getTypeAllocSize(Ty
);
1481 O
<< "\t.param .align " << align
<< " .b8 ";
1482 printParamName(I
, paramIndex
, O
);
1483 O
<< "[" << sz
<< "]";
1488 auto *PTy
= dyn_cast
<PointerType
>(Ty
);
1491 // Special handling for pointer arguments to kernel
1492 O
<< "\t.param .u" << thePointerTy
.getSizeInBits() << " ";
1494 if (static_cast<NVPTXTargetMachine
&>(TM
).getDrvInterface() !=
1496 Type
*ETy
= PTy
->getElementType();
1497 int addrSpace
= PTy
->getAddressSpace();
1498 switch (addrSpace
) {
1502 case ADDRESS_SPACE_CONST
:
1503 O
<< ".ptr .const ";
1505 case ADDRESS_SPACE_SHARED
:
1506 O
<< ".ptr .shared ";
1508 case ADDRESS_SPACE_GLOBAL
:
1509 O
<< ".ptr .global ";
1512 O
<< ".align " << (int)getOpenCLAlignment(DL
, ETy
) << " ";
1514 printParamName(I
, paramIndex
, O
);
1518 // non-pointer scalar to kernel func
1520 // Special case: predicate operands become .u8 types
1521 if (Ty
->isIntegerTy(1))
1524 O
<< getPTXFundamentalTypeStr(Ty
);
1526 printParamName(I
, paramIndex
, O
);
1529 // Non-kernel function, just print .param .b<size> for ABI
1530 // and .reg .b<size> for non-ABI
1532 if (isa
<IntegerType
>(Ty
)) {
1533 sz
= cast
<IntegerType
>(Ty
)->getBitWidth();
1536 } else if (isa
<PointerType
>(Ty
))
1537 sz
= thePointerTy
.getSizeInBits();
1538 else if (Ty
->isHalfTy())
1539 // PTX ABI requires all scalar parameters to be at least 32
1540 // bits in size. fp16 normally uses .b16 as its storage type
1541 // in PTX, so its size must be adjusted here, too.
1544 sz
= Ty
->getPrimitiveSizeInBits();
1546 O
<< "\t.param .b" << sz
<< " ";
1548 O
<< "\t.reg .b" << sz
<< " ";
1549 printParamName(I
, paramIndex
, O
);
1553 // param has byVal attribute. So should be a pointer
1554 auto *PTy
= dyn_cast
<PointerType
>(Ty
);
1555 assert(PTy
&& "Param with byval attribute should be a pointer type");
1556 Type
*ETy
= PTy
->getElementType();
1558 if (isABI
|| isKernelFunc
) {
1559 // Just print .param .align <a> .b8 .param[size];
1560 // <a> = PAL.getparamalignment
1561 // size = typeallocsize of element type
1562 unsigned align
= PAL
.getParamAlignment(paramIndex
);
1564 align
= DL
.getABITypeAlignment(ETy
);
1565 // Work around a bug in ptxas. When PTX code takes address of
1566 // byval parameter with alignment < 4, ptxas generates code to
1567 // spill argument into memory. Alas on sm_50+ ptxas generates
1568 // SASS code that fails with misaligned access. To work around
1569 // the problem, make sure that we align byval parameters by at
1570 // least 4. Matching change must be made in LowerCall() where we
1571 // prepare parameters for the call.
1573 // TODO: this will need to be undone when we get to support multi-TU
1574 // device-side compilation as it breaks ABI compatibility with nvcc.
1575 // Hopefully ptxas bug is fixed by then.
1576 if (!isKernelFunc
&& align
< 4)
1578 unsigned sz
= DL
.getTypeAllocSize(ETy
);
1579 O
<< "\t.param .align " << align
<< " .b8 ";
1580 printParamName(I
, paramIndex
, O
);
1581 O
<< "[" << sz
<< "]";
1584 // Split the ETy into constituent parts and
1585 // print .param .b<size> <name> for each part.
1586 // Further, if a part is vector, print the above for
1587 // each vector element.
1588 SmallVector
<EVT
, 16> vtparts
;
1589 ComputeValueVTs(*TLI
, DL
, ETy
, vtparts
);
1590 for (unsigned i
= 0, e
= vtparts
.size(); i
!= e
; ++i
) {
1592 EVT elemtype
= vtparts
[i
];
1593 if (vtparts
[i
].isVector()) {
1594 elems
= vtparts
[i
].getVectorNumElements();
1595 elemtype
= vtparts
[i
].getVectorElementType();
1598 for (unsigned j
= 0, je
= elems
; j
!= je
; ++j
) {
1599 unsigned sz
= elemtype
.getSizeInBits();
1600 if (elemtype
.isInteger() && (sz
< 32))
1602 O
<< "\t.reg .b" << sz
<< " ";
1603 printParamName(I
, paramIndex
, O
);
1619 void NVPTXAsmPrinter::emitFunctionParamList(const MachineFunction
&MF
,
1621 const Function
&F
= MF
.getFunction();
1622 emitFunctionParamList(&F
, O
);
1625 void NVPTXAsmPrinter::setAndEmitFunctionVirtualRegisters(
1626 const MachineFunction
&MF
) {
1627 SmallString
<128> Str
;
1628 raw_svector_ostream
O(Str
);
1630 // Map the global virtual register number to a register class specific
1631 // virtual register number starting from 1 with that class.
1632 const TargetRegisterInfo
*TRI
= MF
.getSubtarget().getRegisterInfo();
1633 //unsigned numRegClasses = TRI->getNumRegClasses();
1635 // Emit the Fake Stack Object
1636 const MachineFrameInfo
&MFI
= MF
.getFrameInfo();
1637 int NumBytes
= (int) MFI
.getStackSize();
1639 O
<< "\t.local .align " << MFI
.getMaxAlignment() << " .b8 \t" << DEPOTNAME
1640 << getFunctionNumber() << "[" << NumBytes
<< "];\n";
1641 if (static_cast<const NVPTXTargetMachine
&>(MF
.getTarget()).is64Bit()) {
1642 O
<< "\t.reg .b64 \t%SP;\n";
1643 O
<< "\t.reg .b64 \t%SPL;\n";
1645 O
<< "\t.reg .b32 \t%SP;\n";
1646 O
<< "\t.reg .b32 \t%SPL;\n";
1650 // Go through all virtual registers to establish the mapping between the
1652 // register number and the per class virtual register number.
1653 // We use the per class virtual register number in the ptx output.
1654 unsigned int numVRs
= MRI
->getNumVirtRegs();
1655 for (unsigned i
= 0; i
< numVRs
; i
++) {
1656 unsigned int vr
= TRI
->index2VirtReg(i
);
1657 const TargetRegisterClass
*RC
= MRI
->getRegClass(vr
);
1658 DenseMap
<unsigned, unsigned> ®map
= VRegMapping
[RC
];
1659 int n
= regmap
.size();
1660 regmap
.insert(std::make_pair(vr
, n
+ 1));
1663 // Emit register declarations
1664 // @TODO: Extract out the real register usage
1665 // O << "\t.reg .pred %p<" << NVPTXNumRegisters << ">;\n";
1666 // O << "\t.reg .s16 %rc<" << NVPTXNumRegisters << ">;\n";
1667 // O << "\t.reg .s16 %rs<" << NVPTXNumRegisters << ">;\n";
1668 // O << "\t.reg .s32 %r<" << NVPTXNumRegisters << ">;\n";
1669 // O << "\t.reg .s64 %rd<" << NVPTXNumRegisters << ">;\n";
1670 // O << "\t.reg .f32 %f<" << NVPTXNumRegisters << ">;\n";
1671 // O << "\t.reg .f64 %fd<" << NVPTXNumRegisters << ">;\n";
1673 // Emit declaration of the virtual registers or 'physical' registers for
1674 // each register class
1675 for (unsigned i
=0; i
< TRI
->getNumRegClasses(); i
++) {
1676 const TargetRegisterClass
*RC
= TRI
->getRegClass(i
);
1677 DenseMap
<unsigned, unsigned> ®map
= VRegMapping
[RC
];
1678 std::string rcname
= getNVPTXRegClassName(RC
);
1679 std::string rcStr
= getNVPTXRegClassStr(RC
);
1680 int n
= regmap
.size();
1682 // Only declare those registers that may be used.
1684 O
<< "\t.reg " << rcname
<< " \t" << rcStr
<< "<" << (n
+1)
1689 OutStreamer
->EmitRawText(O
.str());
1692 void NVPTXAsmPrinter::printFPConstant(const ConstantFP
*Fp
, raw_ostream
&O
) {
1693 APFloat APF
= APFloat(Fp
->getValueAPF()); // make a copy
1695 unsigned int numHex
;
1698 if (Fp
->getType()->getTypeID() == Type::FloatTyID
) {
1701 APF
.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven
, &ignored
);
1702 } else if (Fp
->getType()->getTypeID() == Type::DoubleTyID
) {
1705 APF
.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven
, &ignored
);
1707 llvm_unreachable("unsupported fp type");
1709 APInt API
= APF
.bitcastToAPInt();
1710 O
<< lead
<< format_hex_no_prefix(API
.getZExtValue(), numHex
, /*Upper=*/true);
1713 void NVPTXAsmPrinter::printScalarConstant(const Constant
*CPV
, raw_ostream
&O
) {
1714 if (const ConstantInt
*CI
= dyn_cast
<ConstantInt
>(CPV
)) {
1715 O
<< CI
->getValue();
1718 if (const ConstantFP
*CFP
= dyn_cast
<ConstantFP
>(CPV
)) {
1719 printFPConstant(CFP
, O
);
1722 if (isa
<ConstantPointerNull
>(CPV
)) {
1726 if (const GlobalValue
*GVar
= dyn_cast
<GlobalValue
>(CPV
)) {
1727 bool IsNonGenericPointer
= false;
1728 if (GVar
->getType()->getAddressSpace() != 0) {
1729 IsNonGenericPointer
= true;
1731 if (EmitGeneric
&& !isa
<Function
>(CPV
) && !IsNonGenericPointer
) {
1733 getSymbol(GVar
)->print(O
, MAI
);
1736 getSymbol(GVar
)->print(O
, MAI
);
1740 if (const ConstantExpr
*Cexpr
= dyn_cast
<ConstantExpr
>(CPV
)) {
1741 const Value
*v
= Cexpr
->stripPointerCasts();
1742 PointerType
*PTy
= dyn_cast
<PointerType
>(Cexpr
->getType());
1743 bool IsNonGenericPointer
= false;
1744 if (PTy
&& PTy
->getAddressSpace() != 0) {
1745 IsNonGenericPointer
= true;
1747 if (const GlobalValue
*GVar
= dyn_cast
<GlobalValue
>(v
)) {
1748 if (EmitGeneric
&& !isa
<Function
>(v
) && !IsNonGenericPointer
) {
1750 getSymbol(GVar
)->print(O
, MAI
);
1753 getSymbol(GVar
)->print(O
, MAI
);
1757 lowerConstant(CPV
)->print(O
, MAI
);
1761 llvm_unreachable("Not scalar type found in printScalarConstant()");
// These utility functions assure we get the right sequence of bytes for a
// given type even for big-endian machines
template <typename T> static void ConvertIntToBytes(unsigned char *p, T val) {
  // Widen (sign-extending) to 64 bits, then write one byte at a time,
  // least-significant byte first, so the output buffer is little-endian
  // regardless of the host's byte order.
  int64_t Bits = static_cast<int64_t>(val);
  for (unsigned Idx = 0; Idx < sizeof(T); ++Idx) {
    p[Idx] = static_cast<unsigned char>(Bits & 0xFF);
    Bits >>= 8;
  }
}
// Serialize \p val's IEEE-754 bit pattern into p[0..3], least-significant
// byte first (little-endian output on any host).
static void ConvertFloatToBytes(unsigned char *p, float val) {
  // Fetch the bit pattern with memcpy: the previous
  // "int32_t *vp = (int32_t *)&val" read through an incompatible pointer
  // type, which violates C++ strict-aliasing rules (UB). The byte output
  // is unchanged.
  int32_t Bits;
  static_assert(sizeof(Bits) == sizeof(val), "unexpected float size");
  std::memcpy(&Bits, &val, sizeof(Bits));
  for (unsigned i = 0; i < sizeof(int32_t); ++i) {
    p[i] = (unsigned char)Bits;
    Bits >>= 8;
  }
}
// Serialize \p val's IEEE-754 bit pattern into p[0..7], least-significant
// byte first (little-endian output on any host).
static void ConvertDoubleToBytes(unsigned char *p, double val) {
  // Fetch the bit pattern with memcpy: the previous
  // "int64_t *vp = (int64_t *)&val" read through an incompatible pointer
  // type, which violates C++ strict-aliasing rules (UB). The byte output
  // is unchanged.
  int64_t Bits;
  static_assert(sizeof(Bits) == sizeof(val), "unexpected double size");
  std::memcpy(&Bits, &val, sizeof(Bits));
  for (unsigned i = 0; i < sizeof(int64_t); ++i) {
    p[i] = (unsigned char)Bits;
    Bits >>= 8;
  }
}
1788 void NVPTXAsmPrinter::bufferLEByte(const Constant
*CPV
, int Bytes
,
1789 AggBuffer
*aggBuffer
) {
1790 const DataLayout
&DL
= getDataLayout();
1792 if (isa
<UndefValue
>(CPV
) || CPV
->isNullValue()) {
1793 int s
= DL
.getTypeAllocSize(CPV
->getType());
1796 aggBuffer
->addZeros(s
);
1800 unsigned char ptr
[8];
1801 switch (CPV
->getType()->getTypeID()) {
1803 case Type::IntegerTyID
: {
1804 Type
*ETy
= CPV
->getType();
1805 if (ETy
== Type::getInt8Ty(CPV
->getContext())) {
1806 unsigned char c
= (unsigned char)cast
<ConstantInt
>(CPV
)->getZExtValue();
1807 ConvertIntToBytes
<>(ptr
, c
);
1808 aggBuffer
->addBytes(ptr
, 1, Bytes
);
1809 } else if (ETy
== Type::getInt16Ty(CPV
->getContext())) {
1810 short int16
= (short)cast
<ConstantInt
>(CPV
)->getZExtValue();
1811 ConvertIntToBytes
<>(ptr
, int16
);
1812 aggBuffer
->addBytes(ptr
, 2, Bytes
);
1813 } else if (ETy
== Type::getInt32Ty(CPV
->getContext())) {
1814 if (const ConstantInt
*constInt
= dyn_cast
<ConstantInt
>(CPV
)) {
1815 int int32
= (int)(constInt
->getZExtValue());
1816 ConvertIntToBytes
<>(ptr
, int32
);
1817 aggBuffer
->addBytes(ptr
, 4, Bytes
);
1819 } else if (const auto *Cexpr
= dyn_cast
<ConstantExpr
>(CPV
)) {
1820 if (const auto *constInt
= dyn_cast_or_null
<ConstantInt
>(
1821 ConstantFoldConstant(Cexpr
, DL
))) {
1822 int int32
= (int)(constInt
->getZExtValue());
1823 ConvertIntToBytes
<>(ptr
, int32
);
1824 aggBuffer
->addBytes(ptr
, 4, Bytes
);
1827 if (Cexpr
->getOpcode() == Instruction::PtrToInt
) {
1828 Value
*v
= Cexpr
->getOperand(0)->stripPointerCasts();
1829 aggBuffer
->addSymbol(v
, Cexpr
->getOperand(0));
1830 aggBuffer
->addZeros(4);
1834 llvm_unreachable("unsupported integer const type");
1835 } else if (ETy
== Type::getInt64Ty(CPV
->getContext())) {
1836 if (const ConstantInt
*constInt
= dyn_cast
<ConstantInt
>(CPV
)) {
1837 long long int64
= (long long)(constInt
->getZExtValue());
1838 ConvertIntToBytes
<>(ptr
, int64
);
1839 aggBuffer
->addBytes(ptr
, 8, Bytes
);
1841 } else if (const ConstantExpr
*Cexpr
= dyn_cast
<ConstantExpr
>(CPV
)) {
1842 if (const auto *constInt
= dyn_cast_or_null
<ConstantInt
>(
1843 ConstantFoldConstant(Cexpr
, DL
))) {
1844 long long int64
= (long long)(constInt
->getZExtValue());
1845 ConvertIntToBytes
<>(ptr
, int64
);
1846 aggBuffer
->addBytes(ptr
, 8, Bytes
);
1849 if (Cexpr
->getOpcode() == Instruction::PtrToInt
) {
1850 Value
*v
= Cexpr
->getOperand(0)->stripPointerCasts();
1851 aggBuffer
->addSymbol(v
, Cexpr
->getOperand(0));
1852 aggBuffer
->addZeros(8);
1856 llvm_unreachable("unsupported integer const type");
1858 llvm_unreachable("unsupported integer const type");
1861 case Type::HalfTyID
:
1862 case Type::FloatTyID
:
1863 case Type::DoubleTyID
: {
1864 const ConstantFP
*CFP
= dyn_cast
<ConstantFP
>(CPV
);
1865 Type
*Ty
= CFP
->getType();
1866 if (Ty
== Type::getHalfTy(CPV
->getContext())) {
1867 APInt API
= CFP
->getValueAPF().bitcastToAPInt();
1868 uint16_t float16
= API
.getLoBits(16).getZExtValue();
1869 ConvertIntToBytes
<>(ptr
, float16
);
1870 aggBuffer
->addBytes(ptr
, 2, Bytes
);
1871 } else if (Ty
== Type::getFloatTy(CPV
->getContext())) {
1872 float float32
= (float) CFP
->getValueAPF().convertToFloat();
1873 ConvertFloatToBytes(ptr
, float32
);
1874 aggBuffer
->addBytes(ptr
, 4, Bytes
);
1875 } else if (Ty
== Type::getDoubleTy(CPV
->getContext())) {
1876 double float64
= CFP
->getValueAPF().convertToDouble();
1877 ConvertDoubleToBytes(ptr
, float64
);
1878 aggBuffer
->addBytes(ptr
, 8, Bytes
);
1880 llvm_unreachable("unsupported fp const type");
1884 case Type::PointerTyID
: {
1885 if (const GlobalValue
*GVar
= dyn_cast
<GlobalValue
>(CPV
)) {
1886 aggBuffer
->addSymbol(GVar
, GVar
);
1887 } else if (const ConstantExpr
*Cexpr
= dyn_cast
<ConstantExpr
>(CPV
)) {
1888 const Value
*v
= Cexpr
->stripPointerCasts();
1889 aggBuffer
->addSymbol(v
, Cexpr
);
1891 unsigned int s
= DL
.getTypeAllocSize(CPV
->getType());
1892 aggBuffer
->addZeros(s
);
1896 case Type::ArrayTyID
:
1897 case Type::VectorTyID
:
1898 case Type::StructTyID
: {
1899 if (isa
<ConstantAggregate
>(CPV
) || isa
<ConstantDataSequential
>(CPV
)) {
1900 int ElementSize
= DL
.getTypeAllocSize(CPV
->getType());
1901 bufferAggregateConstant(CPV
, aggBuffer
);
1902 if (Bytes
> ElementSize
)
1903 aggBuffer
->addZeros(Bytes
- ElementSize
);
1904 } else if (isa
<ConstantAggregateZero
>(CPV
))
1905 aggBuffer
->addZeros(Bytes
);
1907 llvm_unreachable("Unexpected Constant type");
1912 llvm_unreachable("unsupported type");
1916 void NVPTXAsmPrinter::bufferAggregateConstant(const Constant
*CPV
,
1917 AggBuffer
*aggBuffer
) {
1918 const DataLayout
&DL
= getDataLayout();
1921 // Integers of arbitrary width
1922 if (const ConstantInt
*CI
= dyn_cast
<ConstantInt
>(CPV
)) {
1923 APInt Val
= CI
->getValue();
1924 for (unsigned I
= 0, E
= DL
.getTypeAllocSize(CPV
->getType()); I
< E
; ++I
) {
1925 uint8_t Byte
= Val
.getLoBits(8).getZExtValue();
1926 aggBuffer
->addBytes(&Byte
, 1, 1);
1933 if (isa
<ConstantArray
>(CPV
) || isa
<ConstantVector
>(CPV
)) {
1934 if (CPV
->getNumOperands())
1935 for (unsigned i
= 0, e
= CPV
->getNumOperands(); i
!= e
; ++i
)
1936 bufferLEByte(cast
<Constant
>(CPV
->getOperand(i
)), 0, aggBuffer
);
1940 if (const ConstantDataSequential
*CDS
=
1941 dyn_cast
<ConstantDataSequential
>(CPV
)) {
1942 if (CDS
->getNumElements())
1943 for (unsigned i
= 0; i
< CDS
->getNumElements(); ++i
)
1944 bufferLEByte(cast
<Constant
>(CDS
->getElementAsConstant(i
)), 0,
1949 if (isa
<ConstantStruct
>(CPV
)) {
1950 if (CPV
->getNumOperands()) {
1951 StructType
*ST
= cast
<StructType
>(CPV
->getType());
1952 for (unsigned i
= 0, e
= CPV
->getNumOperands(); i
!= e
; ++i
) {
1954 Bytes
= DL
.getStructLayout(ST
)->getElementOffset(0) +
1955 DL
.getTypeAllocSize(ST
) -
1956 DL
.getStructLayout(ST
)->getElementOffset(i
);
1958 Bytes
= DL
.getStructLayout(ST
)->getElementOffset(i
+ 1) -
1959 DL
.getStructLayout(ST
)->getElementOffset(i
);
1960 bufferLEByte(cast
<Constant
>(CPV
->getOperand(i
)), Bytes
, aggBuffer
);
1965 llvm_unreachable("unsupported constant type in printAggregateConstant()");
1968 /// lowerConstantForGV - Return an MCExpr for the given Constant. This is mostly
1969 /// a copy from AsmPrinter::lowerConstant, except customized to only handle
1970 /// expressions that are representable in PTX and create
1971 /// NVPTXGenericMCSymbolRefExpr nodes for addrspacecast instructions.
1973 NVPTXAsmPrinter::lowerConstantForGV(const Constant
*CV
, bool ProcessingGeneric
) {
1974 MCContext
&Ctx
= OutContext
;
1976 if (CV
->isNullValue() || isa
<UndefValue
>(CV
))
1977 return MCConstantExpr::create(0, Ctx
);
1979 if (const ConstantInt
*CI
= dyn_cast
<ConstantInt
>(CV
))
1980 return MCConstantExpr::create(CI
->getZExtValue(), Ctx
);
1982 if (const GlobalValue
*GV
= dyn_cast
<GlobalValue
>(CV
)) {
1983 const MCSymbolRefExpr
*Expr
=
1984 MCSymbolRefExpr::create(getSymbol(GV
), Ctx
);
1985 if (ProcessingGeneric
) {
1986 return NVPTXGenericMCSymbolRefExpr::create(Expr
, Ctx
);
1992 const ConstantExpr
*CE
= dyn_cast
<ConstantExpr
>(CV
);
1994 llvm_unreachable("Unknown constant value to lower!");
1997 switch (CE
->getOpcode()) {
1999 // If the code isn't optimized, there may be outstanding folding
2000 // opportunities. Attempt to fold the expression using DataLayout as a
2001 // last resort before giving up.
2002 if (Constant
*C
= ConstantFoldConstant(CE
, getDataLayout()))
2004 return lowerConstantForGV(C
, ProcessingGeneric
);
2006 // Otherwise report the problem to the user.
2009 raw_string_ostream
OS(S
);
2010 OS
<< "Unsupported expression in static initializer: ";
2011 CE
->printAsOperand(OS
, /*PrintType=*/false,
2012 !MF
? nullptr : MF
->getFunction().getParent());
2013 report_fatal_error(OS
.str());
2016 case Instruction::AddrSpaceCast
: {
2017 // Strip the addrspacecast and pass along the operand
2018 PointerType
*DstTy
= cast
<PointerType
>(CE
->getType());
2019 if (DstTy
->getAddressSpace() == 0) {
2020 return lowerConstantForGV(cast
<const Constant
>(CE
->getOperand(0)), true);
2023 raw_string_ostream
OS(S
);
2024 OS
<< "Unsupported expression in static initializer: ";
2025 CE
->printAsOperand(OS
, /*PrintType=*/ false,
2026 !MF
? nullptr : MF
->getFunction().getParent());
2027 report_fatal_error(OS
.str());
2030 case Instruction::GetElementPtr
: {
2031 const DataLayout
&DL
= getDataLayout();
2033 // Generate a symbolic expression for the byte address
2034 APInt
OffsetAI(DL
.getPointerTypeSizeInBits(CE
->getType()), 0);
2035 cast
<GEPOperator
>(CE
)->accumulateConstantOffset(DL
, OffsetAI
);
2037 const MCExpr
*Base
= lowerConstantForGV(CE
->getOperand(0),
2042 int64_t Offset
= OffsetAI
.getSExtValue();
2043 return MCBinaryExpr::createAdd(Base
, MCConstantExpr::create(Offset
, Ctx
),
2047 case Instruction::Trunc
:
2048 // We emit the value and depend on the assembler to truncate the generated
2049 // expression properly. This is important for differences between
2050 // blockaddress labels. Since the two labels are in the same function, it
2051 // is reasonable to treat their delta as a 32-bit value.
2053 case Instruction::BitCast
:
2054 return lowerConstantForGV(CE
->getOperand(0), ProcessingGeneric
);
2056 case Instruction::IntToPtr
: {
2057 const DataLayout
&DL
= getDataLayout();
2059 // Handle casts to pointers by changing them into casts to the appropriate
2060 // integer type. This promotes constant folding and simplifies this code.
2061 Constant
*Op
= CE
->getOperand(0);
2062 Op
= ConstantExpr::getIntegerCast(Op
, DL
.getIntPtrType(CV
->getType()),
2064 return lowerConstantForGV(Op
, ProcessingGeneric
);
2067 case Instruction::PtrToInt
: {
2068 const DataLayout
&DL
= getDataLayout();
2070 // Support only foldable casts to/from pointers that can be eliminated by
2071 // changing the pointer to the appropriately sized integer type.
2072 Constant
*Op
= CE
->getOperand(0);
2073 Type
*Ty
= CE
->getType();
2075 const MCExpr
*OpExpr
= lowerConstantForGV(Op
, ProcessingGeneric
);
2077 // We can emit the pointer value into this slot if the slot is an
2078 // integer slot equal to the size of the pointer.
2079 if (DL
.getTypeAllocSize(Ty
) == DL
.getTypeAllocSize(Op
->getType()))
2082 // Otherwise the pointer is smaller than the resultant integer, mask off
2083 // the high bits so we are sure to get a proper truncation if the input is
2085 unsigned InBits
= DL
.getTypeAllocSizeInBits(Op
->getType());
2086 const MCExpr
*MaskExpr
= MCConstantExpr::create(~0ULL >> (64-InBits
), Ctx
);
2087 return MCBinaryExpr::createAnd(OpExpr
, MaskExpr
, Ctx
);
2090 // The MC library also has a right-shift operator, but it isn't consistently
2091 // signed or unsigned between different targets.
2092 case Instruction::Add
: {
2093 const MCExpr
*LHS
= lowerConstantForGV(CE
->getOperand(0), ProcessingGeneric
);
2094 const MCExpr
*RHS
= lowerConstantForGV(CE
->getOperand(1), ProcessingGeneric
);
2095 switch (CE
->getOpcode()) {
2096 default: llvm_unreachable("Unknown binary operator constant cast expr");
2097 case Instruction::Add
: return MCBinaryExpr::createAdd(LHS
, RHS
, Ctx
);
2103 // Copy of MCExpr::print customized for NVPTX
2104 void NVPTXAsmPrinter::printMCExpr(const MCExpr
&Expr
, raw_ostream
&OS
) {
2105 switch (Expr
.getKind()) {
2106 case MCExpr::Target
:
2107 return cast
<MCTargetExpr
>(&Expr
)->printImpl(OS
, MAI
);
2108 case MCExpr::Constant
:
2109 OS
<< cast
<MCConstantExpr
>(Expr
).getValue();
2112 case MCExpr::SymbolRef
: {
2113 const MCSymbolRefExpr
&SRE
= cast
<MCSymbolRefExpr
>(Expr
);
2114 const MCSymbol
&Sym
= SRE
.getSymbol();
2119 case MCExpr::Unary
: {
2120 const MCUnaryExpr
&UE
= cast
<MCUnaryExpr
>(Expr
);
2121 switch (UE
.getOpcode()) {
2122 case MCUnaryExpr::LNot
: OS
<< '!'; break;
2123 case MCUnaryExpr::Minus
: OS
<< '-'; break;
2124 case MCUnaryExpr::Not
: OS
<< '~'; break;
2125 case MCUnaryExpr::Plus
: OS
<< '+'; break;
2127 printMCExpr(*UE
.getSubExpr(), OS
);
2131 case MCExpr::Binary
: {
2132 const MCBinaryExpr
&BE
= cast
<MCBinaryExpr
>(Expr
);
2134 // Only print parens around the LHS if it is non-trivial.
2135 if (isa
<MCConstantExpr
>(BE
.getLHS()) || isa
<MCSymbolRefExpr
>(BE
.getLHS()) ||
2136 isa
<NVPTXGenericMCSymbolRefExpr
>(BE
.getLHS())) {
2137 printMCExpr(*BE
.getLHS(), OS
);
2140 printMCExpr(*BE
.getLHS(), OS
);
2144 switch (BE
.getOpcode()) {
2145 case MCBinaryExpr::Add
:
2146 // Print "X-42" instead of "X+-42".
2147 if (const MCConstantExpr
*RHSC
= dyn_cast
<MCConstantExpr
>(BE
.getRHS())) {
2148 if (RHSC
->getValue() < 0) {
2149 OS
<< RHSC
->getValue();
2156 default: llvm_unreachable("Unhandled binary operator");
2159 // Only print parens around the LHS if it is non-trivial.
2160 if (isa
<MCConstantExpr
>(BE
.getRHS()) || isa
<MCSymbolRefExpr
>(BE
.getRHS())) {
2161 printMCExpr(*BE
.getRHS(), OS
);
2164 printMCExpr(*BE
.getRHS(), OS
);
2171 llvm_unreachable("Invalid expression kind!");
2174 /// PrintAsmOperand - Print out an operand for an inline asm expression.
2176 bool NVPTXAsmPrinter::PrintAsmOperand(const MachineInstr
*MI
, unsigned OpNo
,
2177 unsigned AsmVariant
,
2178 const char *ExtraCode
, raw_ostream
&O
) {
2179 if (ExtraCode
&& ExtraCode
[0]) {
2180 if (ExtraCode
[1] != 0)
2181 return true; // Unknown modifier.
2183 switch (ExtraCode
[0]) {
2185 // See if this is a generic print operand
2186 return AsmPrinter::PrintAsmOperand(MI
, OpNo
, AsmVariant
, ExtraCode
, O
);
2192 printOperand(MI
, OpNo
, O
);
2197 bool NVPTXAsmPrinter::PrintAsmMemoryOperand(
2198 const MachineInstr
*MI
, unsigned OpNo
, unsigned AsmVariant
,
2199 const char *ExtraCode
, raw_ostream
&O
) {
2200 if (ExtraCode
&& ExtraCode
[0])
2201 return true; // Unknown modifier
2204 printMemOperand(MI
, OpNo
, O
);
2210 void NVPTXAsmPrinter::printOperand(const MachineInstr
*MI
, int opNum
,
2211 raw_ostream
&O
, const char *Modifier
) {
2212 const MachineOperand
&MO
= MI
->getOperand(opNum
);
2213 switch (MO
.getType()) {
2214 case MachineOperand::MO_Register
:
2215 if (TargetRegisterInfo::isPhysicalRegister(MO
.getReg())) {
2216 if (MO
.getReg() == NVPTX::VRDepot
)
2217 O
<< DEPOTNAME
<< getFunctionNumber();
2219 O
<< NVPTXInstPrinter::getRegisterName(MO
.getReg());
2221 emitVirtualRegister(MO
.getReg(), O
);
2225 case MachineOperand::MO_Immediate
:
2228 else if (strstr(Modifier
, "vec") == Modifier
)
2229 printVecModifiedImmediate(MO
, Modifier
, O
);
2232 "Don't know how to handle modifier on immediate operand");
2235 case MachineOperand::MO_FPImmediate
:
2236 printFPConstant(MO
.getFPImm(), O
);
2239 case MachineOperand::MO_GlobalAddress
:
2240 getSymbol(MO
.getGlobal())->print(O
, MAI
);
2243 case MachineOperand::MO_MachineBasicBlock
:
2244 MO
.getMBB()->getSymbol()->print(O
, MAI
);
2248 llvm_unreachable("Operand type not supported.");
2252 void NVPTXAsmPrinter::printMemOperand(const MachineInstr
*MI
, int opNum
,
2253 raw_ostream
&O
, const char *Modifier
) {
2254 printOperand(MI
, opNum
, O
);
2256 if (Modifier
&& strcmp(Modifier
, "add") == 0) {
2258 printOperand(MI
, opNum
+ 1, O
);
2260 if (MI
->getOperand(opNum
+ 1).isImm() &&
2261 MI
->getOperand(opNum
+ 1).getImm() == 0)
2262 return; // don't print ',0' or '+0'
2264 printOperand(MI
, opNum
+ 1, O
);
2268 // Force static initialization.
2269 extern "C" void LLVMInitializeNVPTXAsmPrinter() {
2270 RegisterAsmPrinter
<NVPTXAsmPrinter
> X(getTheNVPTXTarget32());
2271 RegisterAsmPrinter
<NVPTXAsmPrinter
> Y(getTheNVPTXTarget64());