//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "MCTargetDesc/SPIRVMCTargetDesc.h"
#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

namespace llvm {

class SPIRVMachineModuleInfo : public MachineModuleInfoImpl {
public:
  SyncScope::ID Work_ItemSSID;
  SyncScope::ID WorkGroupSSID;
  SyncScope::ID DeviceSSID;
  SyncScope::ID AllSVMDevicesSSID;
  SyncScope::ID SubGroupSSID;

  SPIRVMachineModuleInfo(const MachineModuleInfo &MMI) {
    LLVMContext &CTX = MMI.getModule()->getContext();
    Work_ItemSSID = CTX.getOrInsertSyncScopeID("work_item");
    WorkGroupSSID = CTX.getOrInsertSyncScopeID("workgroup");
    DeviceSSID = CTX.getOrInsertSyncScopeID("device");
    AllSVMDevicesSSID = CTX.getOrInsertSyncScopeID("all_svm_devices");
    SubGroupSSID = CTX.getOrInsertSyncScopeID("sub_group");
  }
};

} // end namespace llvm

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
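
// Each ExtInstList entry pairs an extended-instruction set with the opcode to
// emit from it, tried in order against the subtarget's capabilities. As an
// illustration (not from the original source), a selector that prefers the
// OpenCL set but can fall back to GLSL would build:
//   ExtInstList Insts = {{SPIRV::InstructionSet::OpenCL_std, CL::fmin},
//                        {SPIRV::InstructionSet::GLSL_std_450, GL::NMin}};
// and pass it to the ExtInstList overload of selectExtInst below.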

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;
  SPIRVMachineModuleInfo *MMI = nullptr;

  /// We need to keep track of the number we give to anonymous global values to
  /// generate the same name every time when this is needed.
  mutable DenseMap<const GlobalValue *, unsigned> UnnamedGlobalIDs;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // Is basically a large Switch/Case delegating to all other select method.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectBitcast(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectStackSave(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectStackRestore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode,
                       unsigned NegateOpcode = 0) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectAnyOrAll(Register ResVReg, const SPIRVType *ResType,
                      MachineInstr &I, unsigned OpType) const;

  bool selectAll(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectAny(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;
  bool selectSplatVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  bool selectFmix(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  bool selectRsqrt(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *intTy, const SPIRVType *boolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectFreeze(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectAllocaArray(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  bool selectLog10(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectSpvThreadId(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectUnmergeValues(MachineInstr &I) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildZerosValF(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool wrapIntoSpecConstantOp(MachineInstr &I,
                              SmallVector<Register> &CompositeArgs) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MMI = &MF.getMMI().getObjFileInfo<SPIRVMachineModuleInfo>();
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  Register Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      Register DstReg = I.getOperand(0).getReg();
      Register SrcReg = I.getOperand(1).getReg();
      auto *Def = MRI->getVRegDef(SrcReg);
      if (isTypeFoldingSupported(Def->getOpcode())) {
        if (MRI->getType(DstReg).isPointer())
          MRI->setType(DstReg, LLT::scalar(32));
        bool Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        return Res;
      }
      MRI->replaceRegWith(SrcReg, DstReg);
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (isTypeFoldingSupported(Opcode) && Opcode != TargetOpcode::G_CONSTANT)
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::NMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::NMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);
  case TargetOpcode::G_FTAN:
    return selectExtInst(ResVReg, ResType, I, CL::tan, GL::Tan);
  case TargetOpcode::G_FACOS:
    return selectExtInst(ResVReg, ResType, I, CL::acos, GL::Acos);
  case TargetOpcode::G_FASIN:
    return selectExtInst(ResVReg, ResType, I, CL::asin, GL::Asin);
  case TargetOpcode::G_FATAN:
    return selectExtInst(ResVReg, ResType, I, CL::atan, GL::Atan);
  case TargetOpcode::G_FCOSH:
    return selectExtInst(ResVReg, ResType, I, CL::cosh, GL::Cosh);
  case TargetOpcode::G_FSINH:
    return selectExtInst(ResVReg, ResType, I, CL::sinh, GL::Sinh);
  case TargetOpcode::G_FTANH:
    return selectExtInst(ResVReg, ResType, I, CL::tanh, GL::Tanh);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_add_sat);
  case TargetOpcode::G_UADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_add_sat);
  case TargetOpcode::G_SSUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_sub_sat);
  case TargetOpcode::G_USUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_sub_sat);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating
    // global variables, initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
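    // Illustration (not from the original source): an initializer along the
    // lines of `getelementptr(i8, ptr addrspace(1) @gv, i64 8)` reaches
    // selection as G_GLOBAL_VALUE followed by G_PTR_ADD with a constant
    // offset, and is folded below into an OpSpecConstantOp carrying the
    // InBoundsPtrAccessChain opcode.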
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    (void)II;
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with negative value operand
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {

  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  if (STI.isOpenCLEnv() && I.getOperand(1).isReg()) {
    Register SrcReg = I.getOperand(1).getReg();
    bool IsGV = false;
    for (MachineRegisterInfo::def_instr_iterator DefIt =
             MRI->def_instr_begin(SrcReg);
         DefIt != MRI->def_instr_end(); DefIt = std::next(DefIt)) {
      if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
        IsGV = true;
        break;
      }
    }
    if (IsGV) {
      uint32_t SpecOpcode = 0;
      switch (Opcode) {
      case SPIRV::OpConvertPtrToU:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
        break;
      case SPIRV::OpConvertUToPtr:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
        break;
      }
      if (SpecOpcode)
        return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(SpecOpcode)
            .addUse(SrcReg)
            .constrainAllUses(TII, TRI, RBI);
    }
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType = OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static SPIRV::Scope::Scope getScope(SyncScope::ID Ord,
                                    SPIRVMachineModuleInfo *MMI) {
  if (Ord == SyncScope::SingleThread || Ord == MMI->Work_ItemSSID)
    return SPIRV::Scope::Invocation;
  else if (Ord == SyncScope::System || Ord == MMI->DeviceSSID)
    return SPIRV::Scope::Device;
  else if (Ord == MMI->WorkGroupSSID)
    return SPIRV::Scope::Workgroup;
  else if (Ord == MMI->AllSVMDevicesSSID)
    return SPIRV::Scope::CrossDevice;
  else if (Ord == MMI->SubGroupSSID)
    return SPIRV::Scope::Subgroup;

  // OpenCL approach is: "The functions that do not have memory_scope argument
  // have the same semantics as the corresponding functions with the
  // memory_scope argument set to memory_scope_device." See ref.: //
  // https://registry.khronos.org/OpenCL/specs/3.0-unified/html/OpenCL_C.html#atomic-functions
  // In our case if the scope is unknown, assuming that SPIR-V code is to be
  // consumed in an OpenCL environment, we use the same approach and set the
  // scope to memory_scope_device.
  return SPIRV::Scope::Device;
}
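
// Illustration (not from the original source): an LLVM IR fence such as
//   fence syncscope("workgroup") acquire
// carries the "workgroup" sync scope ID registered in SPIRVMachineModuleInfo,
// so getScope maps it to SPIRV::Scope::Workgroup, while a plain `fence
// acquire` (SyncScope::System) maps to SPIRV::Scope::Device.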

static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}
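
// Illustration (not from the original source): for a volatile load with
// align 4, the first overload above appends the literals
//   MemoryOperand::Volatile | MemoryOperand::Aligned, 4
// to the OpLoad being built, matching SPIR-V's encoding of a memory-operand
// mask followed by its extra operands.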

bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}
&I
) const {
792 unsigned OpOffset
= isa
<GIntrinsic
>(I
) ? 1 : 0;
793 Register StoreVal
= I
.getOperand(0 + OpOffset
).getReg();
794 Register Ptr
= I
.getOperand(1 + OpOffset
).getReg();
795 MachineBasicBlock
&BB
= *I
.getParent();
796 auto MIB
= BuildMI(BB
, I
, I
.getDebugLoc(), TII
.get(SPIRV::OpStore
))
799 if (!I
.getNumMemOperands()) {
800 assert(I
.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
||
802 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS
);
803 addMemoryOperands(I
.getOperand(2 + OpOffset
).getImm(), MIB
);
805 addMemoryOperands(*I
.memoperands_begin(), MIB
);
807 return MIB
.constrainAllUses(TII
, TRI
, RBI
);

bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConstIntArray(Val, Num, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such GV, add init, use buildGlobalVariable.
    Function &CurFunction = GR.CurMF->getFunction();
    Type *LLVMArrTy =
        ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num);
    // Module takes ownership of the global var.
    GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy,
                                            true, GlobalValue::InternalLinkage,
                                            Constant::getNullValue(LLVMArrTy));
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}

bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope =
      static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), MMI));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // uint32_t ScSem = static_cast<uint32_t>(
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  bool Result = false;
  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // Translation with negative value operand is requested
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result |= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
    ValueReg = TmpReg;
  }

  Result |= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(Ptr)
                .addUse(ScopeReg)
                .addUse(MemSemReg)
                .addUse(ValueReg)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = false;
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There was no "assign type" actions, let's fix this now
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, &SPIRV::IDRegClass);
      MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res |= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}

bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord, MMI));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope =
        static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), MMI));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true;
  default:
    return false;
  }
}

// In SPIR-V address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
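// Illustration (not from the original source): a Workgroup-to-Function cast
// therefore lowers to the two-instruction sequence
//   %gen = OpPtrCastToGeneric %workgroup_ptr
//   %res = OpGenericCastToPtr %gen
// which is what the generic-intermediary branch in the function below emits.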
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast user is single and in OpConstantComposite or
  // OpVariable, we should select OpSpecConstantOp.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // don't generate a cast between identical storage classes
  if (SrcSC == DstSC)
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(SrcPtr)
        .constrainAllUses(TII, TRI, RBI);

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }

  // Check if instructions from the SPV_INTEL_usm_storage_classes extension may
  // be used
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
  if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::Generic)
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  if (SrcSC == SPIRV::StorageClass::Generic && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);

  // Bitcast for pointers requires that the address spaces must match
  return false;
}

static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I,
                                              unsigned OpAnyOrAll) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);

  if (!InputType)
    report_fatal_error("Input Type could not be determined.");

  bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
  bool IsVectorTy = InputType->getOpcode() == SPIRV::OpTypeVector;
  if (IsBoolTy && !IsVectorTy) {
    assert(ResVReg == I.getOperand(0).getReg());
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(InputRegister)
        .constrainAllUses(TII, TRI, RBI);
  }

  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
  unsigned SpirvNotEqualId =
      IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
  SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(I, TII);
  SPIRVType *SpvBoolTy = SpvBoolScalarTy;
  Register NotEqualReg = ResVReg;

  if (IsVectorTy) {
    NotEqualReg = IsBoolTy ? InputRegister
                           : MRI->createVirtualRegister(&SPIRV::IDRegClass);
    const unsigned NumElts = InputType->getOperand(2).getImm();
    SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
  }

  if (!IsBoolTy) {
    Register ConstZeroReg =
        IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);

    BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
        .addDef(NotEqualReg)
        .addUse(GR.getSPIRVTypeID(SpvBoolTy))
        .addUse(InputRegister)
        .addUse(ConstZeroReg)
        .constrainAllUses(TII, TRI, RBI);
  }

  if (!IsVectorTy)
    return true;

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
      .addUse(NotEqualReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectAll(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAll);
}

bool SPIRVInstructionSelector::selectAny(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAny);
}

bool SPIRVInstructionSelector::selectFmix(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {

  assert(I.getNumOperands() == 5);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  assert(I.getOperand(4).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::FMix)
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectRsqrt(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {

  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::InverseSqrt)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  // There is no way to implement `freeze` correctly without support on SPIR-V
  // standard side, but we may at least address a simple (static) case when
  // undef/poison value presence is obvious. The main benefit of even
  // incomplete `freeze` support is preventing of translation from crashing due
  // to lack of support on legalization and instruction selection steps.
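  // Illustration (not from the original source): `%y = freeze i32 undef`
  // arrives here with its operand defined by G_IMPLICIT_DEF (via ASSIGN_TYPE)
  // or OpUndef, and is materialized as OpConstantNull of the result type;
  // any other operand is simply copied through.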
  if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
    return false;
  Register OpReg = I.getOperand(1).getReg();
  if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
    Register Reg;
    switch (Def->getOpcode()) {
    case SPIRV::ASSIGN_TYPE:
      if (MachineInstr *AssignToDef =
              MRI->getVRegDef(Def->getOperand(1).getReg())) {
        if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
          Reg = Def->getOperand(2).getReg();
      }
      break;
    case SPIRV::OpUndef:
      Reg = Def->getOperand(1).getReg();
      break;
    }
    unsigned DestOpCode;
    if (Reg.isValid()) {
      DestOpCode = SPIRV::OpConstantNull;
    } else {
      DestOpCode = TargetOpcode::COPY;
      Reg = OpReg;
    }
    return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
        .addDef(I.getOperand(0).getReg())
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  return false;
}

bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}

static unsigned getArrayComponentCount(MachineRegisterInfo *MRI,
                                       const SPIRVType *ResType) {
  Register OpReg = ResType->getOperand(2).getReg();
  SPIRVType *OpDef = MRI->getVRegDef(OpReg);
  if (!OpDef)
    return 0;
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
      OpDef->getOperand(1).isReg()) {
    if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
      OpDef = RefDef;
  }
  unsigned N = OpDef->getOpcode() == TargetOpcode::G_CONSTANT
                   ? OpDef->getOperand(1).getCImm()->getValue().getZExtValue()
                   : 0;
  return N;
}
1448 static bool isConstReg(MachineRegisterInfo
*MRI
, SPIRVType
*OpDef
,
1449 SmallPtrSet
<SPIRVType
*, 4> &Visited
) {
1450 if (OpDef
->getOpcode() == SPIRV::ASSIGN_TYPE
&&
1451 OpDef
->getOperand(1).isReg()) {
1452 if (SPIRVType
*RefDef
= MRI
->getVRegDef(OpDef
->getOperand(1).getReg()))
1456 if (Visited
.contains(OpDef
))
1458 Visited
.insert(OpDef
);
1460 unsigned Opcode
= OpDef
->getOpcode();
1462 case TargetOpcode::G_CONSTANT
:
1463 case TargetOpcode::G_FCONSTANT
:
1465 case TargetOpcode::G_INTRINSIC
:
1466 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
:
1467 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS
:
1468 return cast
<GIntrinsic
>(*OpDef
).getIntrinsicID() ==
1469 Intrinsic::spv_const_composite
;
1470 case TargetOpcode::G_BUILD_VECTOR
:
1471 case TargetOpcode::G_SPLAT_VECTOR
: {
1472 for (unsigned i
= OpDef
->getNumExplicitDefs(); i
< OpDef
->getNumOperands();
1474 SPIRVType
*OpNestedDef
=
1475 OpDef
->getOperand(i
).isReg()
1476 ? MRI
->getVRegDef(OpDef
->getOperand(i
).getReg())
1478 if (OpNestedDef
&& !isConstReg(MRI
, OpNestedDef
, Visited
))
1487 // Return true if the virtual register represents a constant
1488 static bool isConstReg(MachineRegisterInfo
*MRI
, Register OpReg
) {
1489 SmallPtrSet
<SPIRVType
*, 4> Visited
;
1490 if (SPIRVType
*OpDef
= MRI
->getVRegDef(OpReg
))
1491 return isConstReg(MRI
, OpDef
, Visited
);

bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  unsigned N = 0;
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    N = GR.getScalarOrVectorComponentCount(ResType);
  else if (ResType->getOpcode() == SPIRV::OpTypeArray)
    N = getArrayComponentCount(MRI, ResType);
  else
    report_fatal_error("Cannot select G_SPLAT_VECTOR with a non-vector result");

  unsigned OpIdx = I.getNumExplicitDefs();
  if (!I.getOperand(OpIdx).isReg())
    report_fatal_error("Unexpected argument in G_SPLAT_VECTOR");

  // check if we may construct a constant vector
  Register OpReg = I.getOperand(OpIdx).getReg();
  bool IsConst = isConstReg(MRI, OpReg);

  if (!IsConst && N < 2)
    report_fatal_error(
        "There must be at least two constituent operands in a vector");

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(IsConst ? SPIRV::OpConstantComposite
                                     : SPIRV::OpCompositeConstruct))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = 0; i < N; ++i)
    MIB.addUse(OpReg);
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}
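
// Operand renderers referenced from the TableGen-generated matcher; the
// OpIdx == -1 asserts mark them as custom renderers applied to the whole
// source instruction rather than to a single operand.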
void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}
void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
                                           const MachineInstr &I,
                                           int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}
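
// Materialize a 32-bit integer constant, reusing a previously emitted
// register for the same value when the global registry already holds one.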
Register
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
                                           const SPIRVType *ResType) const {
  Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
  const SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Find a constant in DT or build a new one.
  auto ConstInt = ConstantInt::get(LLVMTy, Val);
  Register NewReg = GR.find(ConstInt, GR.CurMF);
  if (!NewReg.isValid()) {
    NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(ConstInt, GR.CurMF, NewReg);
    MachineInstr *MI;
    MachineBasicBlock &BB = *I.getParent();
    if (Val == 0) {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty));
    } else {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty))
               .addImm(APInt(32, Val).getZExtValue());
    }
    constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
  }
  return NewReg;
}
bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
  return selectCmp(ResVReg, ResType, CmpOp, I);
}
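
// Constant-building helpers: typed zero/one values, scalar or vector. Note
// that OpenCL builds prefer OpConstantNull for zeros where permitted.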
Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
  bool ZeroAsNull = STI.isOpenCLEnv();
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(0UL, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
}
static APFloat getZeroFP(const Type *LLVMFloatTy) {
  if (!LLVMFloatTy)
    return APFloat::getZero(APFloat::IEEEsingle());
  switch (LLVMFloatTy->getScalarType()->getTypeID()) {
  case Type::HalfTyID:
    return APFloat::getZero(APFloat::IEEEhalf());
  default:
  case Type::FloatTyID:
    return APFloat::getZero(APFloat::IEEEsingle());
  case Type::DoubleTyID:
    return APFloat::getZero(APFloat::IEEEdouble());
  }
}
Register SPIRVInstructionSelector::buildZerosValF(const SPIRVType *ResType,
                                                  MachineInstr &I) const {
  // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
  bool ZeroAsNull = STI.isOpenCLEnv();
  APFloat VZero = getZeroFP(GR.getTypeForSPIRVType(ResType));
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(VZero, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstFP(VZero, I, ResType, TII, ZeroAsNull);
}
Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  APInt One =
      AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(One.getZExtValue(), I, ResType, TII);
  return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
}
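
// Boolean extension is implemented as OpSelect between a ones constant and a
// zeros constant; this helper is also reused by selectExt and selectIToF
// below when their source operand is a bool.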
bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I,
                                            bool IsSigned) const {
  // To extend a bool, we need to use OpSelect between constants.
  Register ZeroReg = buildZerosVal(ResType, I);
  Register OneReg = buildOnesVal(IsSigned, ResType, I);
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
  unsigned Opcode =
      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .addUse(OneReg)
      .addUse(ZeroReg)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I, bool IsSigned,
                                          unsigned Opcode) const {
  Register SrcReg = I.getOperand(1).getReg();
  // We can convert bool value directly to float type without OpConvert*ToF,
  // however the translator generates OpSelect+OpConvert*ToF, so we do the same.
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
    unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
    SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
    if (ResType->getOpcode() == SPIRV::OpTypeVector) {
      const unsigned NumElts = ResType->getOperand(2).getImm();
      TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
    }
    SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    selectSelect(SrcReg, TmpType, I, false);
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
}
bool SPIRVInstructionSelector::selectExt(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I, bool IsSigned) const {
  Register SrcReg = I.getOperand(1).getReg();
  if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
    return selectSelect(ResVReg, ResType, I, IsSigned);

  SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
  if (SrcType == ResType)
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(SrcReg)
        .constrainAllUses(TII, TRI, RBI);

  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}
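
// int->bool truncation keeps only the lowest bit: (x & 1) != 0.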
bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
                                               Register ResVReg,
                                               MachineInstr &I,
                                               const SPIRVType *IntTy,
                                               const SPIRVType *BoolTy) const {
  // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
  Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
  unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
  Register Zero = buildZerosVal(IntTy, I);
  Register One = buildOnesVal(false, IntTy, I);
  MachineBasicBlock &BB = *I.getParent();
  BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(BitIntReg)
      .addUse(GR.getSPIRVTypeID(IntTy))
      .addUse(IntReg)
      .addUse(One)
      .constrainAllUses(TII, TRI, RBI);
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(BoolTy))
      .addUse(BitIntReg)
      .addUse(Zero)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  Register IntReg = I.getOperand(1).getReg();
  const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
  if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
    return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
  if (ArgType == ResType)
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(IntReg)
        .constrainAllUses(TII, TRI, RBI);
  bool IsSigned = GR.isScalarOrVectorSigned(ResType);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}
bool SPIRVInstructionSelector::selectConst(Register ResVReg,
                                           const SPIRVType *ResType,
                                           const APInt &Imm,
                                           MachineInstr &I) const {
  unsigned TyOpcode = ResType->getOpcode();
  assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
  MachineBasicBlock &BB = *I.getParent();
  if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
      Imm.isZero())
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .constrainAllUses(TII, TRI, RBI);
  if (TyOpcode == SPIRV::OpTypeInt) {
    assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
    Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
    if (Reg == ResVReg)
      return true;
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  // <=32-bit integers should be caught by the sdag pattern.
  assert(Imm.getBitWidth() > 32);
  addNumImm(Imm, MIB);
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}
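
// Small helpers to recognize and fold constant operands that may still be
// wrapped in an ASSIGN_TYPE pseudo: isImm tests for a constant, foldImm
// extracts its value.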
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  assert(MO.isReg());
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() == SPIRV::ASSIGN_TYPE) {
    assert(TypeInst->getOperand(1).isReg());
    MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
    return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
  }
  return TypeInst->getOpcode() == SPIRV::OpConstantI;
}
static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() == SPIRV::OpConstantI)
    return TypeInst->getOperand(2).getImm();
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
  return ImmInst->getOperand(1).getCImm()->getZExtValue();
}
bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // object to insert
                 .addUse(I.getOperand(3).getReg())
                 // composite to insert into
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 4; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 3; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (isImm(I.getOperand(4), MRI))
    return selectInsertVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  if (isImm(I.getOperand(3), MRI))
    return selectExtractVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
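
// GEP lowering; operand 2 of the spv_gep intrinsic carries the "inbounds"
// flag as an immediate.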
bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const bool IsGEPInBounds = I.getOperand(2).getImm();

  // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only
  // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan however,
  // we have to use Op[InBounds]AccessChain.
  const unsigned Opcode = STI.isVulkanEnv()
                              ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
                                               : SPIRV::OpAccessChain)
                              : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
                                               : SPIRV::OpPtrAccessChain);

  auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to get a pointer to.
                 .addUse(I.getOperand(3).getReg());
  // Adding indices.
  const unsigned StartingIndex =
      (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
          ? 5
          : 4;
  for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
    Res.addUse(I.getOperand(i).getReg());
  return Res.constrainAllUses(TII, TRI, RBI);
}
// Maybe wrap a value into OpSpecConstantOp
bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
    MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
  bool Result = true;
  unsigned Lim = I.getNumExplicitOperands();
  for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
    Register OpReg = I.getOperand(i).getReg();
    SPIRVType *OpDefine = MRI->getVRegDef(OpReg);
    SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
    SmallPtrSet<SPIRVType *, 4> Visited;
    if (!OpDefine || !OpType || isConstReg(MRI, OpDefine, Visited) ||
        OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
        GR.isAggregateType(OpType)) {
      // The case of G_ADDRSPACE_CAST inside spv_const_composite() is processed
      // by selectAddrSpaceCast()
      CompositeArgs.push_back(OpReg);
      continue;
    }
    MachineFunction *MF = I.getMF();
    Register WrapReg = GR.find(OpDefine, MF);
    if (WrapReg.isValid()) {
      CompositeArgs.push_back(WrapReg);
      continue;
    }
    // Create a new register for the wrapper
    WrapReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    GR.add(OpDefine, MF, WrapReg);
    CompositeArgs.push_back(WrapReg);
    // Decorate the wrapper register and generate a new instruction
    MRI->setType(WrapReg, LLT::pointer(0, 32));
    GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
    MachineBasicBlock &BB = *I.getParent();
    Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                 .addDef(WrapReg)
                 .addUse(GR.getSPIRVTypeID(OpType))
                 .addImm(static_cast<uint32_t>(SPIRV::Opcode::Bitcast))
                 .addUse(OpReg)
                 .constrainAllUses(TII, TRI, RBI);
    if (!Result)
      break;
  }
  return Result;
}
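
// Central dispatch for SPIR-V target intrinsics that reach instruction
// selection; anything unhandled falls through to a fatal diagnostic.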
bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
  switch (IID) {
  case Intrinsic::spv_load:
    return selectLoad(ResVReg, ResType, I);
  case Intrinsic::spv_store:
    return selectStore(I);
  case Intrinsic::spv_extractv:
    return selectExtractVal(ResVReg, ResType, I);
  case Intrinsic::spv_insertv:
    return selectInsertVal(ResVReg, ResType, I);
  case Intrinsic::spv_extractelt:
    return selectExtractElt(ResVReg, ResType, I);
  case Intrinsic::spv_insertelt:
    return selectInsertElt(ResVReg, ResType, I);
  case Intrinsic::spv_gep:
    return selectGEP(ResVReg, ResType, I);
  case Intrinsic::spv_unref_global:
  case Intrinsic::spv_init_global: {
    MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
    MachineInstr *Init = I.getNumExplicitOperands() > 2
                             ? MRI->getVRegDef(I.getOperand(2).getReg())
                             : nullptr;
    assert(MI);
    return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
  }
  case Intrinsic::spv_undef: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_const_composite: {
    // If no values are attached, the composite is null constant.
    bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
    // Select a proper instruction.
    unsigned Opcode = SPIRV::OpConstantNull;
    SmallVector<Register> CompositeArgs;
    if (!IsNull) {
      Opcode = SPIRV::OpConstantComposite;
      if (!wrapIntoSpecConstantOp(I, CompositeArgs))
        return false;
    }
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    // skip type MD node we already used when generating assign.type for this
    if (!IsNull) {
      for (Register OpReg : CompositeArgs)
        MIB.addUse(OpReg);
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_assign_name: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
    MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
    for (unsigned i = I.getNumExplicitDefs() + 2;
         i < I.getNumExplicitOperands(); ++i) {
      MIB.addImm(I.getOperand(i).getImm());
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_switch: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      if (I.getOperand(i).isReg())
        MIB.addReg(I.getOperand(i).getReg());
      else if (I.getOperand(i).isCImm())
        addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
      else if (I.getOperand(i).isMBB())
        MIB.addMBB(I.getOperand(i).getMBB());
      else
        llvm_unreachable("Unexpected OpSwitch operand");
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_cmpxchg:
    return selectAtomicCmpXchg(ResVReg, ResType, I);
  case Intrinsic::spv_unreachable:
    BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
    break;
  case Intrinsic::spv_alloca:
    return selectFrameIndex(ResVReg, ResType, I);
  case Intrinsic::spv_alloca_array:
    return selectAllocaArray(ResVReg, ResType, I);
  case Intrinsic::spv_assume:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
          .addUse(I.getOperand(1).getReg());
    break;
  case Intrinsic::spv_expect:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg())
          .addUse(I.getOperand(3).getReg());
    break;
  case Intrinsic::spv_thread_id:
    return selectSpvThreadId(ResVReg, ResType, I);
  case Intrinsic::spv_all:
    return selectAll(ResVReg, ResType, I);
  case Intrinsic::spv_any:
    return selectAny(ResVReg, ResType, I);
  case Intrinsic::spv_lerp:
    return selectFmix(ResVReg, ResType, I);
  case Intrinsic::spv_rsqrt:
    return selectRsqrt(ResVReg, ResType, I);
  case Intrinsic::spv_lifetime_start:
  case Intrinsic::spv_lifetime_end: {
    unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
                                                       : SPIRV::OpLifetimeStop;
    int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
    Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
    unsigned PointeeOpType = GR.getPointeeTypeOp(PtrReg);
    bool IsNonvoidPtr =
        PointeeOpType != 0 && PointeeOpType != SPIRV::OpTypeVoid;
    if (Size == -1 || IsNonvoidPtr)
      Size = 0;
    BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)).addUse(PtrReg).addImm(Size);
  } break;
  default: {
    std::string DiagMsg;
    raw_string_ostream OS(DiagMsg);
    I.print(OS);
    DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
    report_fatal_error(DiagMsg.c_str(), false);
  }
  }
  return true;
}
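
// Dynamically sized allocas map to OpVariableLengthArrayINTEL, provided by
// the SPV_INTEL_variable_length_array extension.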
bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // there was an allocation size parameter to the allocation instruction
  // that is not 1
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(),
                 TII.get(SPIRV::OpVariableLengthArrayINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  // Change order of instructions if needed: all OpVariable instructions in a
  // function must be the first instructions in the first block
  MachineFunction *MF = I.getParent()->getParent();
  MachineBasicBlock *MBB = &MF->front();
  auto It = MBB->SkipPHIsAndLabels(MBB->begin()), E = MBB->end();
  bool IsHeader = false;
  unsigned Opcode;
  for (; It != E && It != I; ++It) {
    Opcode = It->getOpcode();
    if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
      IsHeader = true;
    } else if (IsHeader &&
               !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
      ++It;
      break;
    }
  }
  return BuildMI(*MBB, It, It->getDebugLoc(), TII.get(SPIRV::OpVariable))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. We can use
  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
  // first, so can generate an OpBranchConditional here. If there is no
  // G_BRCOND, we just use OpBranch for a regular unconditional branch.
  const MachineInstr *PrevI = I.getPrevNode();
  MachineBasicBlock &MBB = *I.getParent();
  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
    return BuildMI(MBB, I, I.getDebugLoc(),
                   TII.get(SPIRV::OpBranchConditional))
        .addUse(PrevI->getOperand(0).getReg())
        .addMBB(PrevI->getOperand(1).getMBB())
        .addMBB(I.getOperand(0).getMBB())
        .constrainAllUses(TII, TRI, RBI);
  }
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
      .addMBB(I.getOperand(0).getMBB())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. For an
  // explicit conditional branch with no fallthrough, we use both a G_BR and a
  // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
  // generate the OpBranchConditional in selectBranch above.
  //
  // If an OpBranchConditional has been generated, we simply return, as the
  // work is already done. If there is no OpBranchConditional, LLVM must be
  // relying on implicit fallthrough to the next basic block, so we need to
  // create an OpBranchConditional with an explicit "false" argument pointing
  // to the next basic block that LLVM would fall through to.
  const MachineInstr *NextI = I.getNextNode();
  // Check if this has already been successfully selected.
  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
    return true;
  // Must be relying on implicit block fallthrough, so generate an
  // OpBranchConditional with the "next" basic block as the "false" target.
  MachineBasicBlock &MBB = *I.getParent();
  unsigned NextMBBNum = MBB.getNextNode()->getNumber();
  MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
      .addUse(I.getOperand(0).getReg())
      .addMBB(I.getOperand(1).getMBB())
      .addMBB(NextMBB)
      .constrainAllUses(TII, TRI, RBI);
}
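
// G_PHI lowers directly to OpPhi; (value, predecessor-block) operand pairs
// are forwarded unchanged.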
bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  const unsigned NumOps = I.getNumOperands();
  for (unsigned i = 1; i < NumOps; i += 2) {
    MIB.addUse(I.getOperand(i + 0).getReg());
    MIB.addMBB(I.getOperand(i + 1).getMBB());
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectGlobalValue(
    Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
  // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
  MachineIRBuilder MIRBuilder(I);
  const GlobalValue *GV = I.getOperand(1).getGlobal();
  Type *GVType = toTypedPointer(GR.getDeducedGlobalValueType(GV));
  SPIRVType *PointerBaseType;
  if (GVType->isArrayTy()) {
    SPIRVType *ArrayElementType =
        GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
                                SPIRV::AccessQualifier::ReadWrite, false);
    PointerBaseType = GR.getOrCreateSPIRVArrayType(
        ArrayElementType, GVType->getArrayNumElements(), I, TII);
  } else {
    PointerBaseType = GR.getOrCreateSPIRVType(
        GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
  }
  SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
      PointerBaseType, I, TII,
      addressSpaceToStorageClass(GV->getAddressSpace(), STI));

  std::string GlobalIdent;
  if (!GV->hasName()) {
    unsigned &ID = UnnamedGlobalIDs[GV];
    if (ID == 0)
      ID = UnnamedGlobalIDs.size();
    GlobalIdent = "__unnamed_" + Twine(ID).str();
  } else {
    GlobalIdent = GV->getGlobalIdentifier();
  }

  // Behaviour of functions as operands depends on availability of the
  // corresponding extension (SPV_INTEL_function_pointers):
  // - If there is an extension to operate with functions as operands:
  // We create a proper constant operand and evaluate a correct type for a
  // function pointer.
  // - Without the required extension:
  // We have functions as operands in tests with blocks of instruction e.g. in
  // transcoding/global_block.ll. These operands are not used and should be
  // substituted by zero constants. Their type is expected to be always
  // OpTypePointer Function %uchar.
  if (isa<Function>(GV)) {
    const Constant *ConstVal = GV;
    MachineBasicBlock &BB = *I.getParent();
    Register NewReg = GR.find(ConstVal, GR.CurMF);
    if (!NewReg.isValid()) {
      Register NewReg = ResVReg;
      GR.add(ConstVal, GR.CurMF, NewReg);
      const Function *GVFun =
          STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
              ? dyn_cast<Function>(GV)
              : nullptr;
      if (GVFun) {
        // References to a function via function pointers generate virtual
        // registers without a definition. We will resolve it later, during
        // module analysis stage.
        MachineRegisterInfo *MRI = MIRBuilder.getMRI();
        Register FuncVReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
        MRI->setRegClass(FuncVReg, &SPIRV::IDRegClass);
        MachineInstrBuilder MB =
            BuildMI(BB, I, I.getDebugLoc(),
                    TII.get(SPIRV::OpConstantFunctionPointerINTEL))
                .addDef(NewReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(FuncVReg);
        // mapping the function pointer to the used Function
        GR.recordFunctionPointer(&MB.getInstr()->getOperand(2), GVFun);
        return MB.constrainAllUses(TII, TRI, RBI);
      }
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
          .addDef(NewReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .constrainAllUses(TII, TRI, RBI);
    }
    assert(NewReg != ResVReg);
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(NewReg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto GlobalVar = cast<GlobalVariable>(GV);
  assert(GlobalVar->getName() != "llvm.global.annotations");

  bool HasInit = GlobalVar->hasInitializer() &&
                 !isa<UndefValue>(GlobalVar->getInitializer());
  // Skip empty declaration for GVs with initializers till we get the decl with
  // passed initializer.
  if (HasInit && !Init)
    return true;

  unsigned AddrSpace = GV->getAddressSpace();
  SPIRV::StorageClass::StorageClass Storage =
      addressSpaceToStorageClass(AddrSpace, STI);
  bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
                  Storage != SPIRV::StorageClass::Function;
  SPIRV::LinkageType::LinkageType LnkType =
      (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
          ? SPIRV::LinkageType::Import
          : (GV->getLinkage() == GlobalValue::LinkOnceODRLinkage &&
                     STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
                 ? SPIRV::LinkageType::LinkOnceODR
                 : SPIRV::LinkageType::Export);

  Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
                                        Storage, Init, GlobalVar->isConstant(),
                                        HasLnkTy, LnkType, MIRBuilder, true);
  return Reg.isValid();
}
bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
    return selectExtInst(ResVReg, ResType, I, CL::log10);
  }

  // There is no log10 instruction in the GLSL Extended Instruction set, so it
  // is implemented as:
  // log10(x) = log2(x) * (1 / log2(10))
  //          = log2(x) * 0.30103

  MachineIRBuilder MIRBuilder(I);
  MachineBasicBlock &BB = *I.getParent();

  // Build log2(x).
  Register VarReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  bool Result =
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
          .addDef(VarReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
          .addImm(GL::Log2)
          .add(I.getOperand(1))
          .constrainAllUses(TII, TRI, RBI);

  // Build 0.30103.
  assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
         ResType->getOpcode() == SPIRV::OpTypeFloat);
  // TODO: Add matrix implementation once supported by the HLSL frontend.
  const SPIRVType *SpirvScalarType =
      ResType->getOpcode() == SPIRV::OpTypeVector
          ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
          : ResType;
  Register ScaleReg =
      GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);

  // Multiply log2(x) by 0.30103 to get log10(x) result.
  auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
                    ? SPIRV::OpVectorTimesScalar
                    : SPIRV::OpFMulS;
  Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(VarReg)
                .addUse(ScaleReg)
                .constrainAllUses(TII, TRI, RBI);

  return Result;
}
bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // DX intrinsic: @llvm.dx.thread.id(i32)
  // ID  Name      Description
  // 93  ThreadId  reads the thread ID

  MachineIRBuilder MIRBuilder(I);
  const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
  const SPIRVType *Vec3Ty =
      GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
  const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
      Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);

  // Create new register for GlobalInvocationID builtin variable.
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
  MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 32));
  GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Build GlobalInvocationID global variable with the necessary decorations.
  Register Variable = GR.buildGlobalVariable(
      NewRegister, PtrType,
      getLinkStringForBuiltIn(SPIRV::BuiltIn::GlobalInvocationId), nullptr,
      SPIRV::StorageClass::Input, nullptr, true, true,
      SPIRV::LinkageType::Import, MIRBuilder, false);

  // Create new register for loading value.
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  MIRBuilder.getMRI()->setType(LoadedRegister, LLT::pointer(0, 32));
  GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());

  // Load v3uint value from the global variable.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
      .addDef(LoadedRegister)
      .addUse(GR.getSPIRVTypeID(Vec3Ty))
      .addUse(Variable);

  // Get Thread ID index. Expecting operand is a constant immediate value,
  // wrapped in a type assignment.
  assert(I.getOperand(2).isReg());
  Register ThreadIdReg = I.getOperand(2).getReg();
  SPIRVType *ConstTy = this->MRI->getVRegDef(ThreadIdReg);
  assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
         ConstTy->getOperand(1).isReg());
  Register ConstReg = ConstTy->getOperand(1).getReg();
  const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
  assert(Const && Const->getOpcode() == TargetOpcode::G_CONSTANT);
  const llvm::APInt &Val = Const->getOperand(1).getCImm()->getValue();
  const uint32_t ThreadId = Val.getZExtValue();

  // Extract the thread ID from the loaded vector value.
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(LoadedRegister)
                 .addImm(ThreadId);
  return MIB.constrainAllUses(TII, TRI, RBI);
}
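
// Factory entry point used by the SPIR-V target to construct this selector.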
namespace llvm {
InstructionSelector *
createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                               const SPIRVSubtarget &Subtarget,
                               const RegisterBankInfo &RBI) {
  return new SPIRVInstructionSelector(TM, Subtarget, RBI);
}
} // namespace llvm