//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "MCTargetDesc/SPIRVMCTargetDesc.h"
#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"
#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET
class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // It is basically a large switch/case delegating to all the other select
  // methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *intTy, const SPIRVType *boolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  bool selectLog10(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
};

} // end anonymous namespace
#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);
bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  Register Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      auto *Def = MRI->getVRegDef(I.getOperand(1).getReg());
      if (isTypeFoldingSupported(Def->getOpcode())) {
        auto Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(I.getOperand(1).getReg(), I.getOperand(0).getReg());
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(ResVReg, LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}
bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  assert(!isTypeFoldingSupported(I.getOpcode()) ||
         I.getOpcode() == TargetOpcode::G_CONSTANT);
  const unsigned Opcode = I.getOpcode();
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::FMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::FMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating
    // global variables, initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  default:
    return false;
  }
}
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}
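
// Emit OpExtInst using the first extended instruction set in Insts that the
// current subtarget supports.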
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}
bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}
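
// Map a SyncScope ID onto the SPIR-V scope used by atomic and barrier
// instructions.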
static SPIRV::Scope::Scope getScope(SyncScope::ID Ord) {
  switch (Ord) {
  case SyncScope::SingleThread:
    return SPIRV::Scope::Invocation;
  case SyncScope::System:
    return SPIRV::Scope::Device;
  default:
    llvm_unreachable("Unsupported synchronization Scope ID.");
  }
}
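
// Append the SPIR-V memory operand mask matching the volatile/nontemporal/
// alignment properties of a memory access, taken either from a machine
// memory operand or from raw memory-operand flags.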
static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}
bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConsIntArray(Val, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such GV, add init, use buildGlobalVariable.
    Type *LLVMArrTy = ArrayType::get(
        IntegerType::get(GR.CurMF->getFunction().getContext(), 8), Num);
    GlobalVariable *GV =
        new GlobalVariable(LLVMArrTy, true, GlobalValue::InternalLinkage);
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}
bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Ptr)
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}
static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}
// In SPIR-V address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast user is single and is an OpConstantComposite or
  // OpVariable, we should select OpSpecConstantOp.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }
  // TODO Should this case just be disallowed completely?
  // We're casting 2 other arbitrary address spaces, so have to bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}
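
// Map a G_FCMP predicate onto the corresponding SPIR-V ordered/unordered
// floating-point comparison opcode.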
static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}
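
// Map a G_ICMP predicate onto the corresponding SPIR-V signed/unsigned
// integer comparison opcode.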
static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}
static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}
bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
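
// Lower G_BUILD_VECTOR of G_CONSTANT/G_FCONSTANT operands into an
// OpConstantComposite.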
bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}
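
// Shared helper for G_ICMP/G_FCMP selection: emit the two-operand comparison
// instruction CmpOpc over the operands of I.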
bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}
void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}

void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
                                           const MachineInstr &I,
                                           int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}
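
// Materialize a 32-bit integer constant as OpConstantI (or OpConstantNull
// for zero), reusing an existing register when the same constant is already
// tracked by the global registry.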
Register
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
                                           const SPIRVType *ResType) const {
  Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
  const SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Find a constant in DT or build a new one.
  auto ConstInt = ConstantInt::get(LLVMTy, Val);
  Register NewReg = GR.find(ConstInt, GR.CurMF);
  if (!NewReg.isValid()) {
    NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(ConstInt, GR.CurMF, NewReg);
    MachineInstr *MI;
    MachineBasicBlock &BB = *I.getParent();
    if (Val == 0) {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty));
    } else {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty))
               .addImm(APInt(32, Val).getZExtValue());
    }
    constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
  }
  return NewReg;
}
bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
  return selectCmp(ResVReg, ResType, CmpOp, I);
}
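
// Helpers that materialize all-zeros and all-ones (or 1) constants of the
// given scalar or vector type.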
Register
SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                        MachineInstr &I) const {
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConsIntVector(0, I, ResType, TII);
  return GR.getOrCreateConstInt(0, I, ResType, TII);
}

Register
SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                       const SPIRVType *ResType,
                                       MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  APInt One =
      AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConsIntVector(One.getZExtValue(), I, ResType, TII);
  return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
}
bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I,
                                            bool IsSigned) const {
  // To extend a bool, we need to use OpSelect between constants.
  Register ZeroReg = buildZerosVal(ResType, I);
  Register OneReg = buildOnesVal(IsSigned, ResType, I);
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
  unsigned Opcode =
      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .addUse(OneReg)
      .addUse(ZeroReg)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I, bool IsSigned,
                                          unsigned Opcode) const {
  Register SrcReg = I.getOperand(1).getReg();
  // We can convert bool value directly to float type without OpConvert*ToF,
  // however the translator generates OpSelect+OpConvert*ToF, so we do the same.
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
    unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
    SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
    if (ResType->getOpcode() == SPIRV::OpTypeVector) {
      const unsigned NumElts = ResType->getOperand(2).getImm();
      TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
    }
    SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    selectSelect(SrcReg, TmpType, I, false);
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
}
bool SPIRVInstructionSelector::selectExt(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I, bool IsSigned) const {
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool))
    return selectSelect(ResVReg, ResType, I, IsSigned);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}
bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
                                               Register ResVReg,
                                               MachineInstr &I,
                                               const SPIRVType *IntTy,
                                               const SPIRVType *BoolTy) const {
  // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
  Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
  unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
  Register Zero = buildZerosVal(IntTy, I);
  Register One = buildOnesVal(false, IntTy, I);
  MachineBasicBlock &BB = *I.getParent();
  BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(BitIntReg)
      .addUse(GR.getSPIRVTypeID(IntTy))
      .addUse(IntReg)
      .addUse(One)
      .constrainAllUses(TII, TRI, RBI);
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(BoolTy))
      .addUse(BitIntReg)
      .addUse(Zero)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
    Register IntReg = I.getOperand(1).getReg();
    const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
    return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
  }
  bool IsSigned = GR.isScalarOrVectorSigned(ResType);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}
bool SPIRVInstructionSelector::selectConst(Register ResVReg,
                                           const SPIRVType *ResType,
                                           const APInt &Imm,
                                           MachineInstr &I) const {
  unsigned TyOpcode = ResType->getOpcode();
  assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
  MachineBasicBlock &BB = *I.getParent();
  if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
      Imm.isZero())
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .constrainAllUses(TII, TRI, RBI);
  if (TyOpcode == SPIRV::OpTypeInt) {
    assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
    Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
    if (Reg == ResVReg)
      return true;
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  // <=32-bit integers should be caught by the sdag pattern.
  assert(Imm.getBitWidth() > 32);
  addNumImm(Imm, MIB);
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}
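
// Return true if the operand's virtual register is ultimately defined by a
// G_CONSTANT (looking through the ASSIGN_TYPE pseudo).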
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  assert(MO.isReg());
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
    return false;
  assert(TypeInst->getOperand(1).isReg());
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
}
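
// Return the constant value behind an ASSIGN_TYPE'd G_CONSTANT operand;
// only meaningful when isImm() holds for the same operand.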
static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
  return ImmInst->getOperand(1).getCImm()->getZExtValue();
}
bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // object to insert
                 .addUse(I.getOperand(3).getReg())
                 // composite to insert into
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 4; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 3; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (isImm(I.getOperand(4), MRI))
    return selectInsertVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  if (isImm(I.getOperand(3), MRI))
    return selectExtractVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const bool IsGEPInBounds = I.getOperand(2).getImm();

  // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only
  // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan however,
  // we have to use Op[InBounds]AccessChain.
  const unsigned Opcode = STI.isVulkanEnv()
                              ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
                                               : SPIRV::OpAccessChain)
                              : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
                                               : SPIRV::OpPtrAccessChain);

  auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to get a pointer to.
                 .addUse(I.getOperand(3).getReg());
  // Adding indices.
  const unsigned StartingIndex =
      (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
          ? 5
          : 4;
  for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
    Res.addUse(I.getOperand(i).getReg());
  return Res.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  switch (cast<GIntrinsic>(I).getIntrinsicID()) {
  case Intrinsic::spv_load:
    return selectLoad(ResVReg, ResType, I);
  case Intrinsic::spv_store:
    return selectStore(I);
  case Intrinsic::spv_extractv:
    return selectExtractVal(ResVReg, ResType, I);
  case Intrinsic::spv_insertv:
    return selectInsertVal(ResVReg, ResType, I);
  case Intrinsic::spv_extractelt:
    return selectExtractElt(ResVReg, ResType, I);
  case Intrinsic::spv_insertelt:
    return selectInsertElt(ResVReg, ResType, I);
  case Intrinsic::spv_gep:
    return selectGEP(ResVReg, ResType, I);
  case Intrinsic::spv_unref_global:
  case Intrinsic::spv_init_global: {
    MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
    MachineInstr *Init = I.getNumExplicitOperands() > 2
                             ? MRI->getVRegDef(I.getOperand(2).getReg())
                             : nullptr;
    assert(MI);
    return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
  }
  case Intrinsic::spv_undef: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_const_composite: {
    // If no values are attached, the composite is null constant.
    bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
    unsigned Opcode =
        IsNull ? SPIRV::OpConstantNull : SPIRV::OpConstantComposite;
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    // Skip the type MD node that we already used when generating assign.type
    // for this composite.
    if (!IsNull) {
      for (unsigned i = I.getNumExplicitDefs() + 1;
           i < I.getNumExplicitOperands(); ++i) {
        MIB.addUse(I.getOperand(i).getReg());
      }
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_assign_name: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
    MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
    for (unsigned i = I.getNumExplicitDefs() + 2;
         i < I.getNumExplicitOperands(); ++i) {
      MIB.addImm(I.getOperand(i).getImm());
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_switch: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      if (I.getOperand(i).isReg())
        MIB.addReg(I.getOperand(i).getReg());
      else if (I.getOperand(i).isCImm())
        addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
      else if (I.getOperand(i).isMBB())
        MIB.addMBB(I.getOperand(i).getMBB());
      else
        llvm_unreachable("Unexpected OpSwitch operand");
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_cmpxchg:
    return selectAtomicCmpXchg(ResVReg, ResType, I);
  case Intrinsic::spv_unreachable:
    BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
    break;
  case Intrinsic::spv_alloca:
    return selectFrameIndex(ResVReg, ResType, I);
  case Intrinsic::spv_assume:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
          .addUse(I.getOperand(1).getReg());
    break;
  case Intrinsic::spv_expect:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg())
          .addUse(I.getOperand(3).getReg());
    break;
  default:
    llvm_unreachable("Intrinsic selection not implemented");
  }
  return true;
}
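
// Lower a frame index (or spv_alloca) into an OpVariable with Function
// storage class.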
bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. We can use
  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
  // first, so can generate an OpBranchConditional here. If there is no
  // G_BRCOND, we just use OpBranch for a regular unconditional branch.
  const MachineInstr *PrevI = I.getPrevNode();
  MachineBasicBlock &MBB = *I.getParent();
  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
    return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
        .addUse(PrevI->getOperand(0).getReg())
        .addMBB(PrevI->getOperand(1).getMBB())
        .addMBB(I.getOperand(0).getMBB())
        .constrainAllUses(TII, TRI, RBI);
  }
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
      .addMBB(I.getOperand(0).getMBB())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. For an
  // explicit conditional branch with no fallthrough, we use both a G_BR and a
  // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
  // generate the OpBranchConditional in selectBranch above.
  //
  // If an OpBranchConditional has been generated, we simply return, as the
  // work is already done. If there is no OpBranchConditional, LLVM must be
  // relying on implicit fallthrough to the next basic block, so we need to
  // create an OpBranchConditional with an explicit "false" argument pointing
  // to the next basic block that LLVM would fall through to.
  const MachineInstr *NextI = I.getNextNode();
  // Check if this has already been successfully selected.
  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
    return true;
  // Must be relying on implicit block fallthrough, so generate an
  // OpBranchConditional with the "next" basic block as the "false" target.
  MachineBasicBlock &MBB = *I.getParent();
  unsigned NextMBBNum = MBB.getNextNode()->getNumber();
  MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
      .addUse(I.getOperand(0).getReg())
      .addMBB(I.getOperand(1).getMBB())
      .addMBB(NextMBB)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  const unsigned NumOps = I.getNumOperands();
  for (unsigned i = 1; i < NumOps; i += 2) {
    MIB.addUse(I.getOperand(i + 0).getReg());
    MIB.addMBB(I.getOperand(i + 1).getMBB());
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectGlobalValue(
    Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
  // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
  MachineIRBuilder MIRBuilder(I);
  const GlobalValue *GV = I.getOperand(1).getGlobal();
  Type *GVType = GV->getValueType();
  SPIRVType *PointerBaseType;
  if (GVType->isArrayTy()) {
    SPIRVType *ArrayElementType =
        GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
                                SPIRV::AccessQualifier::ReadWrite, false);
    PointerBaseType = GR.getOrCreateSPIRVArrayType(
        ArrayElementType, GVType->getArrayNumElements(), I, TII);
  } else {
    PointerBaseType = GR.getOrCreateSPIRVType(
        GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
  }
  SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
      PointerBaseType, I, TII,
      addressSpaceToStorageClass(GV->getAddressSpace()));
  std::string GlobalIdent = GV->getGlobalIdentifier();
  // We have functions as operands in tests with blocks of instructions, e.g.
  // in transcoding/global_block.ll. These operands are not used and should be
  // substituted by zero constants. Their type is expected to be always
  // OpTypePointer Function %uchar.
  if (isa<Function>(GV)) {
    const Constant *ConstVal = GV;
    MachineBasicBlock &BB = *I.getParent();
    Register NewReg = GR.find(ConstVal, GR.CurMF);
    if (!NewReg.isValid()) {
      Register NewReg = ResVReg;
      GR.add(ConstVal, GR.CurMF, NewReg);
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
          .addDef(NewReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .constrainAllUses(TII, TRI, RBI);
    }
    assert(NewReg != ResVReg);
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(NewReg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto GlobalVar = cast<GlobalVariable>(GV);
  assert(GlobalVar->getName() != "llvm.global.annotations");

  bool HasInit = GlobalVar->hasInitializer() &&
                 !isa<UndefValue>(GlobalVar->getInitializer());
  // Skip empty declaration for GVs with initializers till we get the decl with
  // passed initializer.
  if (HasInit && !Init)
    return true;

  unsigned AddrSpace = GV->getAddressSpace();
  SPIRV::StorageClass::StorageClass Storage =
      addressSpaceToStorageClass(AddrSpace);
  bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
                  Storage != SPIRV::StorageClass::Function;
  SPIRV::LinkageType::LinkageType LnkType =
      (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
          ? SPIRV::LinkageType::Import
          : SPIRV::LinkageType::Export;

  Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
                                        Storage, Init, GlobalVar->isConstant(),
                                        HasLnkTy, LnkType, MIRBuilder, true);
  return Reg.isValid();
}
bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
    return selectExtInst(ResVReg, ResType, I, CL::log10);
  }

  // There is no log10 instruction in the GLSL Extended Instruction set, so it
  // is implemented as:
  // log10(x) = log2(x) * (1 / log2(10))
  //          = log2(x) * 0.30103

  MachineIRBuilder MIRBuilder(I);
  MachineBasicBlock &BB = *I.getParent();

  // Build log2(x).
  Register VarReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  bool Result =
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
          .addDef(VarReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
          .addImm(GL::Log2)
          .add(I.getOperand(1))
          .constrainAllUses(TII, TRI, RBI);

  // Build 0.30103.
  assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
         ResType->getOpcode() == SPIRV::OpTypeFloat);
  // TODO: Add matrix implementation once supported by the HLSL frontend.
  const SPIRVType *SpirvScalarType =
      ResType->getOpcode() == SPIRV::OpTypeVector
          ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
          : ResType;
  Register ScaleReg =
      GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);

  // Multiply log2(x) by 0.30103 to get log10(x) result.
  auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
                    ? SPIRV::OpVectorTimesScalar
                    : SPIRV::OpFMulS;
  Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(VarReg)
                .addUse(ScaleReg)
                .constrainAllUses(TII, TRI, RBI);

  return Result;
}
*
1624 createSPIRVInstructionSelector(const SPIRVTargetMachine
&TM
,
1625 const SPIRVSubtarget
&Subtarget
,
1626 const RegisterBankInfo
&RBI
) {
1627 return new SPIRVInstructionSelector(TM
, Subtarget
, RBI
);