//===-- RISCVExpandPseudoInsts.cpp - Expand pseudo instructions -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that expands pseudo instructions into target
// instructions. This pass should be run after register allocation but before
// the post-regalloc scheduling pass.
//
//===----------------------------------------------------------------------===//

#include "RISCV.h"
#include "RISCVInstrInfo.h"
#include "RISCVTargetMachine.h"

#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

using namespace llvm;

#define RISCV_EXPAND_PSEUDO_NAME "RISCV pseudo instruction expansion pass"

namespace {

class RISCVExpandPseudo : public MachineFunctionPass {
public:
  const RISCVInstrInfo *TII;
  static char ID;

  RISCVExpandPseudo() : MachineFunctionPass(ID) {
    initializeRISCVExpandPseudoPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return RISCV_EXPAND_PSEUDO_NAME; }

private:
  bool expandMBB(MachineBasicBlock &MBB);
  bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                MachineBasicBlock::iterator &NextMBBI);
  bool expandAtomicBinOp(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MBBI, AtomicRMWInst::BinOp,
                         bool IsMasked, int Width,
                         MachineBasicBlock::iterator &NextMBBI);
  bool expandAtomicMinMaxOp(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            AtomicRMWInst::BinOp, bool IsMasked, int Width,
                            MachineBasicBlock::iterator &NextMBBI);
  bool expandAtomicCmpXchg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, bool IsMasked,
                           int Width, MachineBasicBlock::iterator &NextMBBI);
  bool expandAuipcInstPair(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI,
                           MachineBasicBlock::iterator &NextMBBI,
                           unsigned FlagsHi, unsigned SecondOpcode);
  bool expandLoadLocalAddress(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI,
                              MachineBasicBlock::iterator &NextMBBI);
  bool expandLoadAddress(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MBBI,
                         MachineBasicBlock::iterator &NextMBBI);
  bool expandLoadTLSIEAddress(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI,
                              MachineBasicBlock::iterator &NextMBBI);
  bool expandLoadTLSGDAddress(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI,
                              MachineBasicBlock::iterator &NextMBBI);
};

char RISCVExpandPseudo::ID = 0;

bool RISCVExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const RISCVInstrInfo *>(MF.getSubtarget().getInstrInfo());
  bool Modified = false;
  for (auto &MBB : MF)
    Modified |= expandMBB(MBB);
  return Modified;
}

bool RISCVExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
    Modified |= expandMI(MBB, MBBI, NMBBI);
    MBBI = NMBBI;
  }

  return Modified;
}

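// Expand the pseudo at MBBI if it is one handled below. Expansions that split
// the block set NextMBBI to MBB.end(), so expandMBB stops walking the (now
// truncated) block; the newly inserted blocks are picked up by the loop in
// runOnMachineFunction.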
bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 MachineBasicBlock::iterator &NextMBBI) {
  switch (MBBI->getOpcode()) {
  case RISCV::PseudoAtomicLoadNand32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 32,
                             NextMBBI);
  case RISCV::PseudoAtomicLoadNand64:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 64,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicSwap32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, true, 32,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadAdd32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Add, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadSub32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Sub, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadNand32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, true, 32,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Max, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Min, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadUMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMax, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadUMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, true, 32,
                                NextMBBI);
  case RISCV::PseudoCmpXchg32:
    return expandAtomicCmpXchg(MBB, MBBI, false, 32, NextMBBI);
  case RISCV::PseudoCmpXchg64:
    return expandAtomicCmpXchg(MBB, MBBI, false, 64, NextMBBI);
  case RISCV::PseudoMaskedCmpXchg32:
    return expandAtomicCmpXchg(MBB, MBBI, true, 32, NextMBBI);
  case RISCV::PseudoLLA:
    return expandLoadLocalAddress(MBB, MBBI, NextMBBI);
  case RISCV::PseudoLA:
    return expandLoadAddress(MBB, MBBI, NextMBBI);
  case RISCV::PseudoLA_TLS_IE:
    return expandLoadTLSIEAddress(MBB, MBBI, NextMBBI);
  case RISCV::PseudoLA_TLS_GD:
    return expandLoadTLSGDAddress(MBB, MBBI, NextMBBI);
  }

  return false;
}

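// The IR atomic ordering is mapped onto the .aq/.rl bits of the LR/SC pair:
// acquire semantics go on the LR and release semantics on the SC, so e.g. an
// acq_rel RMW becomes lr.w.aq + sc.w.rl, while seq_cst sets both bits on both
// instructions.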
static unsigned getLRForRMW32(AtomicOrdering Ordering) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::LR_W;
  case AtomicOrdering::Acquire:
    return RISCV::LR_W_AQ;
  case AtomicOrdering::Release:
    return RISCV::LR_W;
  case AtomicOrdering::AcquireRelease:
    return RISCV::LR_W_AQ;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::LR_W_AQ_RL;
  }
}

static unsigned getSCForRMW32(AtomicOrdering Ordering) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::SC_W;
  case AtomicOrdering::Acquire:
    return RISCV::SC_W;
  case AtomicOrdering::Release:
    return RISCV::SC_W_RL;
  case AtomicOrdering::AcquireRelease:
    return RISCV::SC_W_RL;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::SC_W_AQ_RL;
  }
}

static unsigned getLRForRMW64(AtomicOrdering Ordering) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::LR_D;
  case AtomicOrdering::Acquire:
    return RISCV::LR_D_AQ;
  case AtomicOrdering::Release:
    return RISCV::LR_D;
  case AtomicOrdering::AcquireRelease:
    return RISCV::LR_D_AQ;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::LR_D_AQ_RL;
  }
}

static unsigned getSCForRMW64(AtomicOrdering Ordering) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::SC_D;
  case AtomicOrdering::Acquire:
    return RISCV::SC_D;
  case AtomicOrdering::Release:
    return RISCV::SC_D_RL;
  case AtomicOrdering::AcquireRelease:
    return RISCV::SC_D_RL;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::SC_D_AQ_RL;
  }
}

static unsigned getLRForRMW(AtomicOrdering Ordering, int Width) {
  if (Width == 32)
    return getLRForRMW32(Ordering);
  if (Width == 64)
    return getLRForRMW64(Ordering);
  llvm_unreachable("Unexpected LR width\n");
}

static unsigned getSCForRMW(AtomicOrdering Ordering, int Width) {
  if (Width == 32)
    return getSCForRMW32(Ordering);
  if (Width == 64)
    return getSCForRMW64(Ordering);
  llvm_unreachable("Unexpected SC width\n");
}

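// Expand an unmasked atomic RMW pseudo into an LR/SC retry loop emitted into
// LoopMBB. MI's register operands are (dest, scratch, addr, incr), followed
// by an immediate encoding the AtomicOrdering.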
static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
                                   DebugLoc DL, MachineBasicBlock *ThisMBB,
                                   MachineBasicBlock *LoopMBB,
                                   MachineBasicBlock *DoneMBB,
                                   AtomicRMWInst::BinOp BinOp, int Width) {
  Register DestReg = MI.getOperand(0).getReg();
  Register ScratchReg = MI.getOperand(1).getReg();
  Register AddrReg = MI.getOperand(2).getReg();
  Register IncrReg = MI.getOperand(3).getReg();
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(4).getImm());

  // .loop:
  //   lr.[w|d] dest, (addr)
  //   binop scratch, dest, val
  //   sc.[w|d] scratch, scratch, (addr)
  //   bnez scratch, loop
  BuildMI(LoopMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
      .addReg(AddrReg);
  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Nand:
    BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
        .addReg(ScratchReg)
        .addImm(-1);
    break;
  }
  BuildMI(LoopMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
      .addReg(AddrReg)
      .addReg(ScratchReg);
  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
      .addReg(ScratchReg)
      .addReg(RISCV::X0)
      .addMBB(LoopMBB);
}

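// Emit code into MBB computing
//   DestReg = OldValReg ^ ((OldValReg ^ NewValReg) & MaskReg)
// i.e. take the bits selected by MaskReg from NewValReg and the rest from
// OldValReg. DestReg may alias ScratchReg or NewValReg (the call sites rely
// on this); only the three registers asserted below must be distinct.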
static void insertMaskedMerge(const RISCVInstrInfo *TII, DebugLoc DL,
                              MachineBasicBlock *MBB, Register DestReg,
                              Register OldValReg, Register NewValReg,
                              Register MaskReg, Register ScratchReg) {
  assert(OldValReg != ScratchReg && "OldValReg and ScratchReg must be unique");
  assert(OldValReg != MaskReg && "OldValReg and MaskReg must be unique");
  assert(ScratchReg != MaskReg && "ScratchReg and MaskReg must be unique");

  // We select bits from newval and oldval using:
  // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge
  // r = oldval ^ ((oldval ^ newval) & masktargetdata);
  BuildMI(MBB, DL, TII->get(RISCV::XOR), ScratchReg)
      .addReg(OldValReg)
      .addReg(NewValReg);
  BuildMI(MBB, DL, TII->get(RISCV::AND), ScratchReg)
      .addReg(ScratchReg)
      .addReg(MaskReg);
  BuildMI(MBB, DL, TII->get(RISCV::XOR), DestReg)
      .addReg(OldValReg)
      .addReg(ScratchReg);
}

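// Expand a masked (part-word) atomic RMW. The pseudo operates on the aligned
// 32-bit word containing the 8- or 16-bit location: the loop loads the whole
// word, applies the binop, then merges the result back under the mask so the
// neighboring bytes are stored back unchanged.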
static void doMaskedAtomicBinOpExpansion(
    const RISCVInstrInfo *TII, MachineInstr &MI, DebugLoc DL,
    MachineBasicBlock *ThisMBB, MachineBasicBlock *LoopMBB,
    MachineBasicBlock *DoneMBB, AtomicRMWInst::BinOp BinOp, int Width) {
  assert(Width == 32 && "Should never need to expand masked 64-bit operations");
  Register DestReg = MI.getOperand(0).getReg();
  Register ScratchReg = MI.getOperand(1).getReg();
  Register AddrReg = MI.getOperand(2).getReg();
  Register IncrReg = MI.getOperand(3).getReg();
  Register MaskReg = MI.getOperand(4).getReg();
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(5).getImm());

  // .loop:
  //   lr.w destreg, (alignedaddr)
  //   binop scratch, destreg, incr
  //   xor scratch, destreg, scratch
  //   and scratch, scratch, masktargetdata
  //   xor scratch, destreg, scratch
  //   sc.w scratch, scratch, (alignedaddr)
  //   bnez scratch, loop
  BuildMI(LoopMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
      .addReg(AddrReg);
  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Xchg:
    BuildMI(LoopMBB, DL, TII->get(RISCV::ADD), ScratchReg)
        .addReg(RISCV::X0)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Add:
    BuildMI(LoopMBB, DL, TII->get(RISCV::ADD), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Sub:
    BuildMI(LoopMBB, DL, TII->get(RISCV::SUB), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Nand:
    BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
        .addReg(ScratchReg)
        .addImm(-1);
    break;
  }

  insertMaskedMerge(TII, DL, LoopMBB, ScratchReg, DestReg, ScratchReg, MaskReg,
                    ScratchReg);

  BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
      .addReg(AddrReg)
      .addReg(ScratchReg);
  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
      .addReg(ScratchReg)
      .addReg(RISCV::X0)
      .addMBB(LoopMBB);
}

bool RISCVExpandPseudo::expandAtomicBinOp(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  auto LoopMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Insert new MBBs.
  MF->insert(++MBB.getIterator(), LoopMBB);
  MF->insert(++LoopMBB->getIterator(), DoneMBB);

  // Set up successors and transfer remaining instructions to DoneMBB.
  LoopMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(DoneMBB);
  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
  DoneMBB->transferSuccessors(&MBB);
  MBB.addSuccessor(LoopMBB);

  if (!IsMasked)
    doAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp, Width);
  else
    doMaskedAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp,
                                 Width);

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *LoopMBB);
  computeAndAddLiveIns(LiveRegs, *DoneMBB);

  return true;
}

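// Sign-extend the value in ValReg in place: shift left then arithmetic shift
// right by the amount in ShamtReg (XLEN minus the width of the field), so the
// masked field's sign bit is replicated for the signed comparison.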
static void insertSext(const RISCVInstrInfo *TII, DebugLoc DL,
                       MachineBasicBlock *MBB, Register ValReg,
                       Register ShamtReg) {
  BuildMI(MBB, DL, TII->get(RISCV::SLL), ValReg)
      .addReg(ValReg)
      .addReg(ShamtReg);
  BuildMI(MBB, DL, TII->get(RISCV::SRA), ValReg)
      .addReg(ValReg)
      .addReg(ShamtReg);
}

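// Unlike the other binops, min/max cannot be computed with a single ALU
// instruction, so the loop needs control flow: .loophead compares the loaded
// value against the operand and branches straight to .looptail when no update
// is needed; .loopifbody merges the new value in under the mask otherwise.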
bool RISCVExpandPseudo::expandAtomicMinMaxOp(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
    MachineBasicBlock::iterator &NextMBBI) {
  assert(IsMasked == true &&
         "Should only need to expand masked atomic max/min");
  assert(Width == 32 && "Should never need to expand masked 64-bit operations");

  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB.getParent();
  auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto LoopIfBodyMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Insert new MBBs.
  MF->insert(++MBB.getIterator(), LoopHeadMBB);
  MF->insert(++LoopHeadMBB->getIterator(), LoopIfBodyMBB);
  MF->insert(++LoopIfBodyMBB->getIterator(), LoopTailMBB);
  MF->insert(++LoopTailMBB->getIterator(), DoneMBB);

  // Set up successors and transfer remaining instructions to DoneMBB.
  LoopHeadMBB->addSuccessor(LoopIfBodyMBB);
  LoopHeadMBB->addSuccessor(LoopTailMBB);
  LoopIfBodyMBB->addSuccessor(LoopTailMBB);
  LoopTailMBB->addSuccessor(LoopHeadMBB);
  LoopTailMBB->addSuccessor(DoneMBB);
  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
  DoneMBB->transferSuccessors(&MBB);
  MBB.addSuccessor(LoopHeadMBB);

  Register DestReg = MI.getOperand(0).getReg();
  Register Scratch1Reg = MI.getOperand(1).getReg();
  Register Scratch2Reg = MI.getOperand(2).getReg();
  Register AddrReg = MI.getOperand(3).getReg();
  Register IncrReg = MI.getOperand(4).getReg();
  Register MaskReg = MI.getOperand(5).getReg();
  bool IsSigned = BinOp == AtomicRMWInst::Min || BinOp == AtomicRMWInst::Max;
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(IsSigned ? 7 : 6).getImm());

  // .loophead:
  //   lr.w destreg, (alignedaddr)
  //   and scratch2, destreg, mask
  //   mv scratch1, destreg
  //   [sext scratch2 if signed min/max]
  //   ifnochangeneeded scratch2, incr, .looptail
  BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
      .addReg(AddrReg);
  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), Scratch2Reg)
      .addReg(DestReg)
      .addReg(MaskReg);
  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::ADDI), Scratch1Reg)
      .addReg(DestReg)
      .addImm(0);

  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Max: {
    insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
        .addReg(Scratch2Reg)
        .addReg(IncrReg)
        .addMBB(LoopTailMBB);
    break;
  }
  case AtomicRMWInst::Min: {
    insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
        .addReg(IncrReg)
        .addReg(Scratch2Reg)
        .addMBB(LoopTailMBB);
    break;
  }
  case AtomicRMWInst::UMax:
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
        .addReg(Scratch2Reg)
        .addReg(IncrReg)
        .addMBB(LoopTailMBB);
    break;
  case AtomicRMWInst::UMin:
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
        .addReg(IncrReg)
        .addReg(Scratch2Reg)
        .addMBB(LoopTailMBB);
    break;
  }

  // .loopifbody:
  //   xor scratch1, destreg, incr
  //   and scratch1, scratch1, mask
  //   xor scratch1, destreg, scratch1
  insertMaskedMerge(TII, DL, LoopIfBodyMBB, Scratch1Reg, DestReg, IncrReg,
                    MaskReg, Scratch1Reg);

  // .looptail:
  //   sc.w scratch1, scratch1, (addr)
  //   bnez scratch1, loop
  BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), Scratch1Reg)
      .addReg(AddrReg)
      .addReg(Scratch1Reg);
  BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
      .addReg(Scratch1Reg)
      .addReg(RISCV::X0)
      .addMBB(LoopHeadMBB);

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
  computeAndAddLiveIns(LiveRegs, *LoopIfBodyMBB);
  computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
  computeAndAddLiveIns(LiveRegs, *DoneMBB);

  return true;
}

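// Expand a cmpxchg pseudo into the canonical LR/SC sequence: .loophead loads
// and exits to .done on a compare mismatch; .looptail attempts the store
// conditional and retries from .loophead if it fails. The masked form
// compares only the bits selected by the mask operand.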
bool RISCVExpandPseudo::expandAtomicCmpXchg(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, bool IsMasked,
    int Width, MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB.getParent();
  auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Insert new MBBs.
  MF->insert(++MBB.getIterator(), LoopHeadMBB);
  MF->insert(++LoopHeadMBB->getIterator(), LoopTailMBB);
  MF->insert(++LoopTailMBB->getIterator(), DoneMBB);

  // Set up successors and transfer remaining instructions to DoneMBB.
  LoopHeadMBB->addSuccessor(LoopTailMBB);
  LoopHeadMBB->addSuccessor(DoneMBB);
  LoopTailMBB->addSuccessor(DoneMBB);
  LoopTailMBB->addSuccessor(LoopHeadMBB);
  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
  DoneMBB->transferSuccessors(&MBB);
  MBB.addSuccessor(LoopHeadMBB);

  Register DestReg = MI.getOperand(0).getReg();
  Register ScratchReg = MI.getOperand(1).getReg();
  Register AddrReg = MI.getOperand(2).getReg();
  Register CmpValReg = MI.getOperand(3).getReg();
  Register NewValReg = MI.getOperand(4).getReg();
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(IsMasked ? 6 : 5).getImm());

  if (!IsMasked) {
    // .loophead:
    //   lr.[w|d] dest, (addr)
    //   bne dest, cmpval, done
    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
        .addReg(AddrReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
        .addReg(DestReg)
        .addReg(CmpValReg)
        .addMBB(DoneMBB);
    // .looptail:
    //   sc.[w|d] scratch, newval, (addr)
    //   bnez scratch, loophead
    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
        .addReg(AddrReg)
        .addReg(NewValReg);
    BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
        .addReg(ScratchReg)
        .addReg(RISCV::X0)
        .addMBB(LoopHeadMBB);
  } else {
    // .loophead:
    //   lr.w dest, (addr)
    //   and scratch, dest, mask
    //   bne scratch, cmpval, done
    Register MaskReg = MI.getOperand(5).getReg();
    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
        .addReg(AddrReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(MaskReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
        .addReg(ScratchReg)
        .addReg(CmpValReg)
        .addMBB(DoneMBB);

    // .looptail:
    //   xor scratch, dest, newval
    //   and scratch, scratch, mask
    //   xor scratch, dest, scratch
    //   sc.w scratch, scratch, (addr)
    //   bnez scratch, loophead
    insertMaskedMerge(TII, DL, LoopTailMBB, ScratchReg, DestReg, NewValReg,
                      MaskReg, ScratchReg);
    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
        .addReg(AddrReg)
        .addReg(ScratchReg);
    BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
        .addReg(ScratchReg)
        .addReg(RISCV::X0)
        .addMBB(LoopHeadMBB);
  }

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
  computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
  computeAndAddLiveIns(LiveRegs, *DoneMBB);

  return true;
}

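// Emit an AUIPC/second-instruction pair computing a PC-relative address:
// AUIPC materializes the high 20 bits of the offset, and SecondOpcode (ADDI,
// LW or LD) applies the low 12 bits via a %pcrel_lo that refers back to the
// label of the AUIPC. The AUIPC is placed at the start of a fresh basic block
// so that such a label is guaranteed to exist and be emitted.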
bool RISCVExpandPseudo::expandAuipcInstPair(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI, unsigned FlagsHi,
    unsigned SecondOpcode) {
  MachineFunction *MF = MBB.getParent();
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();

  Register DestReg = MI.getOperand(0).getReg();
  const MachineOperand &Symbol = MI.getOperand(1);

  MachineBasicBlock *NewMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Tell AsmPrinter that we unconditionally want the symbol of this label to be
  // emitted.
  NewMBB->setLabelMustBeEmitted();

  MF->insert(++MBB.getIterator(), NewMBB);

  BuildMI(NewMBB, DL, TII->get(RISCV::AUIPC), DestReg)
      .addDisp(Symbol, 0, FlagsHi);
  BuildMI(NewMBB, DL, TII->get(SecondOpcode), DestReg)
      .addReg(DestReg)
      .addMBB(NewMBB, RISCVII::MO_PCREL_LO);

  // Move all the rest of the instructions to NewMBB.
  NewMBB->splice(NewMBB->end(), &MBB, std::next(MBBI), MBB.end());
  // Update machine-CFG edges.
  NewMBB->transferSuccessorsAndUpdatePHIs(&MBB);
  // Make the original basic block fall-through to the new.
  MBB.addSuccessor(NewMBB);

  // Make sure live-ins are correctly attached to this new basic block.
  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *NewMBB);

  NextMBBI = MBB.end();
  MI.eraseFromParent();
  return true;
}

bool RISCVExpandPseudo::expandLoadLocalAddress(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI) {
  return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_PCREL_HI,
                             RISCV::ADDI);
}

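// For example (illustrative label name), "la a0, sym" on RV64 in PIC code
// expands to a GOT load:
//   .Lpcrel_hi0: auipc a0, %got_pcrel_hi(sym)
//                ld    a0, %pcrel_lo(.Lpcrel_hi0)(a0)
// and in non-PIC code to a plain PC-relative address computation:
//   .Lpcrel_hi0: auipc a0, %pcrel_hi(sym)
//                addi  a0, a0, %pcrel_lo(.Lpcrel_hi0)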
bool RISCVExpandPseudo::expandLoadAddress(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineFunction *MF = MBB.getParent();

  unsigned SecondOpcode;
  unsigned FlagsHi;
  if (MF->getTarget().isPositionIndependent()) {
    const auto &STI = MF->getSubtarget<RISCVSubtarget>();
    SecondOpcode = STI.is64Bit() ? RISCV::LD : RISCV::LW;
    FlagsHi = RISCVII::MO_GOT_HI;
  } else {
    SecondOpcode = RISCV::ADDI;
    FlagsHi = RISCVII::MO_PCREL_HI;
  }
  return expandAuipcInstPair(MBB, MBBI, NextMBBI, FlagsHi, SecondOpcode);
}

bool RISCVExpandPseudo::expandLoadTLSIEAddress(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineFunction *MF = MBB.getParent();

  const auto &STI = MF->getSubtarget<RISCVSubtarget>();
  unsigned SecondOpcode = STI.is64Bit() ? RISCV::LD : RISCV::LW;
  return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_TLS_GOT_HI,
                             SecondOpcode);
}

bool RISCVExpandPseudo::expandLoadTLSGDAddress(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI) {
  return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_TLS_GD_HI,
                             RISCV::ADDI);
}

} // end of anonymous namespace

INITIALIZE_PASS(RISCVExpandPseudo, "riscv-expand-pseudo",
                RISCV_EXPAND_PSEUDO_NAME, false, false)

namespace llvm {

FunctionPass *createRISCVExpandPseudoPass() { return new RISCVExpandPseudo(); }

} // end of namespace llvm