//===-- RISCVMakeCompressible.cpp - Make more instructions compressible ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass searches for instructions that are prevented from being compressed
// by one of the following:
//
//   1. The use of a single uncompressed register.
//   2. A base register + offset where the offset is too large to be compressed
//      and the base register may or may not be compressed.
//
// For case 1, if a compressed register is available, then the uncompressed
// register is copied to the compressed register and its uses are replaced.
//
// For example, storing zero uses the uncompressible zero register:
//   sw zero, 0(a0)   # if zero
//   sw zero, 8(a0)   # if zero
//   sw zero, 4(a0)   # if zero
//   sw zero, 24(a0)  # if zero
//
// If a compressed register (e.g. a1) is available, the above can be transformed
// to the following to improve code size:
//   li a1, 0
//   c.sw a1, 0(a0)
//   c.sw a1, 8(a0)
//   c.sw a1, 4(a0)
//   c.sw a1, 24(a0)
//
// For case 2, if a compressed register is available, then the original base is
// copied and adjusted such that:
//
//   new_base_register = base_register + adjustment
//   base_register + large_offset = new_base_register + small_offset
//
// For example, the following offsets are too large for c.sw:
//   lui a2, 983065
//   sw  a1, -236(a2)
//   sw  a1, -240(a2)
//   sw  a1, -244(a2)
//   sw  a1, -248(a2)
//   sw  a1, -252(a2)
//   sw  a0, -256(a2)
//
// If a compressed register is available (e.g. a3), a new base could be created
// such that the addresses can be accessed with a compressible offset, thus
// improving code size:
//   lui a2, 983065
//   addi a3, a2, -256
//   c.sw a1, 20(a3)
//   c.sw a1, 16(a3)
//   c.sw a1, 12(a3)
//   c.sw a1, 8(a3)
//   c.sw a1, 4(a3)
//   c.sw a0, 0(a3)
//
// This optimization is only applied if there are enough uses of the copied
// register for code size to be reduced.
//
//===----------------------------------------------------------------------===//
68 #include "RISCV.h"
69 #include "RISCVSubtarget.h"
70 #include "llvm/CodeGen/Passes.h"
71 #include "llvm/CodeGen/RegisterScavenging.h"
72 #include "llvm/MC/TargetRegistry.h"
73 #include "llvm/Support/Debug.h"
75 using namespace llvm;
77 #define DEBUG_TYPE "riscv-make-compressible"
78 #define RISCV_COMPRESS_INSTRS_NAME "RISC-V Make Compressible"
80 namespace {

struct RISCVMakeCompressibleOpt : public MachineFunctionPass {
  static char ID;

  bool runOnMachineFunction(MachineFunction &Fn) override;

  RISCVMakeCompressibleOpt() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return RISCV_COMPRESS_INSTRS_NAME; }
};
} // namespace

char RISCVMakeCompressibleOpt::ID = 0;
INITIALIZE_PASS(RISCVMakeCompressibleOpt, "riscv-make-compressible",
                RISCV_COMPRESS_INSTRS_NAME, false, false)

// Return log2(widthInBytes) of load/store done by Opcode.
static unsigned log2LdstWidth(unsigned Opcode) {
  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode");
  case RISCV::LW:
  case RISCV::SW:
  case RISCV::FLW:
  case RISCV::FSW:
    return 2;
  case RISCV::LD:
  case RISCV::SD:
  case RISCV::FLD:
  case RISCV::FSD:
    return 3;
  }
}

// Return a mask for the offset bits of a non-stack-pointer based compressed
// load/store.
static uint8_t compressedLDSTOffsetMask(unsigned Opcode) {
  return 0x1f << log2LdstWidth(Opcode);
}
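
// E.g. word-sized accesses (log2LdstWidth == 2) give a mask of
// 0x1f << 2 == 0x7c, i.e. c.lw/c.sw can encode offsets 0..124 in multiples of
// 4, while doubleword accesses give 0x1f << 3 == 0xf8 (offsets 0..248 in
// multiples of 8).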

// Return true if Offset fits within a compressed stack-pointer based
// load/store.
static bool compressibleSPOffset(int64_t Offset, unsigned Opcode) {
  return log2LdstWidth(Opcode) == 2 ? isShiftedUInt<6, 2>(Offset)
                                    : isShiftedUInt<6, 3>(Offset);
}
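
// E.g. c.lwsp/c.swsp accept offsets 0..252 in multiples of 4
// (isShiftedUInt<6, 2>) and c.ldsp/c.sdsp accept offsets 0..504 in multiples
// of 8 (isShiftedUInt<6, 3>), a larger range than the non-stack-pointer forms
// above.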

// Given an offset for a load/store, return the adjustment required to the base
// register such that the address can be accessed with a compressible offset.
// This will return 0 if the offset is already compressible.
static int64_t getBaseAdjustForCompression(int64_t Offset, unsigned Opcode) {
  // Return the excess bits that do not fit in a compressible offset.
  return Offset & ~compressedLDSTOffsetMask(Opcode);
}
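
// Worked example, using the file header: for sw a1, -236(a2) the word mask is
// 0x7c, so the adjustment is -236 & ~0x7c == -256 and the remaining offset
// -236 - (-256) == 20 fits a c.sw.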

// Return true if Reg is in a compressed register class.
static bool isCompressedReg(Register Reg) {
  return RISCV::GPRCRegClass.contains(Reg) ||
         RISCV::FPR32CRegClass.contains(Reg) ||
         RISCV::FPR64CRegClass.contains(Reg);
}
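
// These are the registers reachable by the 3-bit register fields of most
// compressed instructions: x8-x15 (GPRC) and f8-f15 (FPR32C/FPR64C).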

// Return true if MI is a load for which there exists a compressed version.
static bool isCompressibleLoad(const MachineInstr &MI) {
  const RISCVSubtarget &STI = MI.getMF()->getSubtarget<RISCVSubtarget>();
  const unsigned Opcode = MI.getOpcode();

  return Opcode == RISCV::LW || (!STI.is64Bit() && Opcode == RISCV::FLW) ||
         Opcode == RISCV::LD || Opcode == RISCV::FLD;
}

// Return true if MI is a store for which there exists a compressed version.
static bool isCompressibleStore(const MachineInstr &MI) {
  const RISCVSubtarget &STI = MI.getMF()->getSubtarget<RISCVSubtarget>();
  const unsigned Opcode = MI.getOpcode();

  return Opcode == RISCV::SW || (!STI.is64Bit() && Opcode == RISCV::FSW) ||
         Opcode == RISCV::SD || Opcode == RISCV::FSD;
}
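
// Note that c.flw/c.fsw only exist on RV32 (RV64 reuses their encodings for
// c.ld/c.sd), hence the !STI.is64Bit() guards on FLW/FSW above.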

// Find a single register and/or large offset which, if compressible, would
// allow the given instruction to be compressed.
//
// Possible return values:
//
//   {Reg, 0}               - Uncompressed Reg needs replacing with a
//                            compressed register.
//   {Reg, N}               - Reg needs replacing with a compressed register
//                            and N needs adding to the new register. (Reg may
//                            be compressed or uncompressed).
//   {RISCV::NoRegister, 0} - No suitable optimization found for this
//                            instruction.
static RegImmPair getRegImmPairPreventingCompression(const MachineInstr &MI) {
  const unsigned Opcode = MI.getOpcode();

  if (isCompressibleLoad(MI) || isCompressibleStore(MI)) {
    const MachineOperand &MOImm = MI.getOperand(2);
    if (!MOImm.isImm())
      return RegImmPair(RISCV::NoRegister, 0);

    int64_t Offset = MOImm.getImm();
    int64_t NewBaseAdjust = getBaseAdjustForCompression(Offset, Opcode);
    Register Base = MI.getOperand(1).getReg();

    // Memory accesses via the stack pointer do not have a requirement for
    // either of the registers to be compressible and can take a larger offset.
    if (RISCV::SPRegClass.contains(Base)) {
      if (!compressibleSPOffset(Offset, Opcode) && NewBaseAdjust)
        return RegImmPair(Base, NewBaseAdjust);
    } else {
      Register SrcDest = MI.getOperand(0).getReg();
      bool SrcDestCompressed = isCompressedReg(SrcDest);
      bool BaseCompressed = isCompressedReg(Base);

      // If only Base and/or offset prevent compression, then return Base and
      // any adjustment required to make the offset compressible.
      if ((!BaseCompressed || NewBaseAdjust) && SrcDestCompressed)
        return RegImmPair(Base, NewBaseAdjust);

      // For loads, we can only change the base register since dest is defined
      // rather than used.
      //
      // For stores, we can change SrcDest (and Base if SrcDest == Base) but
      // cannot resolve an uncompressible offset in this case.
      if (isCompressibleStore(MI)) {
        if (!SrcDestCompressed && (BaseCompressed || SrcDest == Base) &&
            !NewBaseAdjust)
          return RegImmPair(SrcDest, NewBaseAdjust);
      }
    }
  }
  return RegImmPair(RISCV::NoRegister, 0);
}
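
// Applied to the file header examples: sw zero, 0(a0) yields {zero, 0} (only
// the uncompressible source register prevents compression), while
// sw a1, -236(a2) yields {a2, -256} (only the offset prevents compression).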

// Check all uses after FirstMI of the given register, keeping a vector of
// instructions that would be compressible if the given register (and offset if
// applicable) were compressible.
//
// If there are enough uses for this optimization to improve code size and a
// compressed register is available, return that compressed register.
static Register analyzeCompressibleUses(MachineInstr &FirstMI,
                                        RegImmPair RegImm,
                                        SmallVectorImpl<MachineInstr *> &MIs) {
  MachineBasicBlock &MBB = *FirstMI.getParent();
  const TargetRegisterInfo *TRI =
      MBB.getParent()->getSubtarget().getRegisterInfo();

  for (MachineBasicBlock::instr_iterator I = FirstMI.getIterator(),
                                         E = MBB.instr_end();
       I != E; ++I) {
    MachineInstr &MI = *I;

    // Determine if this is an instruction which would benefit from using the
    // new register.
    RegImmPair CandidateRegImm = getRegImmPairPreventingCompression(MI);
    if (CandidateRegImm.Reg == RegImm.Reg && CandidateRegImm.Imm == RegImm.Imm)
      MIs.push_back(&MI);

    // If RegImm.Reg is modified by this instruction, then we cannot optimize
    // past this instruction. If the register is already compressed, then it
    // may be possible to optimize a large offset in the current instruction -
    // this will have been detected by the preceding call to
    // getRegImmPairPreventingCompression.
    if (MI.modifiesRegister(RegImm.Reg, TRI))
      break;
  }

  // Adjusting the base costs one new uncompressed addi and therefore three
  // uses are required for a code size reduction. If no base adjustment is
  // required, then copying the register costs one new c.mv (or c.li Rd, 0 for
  // "copying" the zero register) and therefore two uses are required for a
  // code size reduction.
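  // (In bytes: an addi costs 4 while each newly compressed use saves 2, so
  // three uses save 6 > 4; a c.mv/c.li costs 2, so two uses save 4 > 2.)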
  if (MIs.size() < 2 || (RegImm.Imm != 0 && MIs.size() < 3))
    return RISCV::NoRegister;

  // Find a compressible register which will be available from the first
  // instruction we care about to the last.
  const TargetRegisterClass *RCToScavenge;

  // Work out the compressed register class from which to scavenge.
  if (RISCV::GPRRegClass.contains(RegImm.Reg))
    RCToScavenge = &RISCV::GPRCRegClass;
  else if (RISCV::FPR32RegClass.contains(RegImm.Reg))
    RCToScavenge = &RISCV::FPR32CRegClass;
  else if (RISCV::FPR64RegClass.contains(RegImm.Reg))
    RCToScavenge = &RISCV::FPR64CRegClass;
  else
    return RISCV::NoRegister;
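
  // Scavenge backwards from just past the last rewritten use to FirstMI so the
  // chosen register is free across the whole range. With /*AllowSpill=*/false
  // a failed scavenge returns no register, which the caller treats as "no
  // optimization possible".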
  RegScavenger RS;
  RS.enterBasicBlockEnd(MBB);
  RS.backward(std::next(MIs.back()->getIterator()));
  return RS.scavengeRegisterBackwards(*RCToScavenge, FirstMI.getIterator(),
                                      /*RestoreAfter=*/false, /*SPAdj=*/0,
                                      /*AllowSpill=*/false);
}

// Update uses of the old register in the given instruction to the new
// register.
static void updateOperands(MachineInstr &MI, RegImmPair OldRegImm,
                           Register NewReg) {
  unsigned Opcode = MI.getOpcode();

  // If this pass is extended to support more instructions, the check for
  // definedness may need to be strengthened.
  assert((isCompressibleLoad(MI) || isCompressibleStore(MI)) &&
         "Unsupported instruction for this optimization.");

  int SkipN = 0;

  // Skip the first (value) operand to a store instruction (except if the store
  // offset is zero) in order to avoid an incorrect transformation.
  // e.g. transforming sd a0, 808(a0) to addi a2, a0, 768; sd a2, 40(a2) would
  // wrongly store the new base register rather than the original value.
  if (isCompressibleStore(MI) && OldRegImm.Imm != 0)
    SkipN = 1;

  // Update registers
  for (MachineOperand &MO : drop_begin(MI.operands(), SkipN))
    if (MO.isReg() && MO.getReg() == OldRegImm.Reg) {
      // Do not update operands that define the old register.
      //
      // The new register was scavenged for the range of instructions that are
      // being updated, therefore it should not be defined within this range
      // except possibly in the final instruction.
      if (MO.isDef()) {
        assert(isCompressibleLoad(MI));
        continue;
      }
      // Update reg
      MO.setReg(NewReg);
    }

  // Update offset; keep only the bits that fit the compressed encoding
  // (e.g. -236 & 0x7c == 20, matching the header example).
  MachineOperand &MOImm = MI.getOperand(2);
  int64_t NewOffset = MOImm.getImm() & compressedLDSTOffsetMask(Opcode);
  MOImm.setImm(NewOffset);
}

bool RISCVMakeCompressibleOpt::runOnMachineFunction(MachineFunction &Fn) {
  // This is a size optimization.
  if (skipFunction(Fn.getFunction()) || !Fn.getFunction().hasMinSize())
    return false;

  const RISCVSubtarget &STI = Fn.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo &TII = *STI.getInstrInfo();

  // This optimization only makes sense if compressed instructions are emitted.
  // FIXME: Support Zca, Zcf, Zcd granularity.
  if (!STI.hasStdExtC())
    return false;

  for (MachineBasicBlock &MBB : Fn) {
    LLVM_DEBUG(dbgs() << "MBB: " << MBB.getName() << "\n");
    for (MachineInstr &MI : MBB) {
      // Determine if this instruction would otherwise be compressed if not for
      // an uncompressible register or offset.
      RegImmPair RegImm = getRegImmPairPreventingCompression(MI);
      if (!RegImm.Reg && RegImm.Imm == 0)
        continue;

      // Determine if there is a set of instructions for which replacing this
      // register with a compressed register (and compressible offset if
      // applicable) is possible and will allow compression.
      SmallVector<MachineInstr *, 8> MIs;
      Register NewReg = analyzeCompressibleUses(MI, RegImm, MIs);
      if (!NewReg)
        continue;

      // Create the appropriate copy and/or offset.
      if (RISCV::GPRRegClass.contains(RegImm.Reg)) {
        assert(isInt<12>(RegImm.Imm));
        BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(RISCV::ADDI), NewReg)
            .addReg(RegImm.Reg)
            .addImm(RegImm.Imm);
      } else {
        // If we are looking at replacing an FPR register we don't expect to
        // have any offset. The only compressible FP instructions with an
        // offset are loads and stores, for which the offset applies to the GPR
        // operand not the FPR operand.
        assert(RegImm.Imm == 0);
        unsigned Opcode = RISCV::FPR32RegClass.contains(RegImm.Reg)
                              ? RISCV::FSGNJ_S
                              : RISCV::FSGNJ_D;
        // FSGNJ with identical source operands is the canonical FP
        // register-register move (fmv.s/fmv.d).
        BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(Opcode), NewReg)
            .addReg(RegImm.Reg)
            .addReg(RegImm.Reg);
      }

      // Update the set of instructions to use the compressed register and
      // compressible offset instead. These instructions should now be
      // compressible.
      // TODO: Update all uses if RegImm.Imm == 0? Not just those that are
      // expected to become compressible.
      for (MachineInstr *UpdateMI : MIs)
        updateOperands(*UpdateMI, RegImm, NewReg);
    }
  }
  return true;
}

/// Returns an instance of the Make Compressible Optimization pass.
FunctionPass *llvm::createRISCVMakeCompressibleOptPass() {
  return new RISCVMakeCompressibleOpt();
}