zpu: managed to compile a program that writes a constant to a global variable
[llvm/zpu.git] / lib/Target/ARM/Thumb2SizeReduction.cpp
//===-- Thumb2SizeReduction.cpp - Thumb2 code size reduction pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
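
// This pass shrinks code size by rewriting 32-bit Thumb2 instructions as their
// 16-bit Thumb1 equivalents whenever ReduceTable (below) lists a narrow form
// and the operand registers, immediate ranges, predication, and CPSR (flag)
// behavior allow the substitution.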
#define DEBUG_TYPE "t2-reduce-size"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMBaseInstrInfo.h"
#include "Thumb2InstrInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
STATISTIC(NumNarrows, "Number of 32-bit instrs reduced to 16-bit ones");
STATISTIC(Num2Addrs,  "Number of 32-bit instrs reduced to 2addr 16-bit ones");
STATISTIC(NumLdSts,   "Number of 32-bit load / store reduced to 16-bit ones");
static cl::opt<int> ReduceLimit("t2-reduce-limit",
                                cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimit2Addr("t2-reduce-limit2",
                                     cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimitLdSt("t2-reduce-limit3",
                                    cl::init(-1), cl::Hidden);
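
// The hidden -t2-reduce-limit options are debugging knobs: when set to a
// non-negative value they cap how many reductions of each kind are performed,
// by comparing against the NumNarrows / Num2Addrs / NumLdSts statistics above.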
namespace {
  /// ReduceTable - A static table with information on mapping from wide
  /// opcodes to narrow ones.
  struct ReduceEntry {
    unsigned WideOpc;      // Wide opcode
    unsigned NarrowOpc1;   // Narrow opcode to transform to
    unsigned NarrowOpc2;   // Narrow opcode when it's two-address
    uint8_t  Imm1Limit;    // Limit of immediate field (bits)
    uint8_t  Imm2Limit;    // Limit of immediate field when it's two-address
    unsigned LowRegs1 : 1; // Only possible if low-registers are used
    unsigned LowRegs2 : 1; // Only possible if low-registers are used (2addr)
    unsigned PredCC1  : 2; // 0 - If predicated, cc is on and vice versa.
                           // 1 - No cc field.
                           // 2 - Always set CPSR.
    unsigned PredCC2  : 2;
    unsigned Special  : 1; // Needs to be dealt with specially
  };
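
  // Each row maps one wide (32-bit) opcode to its candidate narrow encodings.
  // Rows are looked up by WideOpc through ReduceOpcodeMap, which the
  // Thumb2SizeReduce constructor builds from this table.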
  static const ReduceEntry ReduceTable[] = {
    // Wide,         Narrow1,       Narrow2,      imm1,imm2, lo1, lo2, P/C, S
    { ARM::t2ADCrr,  0,             ARM::tADC,      0,   0,   0,   1,  0,0, 0 },
    { ARM::t2ADDri,  ARM::tADDi3,   ARM::tADDi8,    3,   8,   1,   1,  0,0, 0 },
    { ARM::t2ADDrr,  ARM::tADDrr,   ARM::tADDhirr,  0,   0,   1,   0,  0,1, 0 },
    // Note: immediate scale is 4.
    { ARM::t2ADDrSPi,ARM::tADDrSPi, 0,              8,   0,   1,   0,  1,0, 0 },
    { ARM::t2ADDSri, ARM::tADDi3,   ARM::tADDi8,    3,   8,   1,   1,  2,2, 1 },
    { ARM::t2ADDSrr, ARM::tADDrr,   0,              0,   0,   1,   0,  2,0, 1 },
    { ARM::t2ANDrr,  0,             ARM::tAND,      0,   0,   0,   1,  0,0, 0 },
    { ARM::t2ASRri,  ARM::tASRri,   0,              5,   0,   1,   0,  0,0, 0 },
    { ARM::t2ASRrr,  0,             ARM::tASRrr,    0,   0,   0,   1,  0,0, 0 },
    { ARM::t2BICrr,  0,             ARM::tBIC,      0,   0,   0,   1,  0,0, 0 },
    // FIXME: Disable CMN, as CCodes are backwards from compare expectations
    //{ ARM::t2CMNrr, ARM::tCMN,    0,              0,   0,   1,   0,  2,0, 0 },
    { ARM::t2CMPri,  ARM::tCMPi8,   0,              8,   0,   1,   0,  2,0, 0 },
    { ARM::t2CMPrr,  ARM::tCMPhir,  0,              0,   0,   0,   0,  2,0, 0 },
    { ARM::t2CMPzri, ARM::tCMPzi8,  0,              8,   0,   1,   0,  2,0, 0 },
    { ARM::t2CMPzrr, ARM::tCMPzhir, 0,              0,   0,   0,   0,  2,0, 0 },
    { ARM::t2EORrr,  0,             ARM::tEOR,      0,   0,   0,   1,  0,0, 0 },
    // FIXME: adr.n immediate offset must be multiple of 4.
    //{ ARM::t2LEApcrelJT,ARM::tLEApcrelJT, 0,      0,   0,   1,   0,  1,0, 0 },
    { ARM::t2LSLri,  ARM::tLSLri,   0,              5,   0,   1,   0,  0,0, 0 },
    { ARM::t2LSLrr,  0,             ARM::tLSLrr,    0,   0,   0,   1,  0,0, 0 },
    { ARM::t2LSRri,  ARM::tLSRri,   0,              5,   0,   1,   0,  0,0, 0 },
    { ARM::t2LSRrr,  0,             ARM::tLSRrr,    0,   0,   0,   1,  0,0, 0 },
    { ARM::t2MOVi,   ARM::tMOVi8,   0,              8,   0,   1,   0,  0,0, 0 },
    { ARM::t2MOVi16, ARM::tMOVi8,   0,              8,   0,   1,   0,  0,0, 1 },
    // FIXME: Do we need the 16-bit 'S' variant?
    { ARM::t2MOVr,   ARM::tMOVgpr2gpr, 0,           0,   0,   0,   0,  1,0, 0 },
    { ARM::t2MOVCCr, 0,             ARM::tMOVCCr,   0,   0,   0,   0,  0,1, 0 },
    { ARM::t2MOVCCi, 0,             ARM::tMOVCCi,   0,   8,   0,   1,  0,1, 0 },
    { ARM::t2MUL,    0,             ARM::tMUL,      0,   0,   0,   1,  0,0, 0 },
    { ARM::t2MVNr,   ARM::tMVN,     0,              0,   0,   1,   0,  0,0, 0 },
    { ARM::t2ORRrr,  0,             ARM::tORR,      0,   0,   0,   1,  0,0, 0 },
    { ARM::t2REV,    ARM::tREV,     0,              0,   0,   1,   0,  1,0, 0 },
    { ARM::t2REV16,  ARM::tREV16,   0,              0,   0,   1,   0,  1,0, 0 },
    { ARM::t2REVSH,  ARM::tREVSH,   0,              0,   0,   1,   0,  1,0, 0 },
    { ARM::t2RORrr,  0,             ARM::tROR,      0,   0,   0,   1,  0,0, 0 },
    { ARM::t2RSBri,  ARM::tRSB,     0,              0,   0,   1,   0,  0,0, 1 },
    { ARM::t2RSBSri, ARM::tRSB,     0,              0,   0,   1,   0,  2,0, 1 },
    { ARM::t2SBCrr,  0,             ARM::tSBC,      0,   0,   0,   1,  0,0, 0 },
    { ARM::t2SUBri,  ARM::tSUBi3,   ARM::tSUBi8,    3,   8,   1,   1,  0,0, 0 },
    { ARM::t2SUBrr,  ARM::tSUBrr,   0,              0,   0,   1,   0,  0,0, 0 },
    { ARM::t2SUBSri, ARM::tSUBi3,   ARM::tSUBi8,    3,   8,   1,   1,  2,2, 0 },
    { ARM::t2SUBSrr, ARM::tSUBrr,   0,              0,   0,   1,   0,  2,0, 0 },
    { ARM::t2SXTBr,  ARM::tSXTB,    0,              0,   0,   1,   0,  1,0, 0 },
    { ARM::t2SXTHr,  ARM::tSXTH,    0,              0,   0,   1,   0,  1,0, 0 },
    { ARM::t2TSTrr,  ARM::tTST,     0,              0,   0,   1,   0,  2,0, 0 },
    { ARM::t2UXTBr,  ARM::tUXTB,    0,              0,   0,   1,   0,  1,0, 0 },
    { ARM::t2UXTHr,  ARM::tUXTH,    0,              0,   0,   1,   0,  1,0, 0 },

    // FIXME: Clean this up after splitting each Thumb load / store opcode
    // into multiple ones.
    { ARM::t2LDRi12, ARM::tLDR,     ARM::tLDRspi,   5,   8,   1,   0,  0,0, 1 },
    { ARM::t2LDRs,   ARM::tLDR,     0,              0,   0,   1,   0,  0,0, 1 },
    { ARM::t2LDRBi12,ARM::tLDRB,    0,              5,   0,   1,   0,  0,0, 1 },
    { ARM::t2LDRBs,  ARM::tLDRB,    0,              0,   0,   1,   0,  0,0, 1 },
    { ARM::t2LDRHi12,ARM::tLDRH,    0,              5,   0,   1,   0,  0,0, 1 },
    { ARM::t2LDRHs,  ARM::tLDRH,    0,              0,   0,   1,   0,  0,0, 1 },
    { ARM::t2LDRSBs, ARM::tLDRSB,   0,              0,   0,   1,   0,  0,0, 1 },
    { ARM::t2LDRSHs, ARM::tLDRSH,   0,              0,   0,   1,   0,  0,0, 1 },
    { ARM::t2STRi12, ARM::tSTR,     ARM::tSTRspi,   5,   8,   1,   0,  0,0, 1 },
    { ARM::t2STRs,   ARM::tSTR,     0,              0,   0,   1,   0,  0,0, 1 },
    { ARM::t2STRBi12,ARM::tSTRB,    0,              5,   0,   1,   0,  0,0, 1 },
    { ARM::t2STRBs,  ARM::tSTRB,    0,              0,   0,   1,   0,  0,0, 1 },
    { ARM::t2STRHi12,ARM::tSTRH,    0,              5,   0,   1,   0,  0,0, 1 },
    { ARM::t2STRHs,  ARM::tSTRH,    0,              0,   0,   1,   0,  0,0, 1 },

    { ARM::t2LDM,     ARM::tLDM,     0,             0,   0,   1,   1,  1,1, 1 },
    { ARM::t2LDM_RET, 0,             ARM::tPOP_RET, 0,   0,   1,   1,  1,1, 1 },
    { ARM::t2LDM_UPD, ARM::tLDM_UPD, ARM::tPOP,     0,   0,   1,   1,  1,1, 1 },
    // ARM::t2STM (with no basereg writeback) has no Thumb1 equivalent
    { ARM::t2STM_UPD, ARM::tSTM_UPD, ARM::tPUSH,    0,   0,   1,   1,  1,1, 1 },
  };
  class Thumb2SizeReduce : public MachineFunctionPass {
  public:
    static char ID;
    Thumb2SizeReduce();

    const Thumb2InstrInfo *TII;

    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual const char *getPassName() const {
      return "Thumb2 instruction size reduction pass";
    }

  private:
    /// ReduceOpcodeMap - Maps wide opcode to index of entry in ReduceTable.
    DenseMap<unsigned, unsigned> ReduceOpcodeMap;

    bool VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                         bool is2Addr, ARMCC::CondCodes Pred,
                         bool LiveCPSR, bool &HasCC, bool &CCDead);

    bool ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                         const ReduceEntry &Entry);

    bool ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry, bool LiveCPSR);

    /// ReduceTo2Addr - Reduce a 32-bit instruction to a 16-bit two-address
    /// instruction.
    bool ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry,
                       bool LiveCPSR);

    /// ReduceToNarrow - Reduce a 32-bit instruction to a 16-bit
    /// non-two-address instruction.
    bool ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                        const ReduceEntry &Entry,
                        bool LiveCPSR);

    /// ReduceMBB - Reduce width of instructions in the specified basic block.
    bool ReduceMBB(MachineBasicBlock &MBB);
  };
  char Thumb2SizeReduce::ID = 0;
}
Thumb2SizeReduce::Thumb2SizeReduce() : MachineFunctionPass(ID) {
  for (unsigned i = 0, e = array_lengthof(ReduceTable); i != e; ++i) {
    unsigned FromOpc = ReduceTable[i].WideOpc;
    if (!ReduceOpcodeMap.insert(std::make_pair(FromOpc, i)).second)
      assert(false && "Duplicated entries?");
  }
}
static bool HasImplicitCPSRDef(const TargetInstrDesc &TID) {
  for (const unsigned *Regs = TID.ImplicitDefs; *Regs; ++Regs)
    if (*Regs == ARM::CPSR)
      return true;
  return false;
}
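
// VerifyPredAndCC - Check that MI's predication and CPSR-setting behavior are
// compatible with the narrow opcode chosen from Entry. Depending on the PredCC
// encoding, the 16-bit form may be required to set CPSR (only allowed when the
// instruction is unpredicated and CPSR is not live), may keep an optional CPSR
// def, or must not set CPSR at all. HasCC and CCDead are updated to describe
// the instruction that will be emitted.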
bool
Thumb2SizeReduce::VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                                  bool is2Addr, ARMCC::CondCodes Pred,
                                  bool LiveCPSR, bool &HasCC, bool &CCDead) {
  if ((is2Addr  && Entry.PredCC2 == 0) ||
      (!is2Addr && Entry.PredCC1 == 0)) {
    if (Pred == ARMCC::AL) {
      // Not predicated, must set CPSR.
      if (!HasCC) {
        // Original instruction was not setting CPSR, but CPSR is not
        // currently live anyway. It's ok to set it. The CPSR def is
        // dead though.
        if (!LiveCPSR) {
          HasCC = true;
          CCDead = true;
          return true;
        }
        return false;
      }
    } else {
      // Predicated, must not set CPSR.
      if (HasCC)
        return false;
    }
  } else if ((is2Addr  && Entry.PredCC2 == 2) ||
             (!is2Addr && Entry.PredCC1 == 2)) {
    /// Old opcode has an optional def of CPSR.
    if (HasCC)
      return true;
    // If old opcode does not implicitly define CPSR, then it's not ok since
    // these new opcodes' CPSR def is not meant to be thrown away. e.g. CMP.
    if (!HasImplicitCPSRDef(MI->getDesc()))
      return false;
    HasCC = true;
  } else {
    // 16-bit instruction does not set CPSR.
    if (HasCC)
      return false;
  }

  return true;
}
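
// VerifyLowRegs - Return true if every explicit register operand of MI is a
// low register (r0-r7), allowing PC, LR, and SP only for the opcodes whose
// narrow encodings can accept them (pop/push, SP-relative ldr/str, etc.).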
static bool VerifyLowRegs(MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();
  bool isPCOk = (Opc == ARM::t2LDM_RET || Opc == ARM::t2LDM ||
                 Opc == ARM::t2LDM_UPD);
  bool isLROk = (Opc == ARM::t2STM_UPD);
  bool isSPOk = isPCOk || isLROk || (Opc == ARM::t2ADDrSPi);
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isImplicit())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    if (isPCOk && Reg == ARM::PC)
      continue;
    if (isLROk && Reg == ARM::LR)
      continue;
    if (Reg == ARM::SP) {
      if (isSPOk)
        continue;
      if (i == 1 && (Opc == ARM::t2LDRi12 || Opc == ARM::t2STRi12))
        // Special case for these ldr / str with sp as base register.
        continue;
    }
    if (!isARMLowRegister(Reg))
      return false;
  }
  return true;
}
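
// ReduceLoadStore - Try to rewrite a 32-bit Thumb2 load / store (including
// load / store multiple) as a 16-bit one. Immediate offsets must be aligned
// and in range after scaling (words use Scale = 4, halfwords Scale = 2),
// register-offset forms must not use a shifted index, and SP-based accesses
// switch to the tLDRspi / tSTRspi / tPOP / tPUSH encodings.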
bool
Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                                  const ReduceEntry &Entry) {
  if (ReduceLimitLdSt != -1 && ((int)NumLdSts >= ReduceLimitLdSt))
    return false;

  unsigned Scale = 1;
  bool HasImmOffset = false;
  bool HasShift = false;
  bool HasOffReg = true;
  bool isLdStMul = false;
  unsigned Opc = Entry.NarrowOpc1;
  unsigned OpNum = 3; // First 'rest' of operands.
  uint8_t  ImmLimit = Entry.Imm1Limit;

  switch (Entry.WideOpc) {
  default:
    llvm_unreachable("Unexpected Thumb2 load / store opcode!");
  case ARM::t2LDRi12:
  case ARM::t2STRi12: {
    unsigned BaseReg = MI->getOperand(1).getReg();
    if (BaseReg == ARM::SP) {
      Opc = Entry.NarrowOpc2;
      ImmLimit = Entry.Imm2Limit;
      HasOffReg = false;
    }
    Scale = 4;
    HasImmOffset = true;
    break;
  }
  case ARM::t2LDRBi12:
  case ARM::t2STRBi12:
    HasImmOffset = true;
    break;
  case ARM::t2LDRHi12:
  case ARM::t2STRHi12:
    Scale = 2;
    HasImmOffset = true;
    break;
  case ARM::t2LDRs:
  case ARM::t2LDRBs:
  case ARM::t2LDRHs:
  case ARM::t2LDRSBs:
  case ARM::t2LDRSHs:
  case ARM::t2STRs:
  case ARM::t2STRBs:
  case ARM::t2STRHs:
    HasShift = true;
    OpNum = 4;
    break;
  case ARM::t2LDM: {
    unsigned BaseReg = MI->getOperand(0).getReg();
    ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MI->getOperand(1).getImm());
    if (!isARMLowRegister(BaseReg) || Mode != ARM_AM::ia)
      return false;
    // For the non-writeback version (this one), the base register must be
    // one of the registers being loaded.
    bool isOK = false;
    for (unsigned i = 4; i < MI->getNumOperands(); ++i) {
      if (MI->getOperand(i).getReg() == BaseReg) {
        isOK = true;
        break;
      }
    }
    if (!isOK)
      return false;

    OpNum = 0;
    isLdStMul = true;
    break;
  }
  case ARM::t2LDM_RET: {
    unsigned BaseReg = MI->getOperand(1).getReg();
    if (BaseReg != ARM::SP)
      return false;
    Opc = Entry.NarrowOpc2; // tPOP_RET
    OpNum = 3;
    isLdStMul = true;
    break;
  }
  case ARM::t2LDM_UPD:
  case ARM::t2STM_UPD: {
    OpNum = 0;
    unsigned BaseReg = MI->getOperand(1).getReg();
    ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MI->getOperand(2).getImm());
    if (BaseReg == ARM::SP &&
        ((Entry.WideOpc == ARM::t2LDM_UPD && Mode == ARM_AM::ia) ||
         (Entry.WideOpc == ARM::t2STM_UPD && Mode == ARM_AM::db))) {
      Opc = Entry.NarrowOpc2; // tPOP or tPUSH
      OpNum = 3;
    } else if (!isARMLowRegister(BaseReg) || Mode != ARM_AM::ia) {
      return false;
    }
    isLdStMul = true;
    break;
  }
  }

  unsigned OffsetReg = 0;
  bool OffsetKill = false;
  if (HasShift) {
    OffsetReg  = MI->getOperand(2).getReg();
    OffsetKill = MI->getOperand(2).isKill();
    if (MI->getOperand(3).getImm())
      // Thumb1 addressing mode doesn't support shift.
      return false;
  }

  unsigned OffsetImm = 0;
  if (HasImmOffset) {
    OffsetImm = MI->getOperand(2).getImm();
    unsigned MaxOffset = ((1 << ImmLimit) - 1) * Scale;
    if ((OffsetImm & (Scale-1)) || OffsetImm > MaxOffset)
      // Make sure the immediate field fits.
      return false;
  }
  // Add the 16-bit load / store instruction.
  // FIXME: Thumb1 addressing mode encodes both an immediate and a register
  // offset.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, TII->get(Opc));
  if (!isLdStMul) {
    MIB.addOperand(MI->getOperand(0)).addOperand(MI->getOperand(1));
    if (Opc != ARM::tLDRSB && Opc != ARM::tLDRSH) {
      // tLDRSB and tLDRSH do not have an immediate offset field. On the other
      // hand, they must have an offset register.
      // FIXME: Remove this special case.
      MIB.addImm(OffsetImm/Scale);
    }
    assert((!HasShift || OffsetReg) && "Invalid so_reg load / store address!");

    if (HasOffReg)
      MIB.addReg(OffsetReg, getKillRegState(OffsetKill));
  }

  // Transfer the rest of operands.
  for (unsigned e = MI->getNumOperands(); OpNum != e; ++OpNum)
    MIB.addOperand(MI->getOperand(OpNum));

  // Transfer memoperands.
  (*MIB).setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase(MI);
  ++NumLdSts;
  return true;
}
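
// ReduceSpecial - Handle table entries marked Special: loads and stores are
// forwarded to ReduceLoadStore, ADDS is only narrowed when unpredicated, RSB
// only when its immediate is zero, and MOVi16 only when its operand is a plain
// immediate rather than the address of a global.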
bool
Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR) {
  if (Entry.LowRegs1 && !VerifyLowRegs(MI))
    return false;

  const TargetInstrDesc &TID = MI->getDesc();
  if (TID.mayLoad() || TID.mayStore())
    return ReduceLoadStore(MBB, MI, Entry);

  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  default: break;
  case ARM::t2ADDSri:
  case ARM::t2ADDSrr: {
    unsigned PredReg = 0;
    if (getInstrPredicate(MI, PredReg) == ARMCC::AL) {
      switch (Opc) {
      default: break;
      case ARM::t2ADDSri: {
        if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR))
          return true;
        // fallthrough
      }
      case ARM::t2ADDSrr:
        return ReduceToNarrow(MBB, MI, Entry, LiveCPSR);
      }
    }
    break;
  }
  case ARM::t2RSBri:
  case ARM::t2RSBSri:
    if (MI->getOperand(2).getImm() == 0)
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR);
    break;
  case ARM::t2MOVi16:
    // Can convert only 'pure' immediate operands, not immediates obtained as
    // globals' addresses.
    if (MI->getOperand(1).isImm())
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR);
    break;
  }
  return false;
}
bool
Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR) {

  if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr))
    return false;

  unsigned Reg0 = MI->getOperand(0).getReg();
  unsigned Reg1 = MI->getOperand(1).getReg();
  if (Reg0 != Reg1) {
    // Try to commute the operands to make it a 2-address instruction.
    unsigned CommOpIdx1, CommOpIdx2;
    if (!TII->findCommutedOpIndices(MI, CommOpIdx1, CommOpIdx2) ||
        CommOpIdx1 != 1 || MI->getOperand(CommOpIdx2).getReg() != Reg0)
      return false;
    MachineInstr *CommutedMI = TII->commuteInstruction(MI);
    if (!CommutedMI)
      return false;
  }
  if (Entry.LowRegs2 && !isARMLowRegister(Reg0))
    return false;
  if (Entry.Imm2Limit) {
    unsigned Imm = MI->getOperand(2).getImm();
    unsigned Limit = (1 << Entry.Imm2Limit) - 1;
    if (Imm > Limit)
      return false;
  } else {
    unsigned Reg2 = MI->getOperand(2).getReg();
    if (Entry.LowRegs2 && !isARMLowRegister(Reg2))
      return false;
  }

  // Check if it's possible / necessary to transfer the predicate.
  const TargetInstrDesc &NewTID = TII->get(Entry.NarrowOpc2);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewTID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    SkipPred = !NewTID.isPredicable();
  }

  bool HasCC = false;
  bool CCDead = false;
  const TargetInstrDesc &TID = MI->getDesc();
  if (TID.hasOptionalDef()) {
    unsigned NumOps = TID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, true, Pred, LiveCPSR, HasCC, CCDead))
    return false;

  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, NewTID);
  MIB.addOperand(MI->getOperand(0));
  if (NewTID.hasOptionalDef()) {
    if (HasCC)
      AddDefaultT1CC(MIB, CCDead);
    else
      AddNoT1CC(MIB);
  }

  // Transfer the rest of operands.
  unsigned NumOps = TID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && TID.OpInfo[i].isOptionalDef())
      continue;
    if (SkipPred && TID.OpInfo[i].isPredicate())
      continue;
    MIB.addOperand(MI->getOperand(i));
  }

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase(MI);
  ++Num2Addrs;
  return true;
}
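
// Note on scaling: for t2ADDrSPi the narrow tADDrSPi form carries a
// word-scaled immediate, so ReduceToNarrow below checks the wide immediate
// against a Scale-adjusted limit, requires it to be Scale-aligned, and divides
// it by Scale (4) when building the 16-bit instruction.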
bool
Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                                 const ReduceEntry &Entry,
                                 bool LiveCPSR) {
  if (ReduceLimit != -1 && ((int)NumNarrows >= ReduceLimit))
    return false;

  unsigned Limit = ~0U;
  unsigned Scale = (Entry.WideOpc == ARM::t2ADDrSPi) ? 4 : 1;
  if (Entry.Imm1Limit)
    Limit = ((1 << Entry.Imm1Limit) - 1) * Scale;

  const TargetInstrDesc &TID = MI->getDesc();
  for (unsigned i = 0, e = TID.getNumOperands(); i != e; ++i) {
    if (TID.OpInfo[i].isPredicate())
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg()) {
      unsigned Reg = MO.getReg();
      if (!Reg || Reg == ARM::CPSR)
        continue;
      if (Entry.WideOpc == ARM::t2ADDrSPi && Reg == ARM::SP)
        continue;
      if (Entry.LowRegs1 && !isARMLowRegister(Reg))
        return false;
    } else if (MO.isImm() &&
               !TID.OpInfo[i].isPredicate()) {
      if (((unsigned)MO.getImm()) > Limit || (MO.getImm() & (Scale-1)) != 0)
        return false;
    }
  }

  // Check if it's possible / necessary to transfer the predicate.
  const TargetInstrDesc &NewTID = TII->get(Entry.NarrowOpc1);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewTID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    SkipPred = !NewTID.isPredicable();
  }

  bool HasCC = false;
  bool CCDead = false;
  if (TID.hasOptionalDef()) {
    unsigned NumOps = TID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, false, Pred, LiveCPSR, HasCC, CCDead))
    return false;

  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, NewTID);
  MIB.addOperand(MI->getOperand(0));
  if (NewTID.hasOptionalDef()) {
    if (HasCC)
      AddDefaultT1CC(MIB, CCDead);
    else
      AddNoT1CC(MIB);
  }

  // Transfer the rest of operands.
  unsigned NumOps = TID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && TID.OpInfo[i].isOptionalDef())
      continue;
    if ((TID.getOpcode() == ARM::t2RSBSri ||
         TID.getOpcode() == ARM::t2RSBri) && i == 2)
      // Skip the zero immediate operand, it's now implicit.
      continue;
    bool isPred = (i < NumOps && TID.OpInfo[i].isPredicate());
    if (SkipPred && isPred)
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (Scale > 1 && !isPred && MO.isImm())
      MIB.addImm(MO.getImm() / Scale);
    else {
      if (MO.isReg() && MO.isImplicit() && MO.getReg() == ARM::CPSR)
        // Skip implicit def of CPSR. Either it's modeled as an optional
        // def now or it's already an implicit def on the new instruction.
        continue;
      MIB.addOperand(MO);
    }
  }
  if (!TID.isPredicable() && NewTID.isPredicable())
    AddDefaultPred(MIB);

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase(MI);
  ++NumNarrows;
  return true;
}
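
// UpdateCPSRDef - Recompute CPSR liveness after visiting MI's defs: returns
// true if MI has a non-dead def of CPSR, otherwise it just propagates the
// incoming liveness.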
static bool UpdateCPSRDef(MachineInstr &MI, bool LiveCPSR) {
  bool HasDef = false;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;
    if (!MO.isDead())
      HasDef = true;
  }

  return HasDef || LiveCPSR;
}
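
// UpdateCPSRUse - Track CPSR liveness across MI's uses: a use that kills CPSR
// ends the live range. The assert catches bookkeeping bugs where CPSR is used
// while the tracker already considers it dead.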
static bool UpdateCPSRUse(MachineInstr &MI, bool LiveCPSR) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;
    assert(LiveCPSR && "CPSR liveness tracking is wrong!");
    if (MO.isKill()) {
      LiveCPSR = false;
      break;
    }
  }

  return LiveCPSR;
}
bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  // Yes, CPSR could be livein.
  bool LiveCPSR = MBB.isLiveIn(ARM::CPSR);

  MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
  MachineBasicBlock::iterator NextMII;
  for (; MII != E; MII = NextMII) {
    NextMII = llvm::next(MII);

    MachineInstr *MI = &*MII;
    LiveCPSR = UpdateCPSRUse(*MI, LiveCPSR);

    unsigned Opcode = MI->getOpcode();
    DenseMap<unsigned, unsigned>::iterator OPI = ReduceOpcodeMap.find(Opcode);
    if (OPI != ReduceOpcodeMap.end()) {
      const ReduceEntry &Entry = ReduceTable[OPI->second];
      // Ignore "special" cases for now.
      if (Entry.Special) {
        if (ReduceSpecial(MBB, MI, Entry, LiveCPSR)) {
          Modified = true;
          MachineBasicBlock::iterator I = prior(NextMII);
          MI = &*I;
        }
        goto ProcessNext;
      }

      // Try to transform to a 16-bit two-address instruction.
      if (Entry.NarrowOpc2 && ReduceTo2Addr(MBB, MI, Entry, LiveCPSR)) {
        Modified = true;
        MachineBasicBlock::iterator I = prior(NextMII);
        MI = &*I;
        goto ProcessNext;
      }

      // Try to transform to a 16-bit non-two-address instruction.
      if (Entry.NarrowOpc1 && ReduceToNarrow(MBB, MI, Entry, LiveCPSR)) {
        Modified = true;
        MachineBasicBlock::iterator I = prior(NextMII);
        MI = &*I;
      }
    }

  ProcessNext:
    LiveCPSR = UpdateCPSRDef(*MI, LiveCPSR);
  }

  return Modified;
}
bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
  const TargetMachine &TM = MF.getTarget();
  TII = static_cast<const Thumb2InstrInfo*>(TM.getInstrInfo());

  bool Modified = false;
  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
    Modified |= ReduceMBB(*I);
  return Modified;
}
/// createThumb2SizeReductionPass - Returns an instance of the Thumb2 size
/// reduction pass.
FunctionPass *llvm::createThumb2SizeReductionPass() {
  return new Thumb2SizeReduce();
}