//===- MipsISelLowering.cpp - Mips DAG Lowering Implementation -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that Mips uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "MipsISelLowering.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "MCTargetDesc/MipsInstPrinter.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "MipsCCState.h"
#include "MipsInstrInfo.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
#include "MipsSubtarget.h"
#include "MipsTargetMachine.h"
#include "MipsTargetObjectFile.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cctype>
#include <cstdint>
#include <deque>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "mips-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool>
    NoZeroDivCheck("mno-check-zero-division", cl::Hidden,
                   cl::desc("MIPS: Don't trap on integer division by zero."),
                   cl::init(false));

extern cl::opt<bool> EmitJalrReloc;

static const MCPhysReg Mips64DPRegs[8] = {
  Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
  Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
};

// The MIPS MSA ABI passes vector arguments in the integer register set.
// The number of integer registers used is dependent on the ABI used.
MVT MipsTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                      CallingConv::ID CC,
                                                      EVT VT) const {
  if (!VT.isVector())
    return getRegisterType(Context, VT);

  if (VT.isPow2VectorType() && VT.getVectorElementType().isRound())
    return Subtarget.isABI_O32() || VT.getSizeInBits() == 32 ? MVT::i32
                                                             : MVT::i64;
  return getRegisterType(Context, VT.getVectorElementType());
}
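
// Illustration (a sketch, not an exhaustive ABI statement): a v4i32 MSA
// argument is passed in GPRs, so it lowers to i32 registers under O32 and
// to i64 registers under N32/N64; the register count itself comes from
// getNumRegistersForCallingConv below (four i32s vs. two i64s here).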
unsigned MipsTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                           CallingConv::ID CC,
                                                           EVT VT) const {
  if (VT.isVector()) {
    if (VT.isPow2VectorType() && VT.getVectorElementType().isRound())
      return divideCeil(VT.getSizeInBits(), Subtarget.isABI_O32() ? 32 : 64);
    return VT.getVectorNumElements() *
           getNumRegisters(Context, VT.getVectorElementType());
  }
  return MipsTargetLowering::getNumRegisters(Context, VT);
}

unsigned MipsTargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  if (VT.isPow2VectorType()) {
    IntermediateVT = getRegisterTypeForCallingConv(Context, CC, VT);
    RegisterVT = IntermediateVT.getSimpleVT();
    NumIntermediates = getNumRegistersForCallingConv(Context, CC, VT);
    return NumIntermediates;
  }
  IntermediateVT = VT.getVectorElementType();
  NumIntermediates = VT.getVectorNumElements();
  RegisterVT = getRegisterType(Context, IntermediateVT);
  return NumIntermediates * getNumRegisters(Context, IntermediateVT);
}
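
// For illustration (sketch): a non-power-of-two vector such as v3i32 misses
// the pow2 path above, so it is broken into NumIntermediates = 3 scalar i32
// intermediates, one register each, for a total of 3 registers.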
SDValue MipsTargetLowering::getGlobalReg(SelectionDAG &DAG, EVT Ty) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MipsFunctionInfo *FI = MF.getInfo<MipsFunctionInfo>();
  return DAG.getRegister(FI->getGlobalBaseReg(MF), Ty);
}

SDValue MipsTargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty, 0, Flag);
}

SDValue MipsTargetLowering::getTargetNode(ExternalSymbolSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flag);
}

SDValue MipsTargetLowering::getTargetNode(BlockAddressSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
}

SDValue MipsTargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
}

SDValue MipsTargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flag);
}

const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((MipsISD::NodeType)Opcode) {
  case MipsISD::FIRST_NUMBER: break;
  case MipsISD::JmpLink: return "MipsISD::JmpLink";
  case MipsISD::TailCall: return "MipsISD::TailCall";
  case MipsISD::Highest: return "MipsISD::Highest";
  case MipsISD::Higher: return "MipsISD::Higher";
  case MipsISD::Hi: return "MipsISD::Hi";
  case MipsISD::Lo: return "MipsISD::Lo";
  case MipsISD::GotHi: return "MipsISD::GotHi";
  case MipsISD::TlsHi: return "MipsISD::TlsHi";
  case MipsISD::GPRel: return "MipsISD::GPRel";
  case MipsISD::ThreadPointer: return "MipsISD::ThreadPointer";
  case MipsISD::Ret: return "MipsISD::Ret";
  case MipsISD::ERet: return "MipsISD::ERet";
  case MipsISD::EH_RETURN: return "MipsISD::EH_RETURN";
  case MipsISD::FAbs: return "MipsISD::FAbs";
  case MipsISD::FMS: return "MipsISD::FMS";
  case MipsISD::FPBrcond: return "MipsISD::FPBrcond";
  case MipsISD::FPCmp: return "MipsISD::FPCmp";
  case MipsISD::FSELECT: return "MipsISD::FSELECT";
  case MipsISD::MTC1_D64: return "MipsISD::MTC1_D64";
  case MipsISD::CMovFP_T: return "MipsISD::CMovFP_T";
  case MipsISD::CMovFP_F: return "MipsISD::CMovFP_F";
  case MipsISD::TruncIntFP: return "MipsISD::TruncIntFP";
  case MipsISD::MFHI: return "MipsISD::MFHI";
  case MipsISD::MFLO: return "MipsISD::MFLO";
  case MipsISD::MTLOHI: return "MipsISD::MTLOHI";
  case MipsISD::Mult: return "MipsISD::Mult";
  case MipsISD::Multu: return "MipsISD::Multu";
  case MipsISD::MAdd: return "MipsISD::MAdd";
  case MipsISD::MAddu: return "MipsISD::MAddu";
  case MipsISD::MSub: return "MipsISD::MSub";
  case MipsISD::MSubu: return "MipsISD::MSubu";
  case MipsISD::DivRem: return "MipsISD::DivRem";
  case MipsISD::DivRemU: return "MipsISD::DivRemU";
  case MipsISD::DivRem16: return "MipsISD::DivRem16";
  case MipsISD::DivRemU16: return "MipsISD::DivRemU16";
  case MipsISD::BuildPairF64: return "MipsISD::BuildPairF64";
  case MipsISD::ExtractElementF64: return "MipsISD::ExtractElementF64";
  case MipsISD::Wrapper: return "MipsISD::Wrapper";
  case MipsISD::DynAlloc: return "MipsISD::DynAlloc";
  case MipsISD::Sync: return "MipsISD::Sync";
  case MipsISD::Ext: return "MipsISD::Ext";
  case MipsISD::Ins: return "MipsISD::Ins";
  case MipsISD::CIns: return "MipsISD::CIns";
  case MipsISD::LWL: return "MipsISD::LWL";
  case MipsISD::LWR: return "MipsISD::LWR";
  case MipsISD::SWL: return "MipsISD::SWL";
  case MipsISD::SWR: return "MipsISD::SWR";
  case MipsISD::LDL: return "MipsISD::LDL";
  case MipsISD::LDR: return "MipsISD::LDR";
  case MipsISD::SDL: return "MipsISD::SDL";
  case MipsISD::SDR: return "MipsISD::SDR";
  case MipsISD::EXTP: return "MipsISD::EXTP";
  case MipsISD::EXTPDP: return "MipsISD::EXTPDP";
  case MipsISD::EXTR_S_H: return "MipsISD::EXTR_S_H";
  case MipsISD::EXTR_W: return "MipsISD::EXTR_W";
  case MipsISD::EXTR_R_W: return "MipsISD::EXTR_R_W";
  case MipsISD::EXTR_RS_W: return "MipsISD::EXTR_RS_W";
  case MipsISD::SHILO: return "MipsISD::SHILO";
  case MipsISD::MTHLIP: return "MipsISD::MTHLIP";
  case MipsISD::MULSAQ_S_W_PH: return "MipsISD::MULSAQ_S_W_PH";
  case MipsISD::MAQ_S_W_PHL: return "MipsISD::MAQ_S_W_PHL";
  case MipsISD::MAQ_S_W_PHR: return "MipsISD::MAQ_S_W_PHR";
  case MipsISD::MAQ_SA_W_PHL: return "MipsISD::MAQ_SA_W_PHL";
  case MipsISD::MAQ_SA_W_PHR: return "MipsISD::MAQ_SA_W_PHR";
  case MipsISD::DPAU_H_QBL: return "MipsISD::DPAU_H_QBL";
  case MipsISD::DPAU_H_QBR: return "MipsISD::DPAU_H_QBR";
  case MipsISD::DPSU_H_QBL: return "MipsISD::DPSU_H_QBL";
  case MipsISD::DPSU_H_QBR: return "MipsISD::DPSU_H_QBR";
  case MipsISD::DPAQ_S_W_PH: return "MipsISD::DPAQ_S_W_PH";
  case MipsISD::DPSQ_S_W_PH: return "MipsISD::DPSQ_S_W_PH";
  case MipsISD::DPAQ_SA_L_W: return "MipsISD::DPAQ_SA_L_W";
  case MipsISD::DPSQ_SA_L_W: return "MipsISD::DPSQ_SA_L_W";
  case MipsISD::DPA_W_PH: return "MipsISD::DPA_W_PH";
  case MipsISD::DPS_W_PH: return "MipsISD::DPS_W_PH";
  case MipsISD::DPAQX_S_W_PH: return "MipsISD::DPAQX_S_W_PH";
  case MipsISD::DPAQX_SA_W_PH: return "MipsISD::DPAQX_SA_W_PH";
  case MipsISD::DPAX_W_PH: return "MipsISD::DPAX_W_PH";
  case MipsISD::DPSX_W_PH: return "MipsISD::DPSX_W_PH";
  case MipsISD::DPSQX_S_W_PH: return "MipsISD::DPSQX_S_W_PH";
  case MipsISD::DPSQX_SA_W_PH: return "MipsISD::DPSQX_SA_W_PH";
  case MipsISD::MULSA_W_PH: return "MipsISD::MULSA_W_PH";
  case MipsISD::MULT: return "MipsISD::MULT";
  case MipsISD::MULTU: return "MipsISD::MULTU";
  case MipsISD::MADD_DSP: return "MipsISD::MADD_DSP";
  case MipsISD::MADDU_DSP: return "MipsISD::MADDU_DSP";
  case MipsISD::MSUB_DSP: return "MipsISD::MSUB_DSP";
  case MipsISD::MSUBU_DSP: return "MipsISD::MSUBU_DSP";
  case MipsISD::SHLL_DSP: return "MipsISD::SHLL_DSP";
  case MipsISD::SHRA_DSP: return "MipsISD::SHRA_DSP";
  case MipsISD::SHRL_DSP: return "MipsISD::SHRL_DSP";
  case MipsISD::SETCC_DSP: return "MipsISD::SETCC_DSP";
  case MipsISD::SELECT_CC_DSP: return "MipsISD::SELECT_CC_DSP";
  case MipsISD::VALL_ZERO: return "MipsISD::VALL_ZERO";
  case MipsISD::VANY_ZERO: return "MipsISD::VANY_ZERO";
  case MipsISD::VALL_NONZERO: return "MipsISD::VALL_NONZERO";
  case MipsISD::VANY_NONZERO: return "MipsISD::VANY_NONZERO";
  case MipsISD::VCEQ: return "MipsISD::VCEQ";
  case MipsISD::VCLE_S: return "MipsISD::VCLE_S";
  case MipsISD::VCLE_U: return "MipsISD::VCLE_U";
  case MipsISD::VCLT_S: return "MipsISD::VCLT_S";
  case MipsISD::VCLT_U: return "MipsISD::VCLT_U";
  case MipsISD::VEXTRACT_SEXT_ELT: return "MipsISD::VEXTRACT_SEXT_ELT";
  case MipsISD::VEXTRACT_ZEXT_ELT: return "MipsISD::VEXTRACT_ZEXT_ELT";
  case MipsISD::VNOR: return "MipsISD::VNOR";
  case MipsISD::VSHF: return "MipsISD::VSHF";
  case MipsISD::SHF: return "MipsISD::SHF";
  case MipsISD::ILVEV: return "MipsISD::ILVEV";
  case MipsISD::ILVOD: return "MipsISD::ILVOD";
  case MipsISD::ILVL: return "MipsISD::ILVL";
  case MipsISD::ILVR: return "MipsISD::ILVR";
  case MipsISD::PCKEV: return "MipsISD::PCKEV";
  case MipsISD::PCKOD: return "MipsISD::PCKOD";
  case MipsISD::INSVE: return "MipsISD::INSVE";
  }
  return nullptr;
}
MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
                                       const MipsSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI), ABI(TM.getABI()) {
  // Mips does not have i1 type, so use i32 for
  // setcc operations results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // The cmp.cond.fmt instruction in MIPS32r6/MIPS64r6 uses 0 and -1 like MSA
  // does. Integer booleans still use 0 and 1.
  if (Subtarget.hasMips32r6())
    setBooleanContents(ZeroOrOneBooleanContent,
                       ZeroOrNegativeOneBooleanContent);

  // Load extended operations for i1 types must be promoted.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
  }

  // MIPS doesn't have extending float->double load/store. Set LoadExtAction
  // for f32, f16.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
  }

  // Set LoadExtAction for f16 vectors to Expand.
  for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
    MVT F16VT = MVT::getVectorVT(MVT::f16, VT.getVectorNumElements());
    if (F16VT.isValid())
      setLoadExtAction(ISD::EXTLOAD, VT, F16VT, Expand);
  }

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // Used by legalize types to correctly generate the setcc result.
  // Without this, every float setcc comes with an AND/OR with the result,
  // we don't want this, since the fpcmp result goes to a flag register,
  // which is used implicitly by brcond and select operations.
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  // Mips Custom Operations
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::FABS, MVT::f32, Custom);
  setOperationAction(ISD::FABS, MVT::f64, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  if (Subtarget.isGP64bit()) {
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::LOAD, MVT::i64, Custom);
    setOperationAction(ISD::STORE, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (!Subtarget.isGP64bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  if (Subtarget.isGP64bit())
    setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  setOperationAction(ISD::SDIV, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIV, MVT::i64, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Operations not directly supported by Mips.
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (Subtarget.hasCnMips()) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);

  if (!Subtarget.hasMips32r2())
    setOperationAction(ISD::ROTR, MVT::i32, Expand);

  if (!Subtarget.hasMips64r2())
    setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FLOG, MVT::f32, Expand);
  setOperationAction(ISD::FLOG2, MVT::f32, Expand);
  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
  setOperationAction(ISD::FEXP, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);

  // Lower f16 conversion operations into library calls.
  setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);

  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Use the default for now.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (!Subtarget.isGP64bit()) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  if (!Subtarget.hasMips32r2()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  }

  // MIPS16 lacks MIPS32's clz and clo instructions.
  if (!Subtarget.hasMips32() || Subtarget.inMips16Mode())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);
  if (!Subtarget.hasMips64())
    setOperationAction(ISD::CTLZ, MVT::i64, Expand);

  if (!Subtarget.hasMips32r2())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  if (!Subtarget.hasMips64r2())
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  if (Subtarget.isGP64bit()) {
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i32, Custom);
    setTruncStoreAction(MVT::i64, MVT::i32, Custom);
  }

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  setTargetDAGCombine({ISD::SDIVREM, ISD::UDIVREM, ISD::SELECT, ISD::AND,
                       ISD::OR, ISD::ADD, ISD::SUB, ISD::AssertZext, ISD::SHL});

  if (ABI.IsO32()) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
    setLibcallName(RTLIB::MUL_I128, nullptr);
    setLibcallName(RTLIB::MULO_I64, nullptr);
    setLibcallName(RTLIB::MULO_I128, nullptr);
  }

  if (Subtarget.isGP64bit())
    setMaxAtomicSizeInBitsSupported(64);
  else
    setMaxAtomicSizeInBitsSupported(32);

  setMinFunctionAlignment(Subtarget.isGP64bit() ? Align(8) : Align(4));

  // The arguments on the stack are defined in terms of 4-byte slots on O32
  // and 8-byte slots on N32/N64.
  setMinStackArgumentAlignment((ABI.IsN32() || ABI.IsN64()) ? Align(8)
                                                            : Align(4));

  setStackPointerRegisterToSaveRestore(ABI.IsN64() ? Mips::SP_64 : Mips::SP);

  MaxStoresPerMemcpy = 16;

  isMicroMips = Subtarget.inMicroMipsMode();
}
const MipsTargetLowering *
MipsTargetLowering::create(const MipsTargetMachine &TM,
                           const MipsSubtarget &STI) {
  if (STI.inMips16Mode())
    return createMips16TargetLowering(TM, STI);

  return createMipsSETargetLowering(TM, STI);
}

// Create a fast isel object.
FastISel *
MipsTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                   const TargetLibraryInfo *libInfo) const {
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(funcInfo.MF->getTarget());

  // We support only the standard encoding [MIPS32,MIPS32R5] ISAs.
  bool UseFastISel = TM.Options.EnableFastISel && Subtarget.hasMips32() &&
                     !Subtarget.hasMips32r6() && !Subtarget.inMips16Mode() &&
                     !Subtarget.inMicroMipsMode();

  // Disable if either of the following is true:
  // We do not generate PIC, the ABI is not O32, XGOT is being used.
  if (!TM.isPositionIndependent() || !TM.getABI().IsO32() ||
      Subtarget.useXGOT())
    UseFastISel = false;

  return UseFastISel ? Mips::createFastISel(funcInfo, libInfo) : nullptr;
}

EVT MipsTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
                                           EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  EVT Ty = N->getValueType(0);
  unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
  unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
  unsigned Opc = N->getOpcode() == ISD::SDIVREM ? MipsISD::DivRem16
                                                : MipsISD::DivRemU16;
  SDLoc DL(N);

  SDValue DivRem = DAG.getNode(Opc, DL, MVT::Glue,
                               N->getOperand(0), N->getOperand(1));
  SDValue InChain = DAG.getEntryNode();
  SDValue InGlue = DivRem;

  // insert MFLO
  if (N->hasAnyUseOfValue(0)) {
    SDValue CopyFromLo = DAG.getCopyFromReg(InChain, DL, LO, Ty,
                                            InGlue);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), CopyFromLo);
    InChain = CopyFromLo.getValue(1);
    InGlue = CopyFromLo.getValue(2);
  }

  // insert MFHI
  if (N->hasAnyUseOfValue(1)) {
    SDValue CopyFromHi = DAG.getCopyFromReg(InChain, DL,
                                            HI, Ty, InGlue);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), CopyFromHi);
  }

  return SDValue();
}
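
// Rough shape of the result (a sketch): an i32 (sdivrem $a, $b) becomes a
// glued MipsISD::DivRem16 node plus CopyFromReg reads of LO0/HI0 for each
// used result, which eventually selects to something like:
//   div  $zero, $a, $b
//   mflo $quotient
//   mfhi $remainder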
static Mips::CondCode condCodeToFCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  case ISD::SETEQ:
  case ISD::SETOEQ: return Mips::FCOND_OEQ;
  case ISD::SETUNE: return Mips::FCOND_UNE;
  case ISD::SETLT:
  case ISD::SETOLT: return Mips::FCOND_OLT;
  case ISD::SETGT:
  case ISD::SETOGT: return Mips::FCOND_OGT;
  case ISD::SETLE:
  case ISD::SETOLE: return Mips::FCOND_OLE;
  case ISD::SETGE:
  case ISD::SETOGE: return Mips::FCOND_OGE;
  case ISD::SETULT: return Mips::FCOND_ULT;
  case ISD::SETULE: return Mips::FCOND_ULE;
  case ISD::SETUGT: return Mips::FCOND_UGT;
  case ISD::SETUGE: return Mips::FCOND_UGE;
  case ISD::SETUO: return Mips::FCOND_UN;
  case ISD::SETO: return Mips::FCOND_OR;
  case ISD::SETNE:
  case ISD::SETONE: return Mips::FCOND_ONE;
  case ISD::SETUEQ: return Mips::FCOND_UEQ;
  }
}

/// This function returns true if the floating point conditional branches and
/// conditional moves which use condition code CC should be inverted.
static bool invertFPCondCodeUser(Mips::CondCode CC) {
  if (CC >= Mips::FCOND_F && CC <= Mips::FCOND_NGT)
    return false;

  assert((CC >= Mips::FCOND_T && CC <= Mips::FCOND_GT) &&
         "Illegal Condition Code");

  return true;
}

// Creates and returns an FPCmp node from a setcc node.
// Returns Op if setcc is not a floating point comparison.
static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op) {
  // must be a SETCC node
  if (Op.getOpcode() != ISD::SETCC)
    return Op;

  SDValue LHS = Op.getOperand(0);

  if (!LHS.getValueType().isFloatingPoint())
    return Op;

  SDValue RHS = Op.getOperand(1);
  SDLoc DL(Op);

  // Assume the 3rd operand is a CondCodeSDNode. Add code to check the type of
  // node if necessary.
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  return DAG.getNode(MipsISD::FPCmp, DL, MVT::Glue, LHS, RHS,
                     DAG.getConstant(condCodeToFCC(CC), DL, MVT::i32));
}
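
// For example (a sketch): (setcc f32:$a, f32:$b, setolt) is rewritten to
// (FPCmp $a, $b, FCOND_OLT) producing MVT::Glue; brcond/select lowering then
// consumes that glue through the FCC0 condition-code register, as in
// createCMovFP below.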
// Creates and returns a CMovFPT/F node.
static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True,
                            SDValue False, const SDLoc &DL) {
  ConstantSDNode *CC = cast<ConstantSDNode>(Cond.getOperand(2));
  bool invert = invertFPCondCodeUser((Mips::CondCode)CC->getSExtValue());
  SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);

  return DAG.getNode((invert ? MipsISD::CMovFP_F : MipsISD::CMovFP_T), DL,
                     True.getValueType(), True, FCC0, False, Cond);
}

static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue SetCC = N->getOperand(0);

  if ((SetCC.getOpcode() != ISD::SETCC) ||
      !SetCC.getOperand(0).getValueType().isInteger())
    return SDValue();

  SDValue False = N->getOperand(2);
  EVT FalseTy = False.getValueType();

  if (!FalseTy.isInteger())
    return SDValue();

  ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(False);

  // If the RHS (False) is 0, we swap the order of the operands
  // of ISD::SELECT (obviously also inverting the condition) so that we can
  // take advantage of conditional moves using the $0 register.
  // Example:
  //   return (a != 0) ? x : 0;
  //     load $reg, x
  //     movz $reg, $0, a
  if (!FalseC)
    return SDValue();

  const SDLoc DL(N);

  if (!FalseC->getZExtValue()) {
    ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
    SDValue True = N->getOperand(1);

    SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
                         SetCC.getOperand(1),
                         ISD::getSetCCInverse(CC, SetCC.getValueType()));

    return DAG.getNode(ISD::SELECT, DL, FalseTy, SetCC, False, True);
  }

  // If both operands are integer constants there's a possibility that we
  // can do some interesting optimizations.
  SDValue True = N->getOperand(1);
  ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(True);

  if (!TrueC || !True.getValueType().isInteger())
    return SDValue();

  // We'll also ignore MVT::i64 operands as this optimization proves
  // to be ineffective because of the required sign extensions as the result
  // of a SETCC operator is always MVT::i32 for non-vector types.
  if (True.getValueType() == MVT::i64)
    return SDValue();

  int64_t Diff = TrueC->getSExtValue() - FalseC->getSExtValue();

  // 1) (a < x) ? y : y-1
  //   slti $reg1, a, x
  //   addiu $reg2, $reg1, y-1
  if (Diff == 1)
    return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, False);

  // 2) (a < x) ? y-1 : y
  //   slti $reg1, a, x
  //   xor $reg1, $reg1, 1
  //   addiu $reg2, $reg1, y-1
  if (Diff == -1) {
    ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
    SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
                         SetCC.getOperand(1),
                         ISD::getSetCCInverse(CC, SetCC.getValueType()));
    return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, True);
  }

  // Could not optimize.
  return SDValue();
}
static SDValue performCMovFPCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue ValueIfTrue = N->getOperand(0), ValueIfFalse = N->getOperand(2);

  ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(ValueIfFalse);
  if (!FalseC || FalseC->getZExtValue())
    return SDValue();

  // Since RHS (False) is 0, we swap the order of the True/False operands
  // (obviously also inverting the condition) so that we can
  // take advantage of conditional moves using the $0 register.
  // Example:
  //   return (a != 0) ? x : 0;
  //     load $reg, x
  //     movz $reg, $0, a
  unsigned Opc = (N->getOpcode() == MipsISD::CMovFP_T) ? MipsISD::CMovFP_F
                                                       : MipsISD::CMovFP_T;

  SDValue FCC = N->getOperand(1), Glue = N->getOperand(3);
  return DAG.getNode(Opc, SDLoc(N), ValueIfFalse.getValueType(),
                     ValueIfFalse, FCC, ValueIfTrue, Glue);
}

static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
    return SDValue();

  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  SDValue Mask = N->getOperand(1);
  EVT ValTy = N->getValueType(0);
  SDLoc DL(N);

  uint64_t Pos = 0;
  unsigned SMPos, SMSize;
  ConstantSDNode *CN;
  SDValue NewOperand;
  unsigned Opc;

  // Op's second operand must be a shifted mask.
  if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
      !isShiftedMask_64(CN->getZExtValue(), SMPos, SMSize))
    return SDValue();

  if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL) {
    // Pattern match EXT.
    //   $dst = and ((sra or srl) $src , pos), (2**size - 1)
    //   => ext $dst, $src, pos, size

    // The second operand of the shift must be an immediate.
    if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
      return SDValue();

    Pos = CN->getZExtValue();

    // Return if the shifted mask does not start at bit 0 or the sum of its
    // size and Pos exceeds the word's size.
    if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits())
      return SDValue();

    Opc = MipsISD::Ext;
    NewOperand = FirstOperand.getOperand(0);
  } else if (FirstOperandOpc == ISD::SHL && Subtarget.hasCnMips()) {
    // Pattern match CINS.
    //   $dst = and (shl $src , pos), mask
    //   => cins $dst, $src, pos, size
    // mask is a shifted mask with consecutive 1's, pos = shift amount,
    // size = population count.

    // The second operand of the shift must be an immediate.
    if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
      return SDValue();

    Pos = CN->getZExtValue();

    if (SMPos != Pos || Pos >= ValTy.getSizeInBits() || SMSize >= 32 ||
        Pos + SMSize > ValTy.getSizeInBits())
      return SDValue();

    NewOperand = FirstOperand.getOperand(0);
    // SMSize is 'location' (position) in this case, not size.
    SMSize--;
    Opc = MipsISD::CIns;
  } else {
    // Pattern match EXT.
    //   $dst = and $src, (2**size - 1) , if size > 16
    //   => ext $dst, $src, pos, size , pos = 0

    // If the mask is <= 0xffff, andi can be used instead.
    if (CN->getZExtValue() <= 0xffff)
      return SDValue();

    // Return if the mask doesn't start at position 0.
    if (SMPos)
      return SDValue();

    Opc = MipsISD::Ext;
    NewOperand = FirstOperand;
  }
  return DAG.getNode(Opc, DL, ValTy, NewOperand,
                     DAG.getConstant(Pos, DL, MVT::i32),
                     DAG.getConstant(SMSize, DL, MVT::i32));
}
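
// Worked example for the first pattern (a sketch): (and (srl $src, 3), 31)
// has a shifted mask of size 5 starting at bit 0 and Pos = 3, so it becomes
// (Ext $src, 3, 5), i.e. ext $dst, $src, 3, 5.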
static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const MipsSubtarget &Subtarget) {
  // Pattern match INS.
  //   $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1),
  //   where mask1 = (2**size - 1) << pos, mask0 = ~mask1
  //   => ins $dst, $src, size, pos, $src1
  if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
    return SDValue();

  SDValue And0 = N->getOperand(0), And1 = N->getOperand(1);
  unsigned SMPos0, SMSize0, SMPos1, SMSize1;
  ConstantSDNode *CN, *CN1;

  // See if Op's first operand matches (and $src1 , mask0).
  if (And0.getOpcode() != ISD::AND)
    return SDValue();

  if (!(CN = dyn_cast<ConstantSDNode>(And0.getOperand(1))) ||
      !isShiftedMask_64(~CN->getSExtValue(), SMPos0, SMSize0))
    return SDValue();

  // See if Op's second operand matches (and (shl $src, pos), mask1).
  if (And1.getOpcode() == ISD::AND &&
      And1.getOperand(0).getOpcode() == ISD::SHL) {
    if (!(CN = dyn_cast<ConstantSDNode>(And1.getOperand(1))) ||
        !isShiftedMask_64(CN->getZExtValue(), SMPos1, SMSize1))
      return SDValue();

    // The shift masks must have the same position and size.
    if (SMPos0 != SMPos1 || SMSize0 != SMSize1)
      return SDValue();

    SDValue Shl = And1.getOperand(0);

    if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1))))
      return SDValue();

    unsigned Shamt = CN->getZExtValue();

    // Return if the shift amount and the first bit position of mask are not
    // the same.
    EVT ValTy = N->getValueType(0);
    if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
      return SDValue();

    SDLoc DL(N);
    return DAG.getNode(MipsISD::Ins, DL, ValTy, Shl.getOperand(0),
                       DAG.getConstant(SMPos0, DL, MVT::i32),
                       DAG.getConstant(SMSize0, DL, MVT::i32),
                       And0.getOperand(0));
  } else {
    // Pattern match DINS.
    //   $dst = or (and $src, mask0), mask1
    //   where mask0 = ((1 << SMSize0) -1) << SMPos0
    //   => dins $dst, $src, pos, size
    if (~CN->getSExtValue() == ((((int64_t)1 << SMSize0) - 1) << SMPos0) &&
        ((SMSize0 + SMPos0 <= 64 && Subtarget.hasMips64r2()) ||
         (SMSize0 + SMPos0 <= 32))) {
      // Check if AND instruction has constant as argument
      bool isConstCase = And1.getOpcode() != ISD::AND;
      if (And1.getOpcode() == ISD::AND) {
        if (!(CN1 = dyn_cast<ConstantSDNode>(And1->getOperand(1))))
          return SDValue();
      } else {
        if (!(CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1))))
          return SDValue();
      }
      // Don't generate INS if constant OR operand doesn't fit into bits
      // cleared by constant AND operand.
      if (CN->getSExtValue() & CN1->getSExtValue())
        return SDValue();

      SDLoc DL(N);
      EVT ValTy = N->getOperand(0)->getValueType(0);
      SDValue Const1;
      SDValue SrlX;
      if (!isConstCase) {
        Const1 = DAG.getConstant(SMPos0, DL, MVT::i32);
        SrlX = DAG.getNode(ISD::SRL, DL, And1->getValueType(0), And1, Const1);
      }
      return DAG.getNode(
          MipsISD::Ins, DL, N->getValueType(0),
          isConstCase
              ? DAG.getConstant(CN1->getSExtValue() >> SMPos0, DL, ValTy)
              : SrlX,
          DAG.getConstant(SMPos0, DL, MVT::i32),
          DAG.getConstant(ValTy.getSizeInBits() / 8 < 8 ? SMSize0 & 31
                                                        : SMSize0,
                          DL, MVT::i32),
          And0->getOperand(0));
    }
    return SDValue();
  }
}
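
// Worked example for the INS pattern (a sketch): with mask1 = 0xf00 and
// mask0 = ~0xf00, (or (and $src1, ~0xf00), (and (shl $src, 8), 0xf00))
// matches with SMPos0 = 8 and SMSize0 = 4, producing
// (Ins $src, 8, 4, $src1), i.e. ins $dst, $src, 8, 4.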
static SDValue performMADD_MSUBCombine(SDNode *ROOTNode, SelectionDAG &CurDAG,
                                       const MipsSubtarget &Subtarget) {
  // ROOTNode must have a multiplication as an operand for the match to be
  // successful.
  if (ROOTNode->getOperand(0).getOpcode() != ISD::MUL &&
      ROOTNode->getOperand(1).getOpcode() != ISD::MUL)
    return SDValue();

  // In the case where we have a multiplication as the left operand of a
  // subtraction, we can't combine into a MipsISD::MSub node as the
  // instruction definition of msub(u) places the multiplication on the
  // right.
  if (ROOTNode->getOpcode() == ISD::SUB &&
      ROOTNode->getOperand(0).getOpcode() == ISD::MUL)
    return SDValue();

  // We don't handle vector types here.
  if (ROOTNode->getValueType(0).isVector())
    return SDValue();

  // For MIPS64, madd / msub instructions are inefficient to use with 64 bit
  // arithmetic. E.g.
  // (add (mul a b) c) =>
  //   let res = (madd (mthi (drotr c 32))x(mtlo c) a b) in
  //   MIPS64:   (or (dsll (mfhi res) 32) (dsrl (dsll (mflo res) 32) 32)
  // or
  //   MIPS64R2: (dins (mflo res) (mfhi res) 32 32)
  //
  // The overhead of setting up the Hi/Lo registers and reassembling the
  // result makes this a dubious optimization for MIPS64. The core of the
  // problem is that Hi/Lo contain the upper and lower 32 bits of the
  // operand and result.
  //
  // It requires a chain of 4 add/mul for MIPS64R2 to get better code
  // density than doing it naively, 5 for MIPS64. Additionally, using
  // madd/msub on MIPS64 requires the operands actually be 32 bit sign
  // extended operands, not true 64 bit values.
  //
  // FIXME: For the moment, disable this completely for MIPS64.
  if (Subtarget.hasMips64())
    return SDValue();

  SDValue Mult = ROOTNode->getOperand(0).getOpcode() == ISD::MUL
                     ? ROOTNode->getOperand(0)
                     : ROOTNode->getOperand(1);

  SDValue AddOperand = ROOTNode->getOperand(0).getOpcode() == ISD::MUL
                     ? ROOTNode->getOperand(1)
                     : ROOTNode->getOperand(0);

  // Transform this to a MADD only if the user of this node is the add.
  // If there are other users of the mul, this function returns here.
  if (!Mult.hasOneUse())
    return SDValue();

  // maddu and madd are unusual instructions in that on MIPS64 bits 63..31
  // must be in canonical form, i.e. sign extended. For MIPS32, the operands
  // of the multiply must have 32 or more sign bits, otherwise we cannot
  // perform this optimization. We have to check this here as we're performing
  // this optimization pre-legalization.
  SDValue MultLHS = Mult->getOperand(0);
  SDValue MultRHS = Mult->getOperand(1);

  bool IsSigned = MultLHS->getOpcode() == ISD::SIGN_EXTEND &&
                  MultRHS->getOpcode() == ISD::SIGN_EXTEND;
  bool IsUnsigned = MultLHS->getOpcode() == ISD::ZERO_EXTEND &&
                    MultRHS->getOpcode() == ISD::ZERO_EXTEND;

  if (!IsSigned && !IsUnsigned)
    return SDValue();

  // Initialize accumulator.
  SDLoc DL(ROOTNode);
  SDValue BottomHalf, TopHalf;
  std::tie(BottomHalf, TopHalf) =
      CurDAG.SplitScalar(AddOperand, DL, MVT::i32, MVT::i32);
  SDValue ACCIn =
      CurDAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped, BottomHalf, TopHalf);

  // Create MipsMAdd(u) / MipsMSub(u) node.
  bool IsAdd = ROOTNode->getOpcode() == ISD::ADD;
  unsigned Opcode = IsAdd ? (IsUnsigned ? MipsISD::MAddu : MipsISD::MAdd)
                          : (IsUnsigned ? MipsISD::MSubu : MipsISD::MSub);
  SDValue MAddOps[3] = {
      CurDAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mult->getOperand(0)),
      CurDAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mult->getOperand(1)), ACCIn};
  EVT VTs[2] = {MVT::i32, MVT::i32};
  SDValue MAdd = CurDAG.getNode(Opcode, DL, VTs, MAddOps);

  SDValue ResLo = CurDAG.getNode(MipsISD::MFLO, DL, MVT::i32, MAdd);
  SDValue ResHi = CurDAG.getNode(MipsISD::MFHI, DL, MVT::i32, MAdd);
  SDValue Combined =
      CurDAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, ResLo, ResHi);
  return Combined;
}
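
// Net effect (a sketch): for i64 (add (mul (sext $a), (sext $b)), $c) on
// MIPS32, $c is split into its 32-bit halves and moved into the accumulator
// via MTLOHI, the truncated multiplicands feed MAdd, and MFLO/MFHI rebuild
// the i64 result as a BUILD_PAIR, selecting to roughly mtlo/mthi + madd +
// mflo/mfhi.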
static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  // (sub v0 (mul v1, v2)) => (msub v1, v2, v0)
  if (DCI.isBeforeLegalizeOps()) {
    if (Subtarget.hasMips32() && !Subtarget.hasMips32r6() &&
        !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
      return performMADD_MSUBCombine(N, DAG, Subtarget);

    return SDValue();
  }

  return SDValue();
}

static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  // (add v0 (mul v1, v2)) => (madd v1, v2, v0)
  if (DCI.isBeforeLegalizeOps()) {
    if (Subtarget.hasMips32() && !Subtarget.hasMips32r6() &&
        !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
      return performMADD_MSUBCombine(N, DAG, Subtarget);

    return SDValue();
  }

  // (add v0, (add v1, abs_lo(tjt))) => (add (add v0, v1), abs_lo(tjt))
  SDValue Add = N->getOperand(1);

  if (Add.getOpcode() != ISD::ADD)
    return SDValue();

  SDValue Lo = Add.getOperand(1);

  if ((Lo.getOpcode() != MipsISD::Lo) ||
      (Lo.getOperand(0).getOpcode() != ISD::TargetJumpTable))
    return SDValue();

  EVT ValTy = N->getValueType(0);
  SDLoc DL(N);

  SDValue Add1 = DAG.getNode(ISD::ADD, DL, ValTy, N->getOperand(0),
                             Add.getOperand(0));
  return DAG.getNode(ISD::ADD, DL, ValTy, Add1, Lo);
}

static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  // Pattern match CINS.
  //   $dst = shl (and $src , imm), pos
  //   => cins $dst, $src, pos, size

  if (DCI.isBeforeLegalizeOps() || !Subtarget.hasCnMips())
    return SDValue();

  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  SDValue SecondOperand = N->getOperand(1);
  EVT ValTy = N->getValueType(0);
  SDLoc DL(N);

  uint64_t Pos = 0;
  unsigned SMPos, SMSize;
  ConstantSDNode *CN;
  SDValue NewOperand;

  // The second operand of the shift must be an immediate.
  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)))
    return SDValue();

  Pos = CN->getZExtValue();

  if (Pos >= ValTy.getSizeInBits())
    return SDValue();

  if (FirstOperandOpc != ISD::AND)
    return SDValue();

  // AND's second operand must be a shifted mask.
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
      !isShiftedMask_64(CN->getZExtValue(), SMPos, SMSize))
    return SDValue();

  // Return if the shifted mask does not start at bit 0 or the sum of its
  // size and Pos exceeds the word's size.
  if (SMPos != 0 || SMSize > 32 || Pos + SMSize > ValTy.getSizeInBits())
    return SDValue();

  NewOperand = FirstOperand.getOperand(0);
  // SMSize is 'location' (position) in this case, not size.
  SMSize--;

  return DAG.getNode(MipsISD::CIns, DL, ValTy, NewOperand,
                     DAG.getConstant(Pos, DL, MVT::i32),
                     DAG.getConstant(SMSize, DL, MVT::i32));
}
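
// Worked example (a sketch, cnMIPS only): (shl (and $src, 0xf), 8) has a
// shifted mask of size 4 at bit 0 and Pos = 8, so after the SMSize--
// adjustment it becomes (CIns $src, 8, 3), i.e. cins $dst, $src, 8, 3.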
SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
  const {
  SelectionDAG &DAG = DCI.DAG;
  unsigned Opc = N->getOpcode();

  switch (Opc) {
  default: break;
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    return performDivRemCombine(N, DAG, DCI, Subtarget);
  case ISD::SELECT:
    return performSELECTCombine(N, DAG, DCI, Subtarget);
  case MipsISD::CMovFP_F:
  case MipsISD::CMovFP_T:
    return performCMovFPCombine(N, DAG, DCI, Subtarget);
  case ISD::AND:
    return performANDCombine(N, DAG, DCI, Subtarget);
  case ISD::OR:
    return performORCombine(N, DAG, DCI, Subtarget);
  case ISD::ADD:
    return performADDCombine(N, DAG, DCI, Subtarget);
  case ISD::SHL:
    return performSHLCombine(N, DAG, DCI, Subtarget);
  case ISD::SUB:
    return performSUBCombine(N, DAG, DCI, Subtarget);
  }

  return SDValue();
}

bool MipsTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
  return Subtarget.hasMips32();
}

bool MipsTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
  return Subtarget.hasMips32();
}

bool MipsTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
  // We can use ANDI+SLTIU as a bit test. Y contains the bit position.
  // For MIPSR2 or later, we may be able to use the `ext` instruction or its
  // double-word variants.
  if (auto *C = dyn_cast<ConstantSDNode>(Y))
    return C->getAPIntValue().ule(15);

  return false;
}
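
// Sketch of the resulting bit test for a constant position k <= 15 (so the
// mask still fits andi's zero-extended 16-bit immediate):
//   andi  $1, $x, (1 << k)   # isolate the bit
//   sltiu $dst, $1, 1        # $dst = (bit was clear); invert as needed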
bool MipsTargetLowering::shouldFoldConstantShiftPairToMask(
    const SDNode *N, CombineLevel Level) const {
  assert(((N->getOpcode() == ISD::SHL &&
           N->getOperand(0).getOpcode() == ISD::SRL) ||
          (N->getOpcode() == ISD::SRL &&
           N->getOperand(0).getOpcode() == ISD::SHL)) &&
         "Expected shift-shift mask");

  if (N->getOperand(0).getValueType().isVector())
    return false;
  return true;
}

void
MipsTargetLowering::ReplaceNodeResults(SDNode *N,
                                       SmallVectorImpl<SDValue> &Results,
                                       SelectionDAG &DAG) const {
  return LowerOperationWrapper(N, Results, DAG);
}

SDValue MipsTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const
{
  switch (Op.getOpcode())
  {
  case ISD::BRCOND: return lowerBRCOND(Op, DAG);
  case ISD::ConstantPool: return lowerConstantPool(Op, DAG);
  case ISD::GlobalAddress: return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress: return lowerBlockAddress(Op, DAG);
  case ISD::GlobalTLSAddress: return lowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable: return lowerJumpTable(Op, DAG);
  case ISD::SELECT: return lowerSELECT(Op, DAG);
  case ISD::SETCC: return lowerSETCC(Op, DAG);
  case ISD::VASTART: return lowerVASTART(Op, DAG);
  case ISD::VAARG: return lowerVAARG(Op, DAG);
  case ISD::FCOPYSIGN: return lowerFCOPYSIGN(Op, DAG);
  case ISD::FABS: return lowerFABS(Op, DAG);
  case ISD::FRAMEADDR: return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR: return lowerRETURNADDR(Op, DAG);
  case ISD::EH_RETURN: return lowerEH_RETURN(Op, DAG);
  case ISD::ATOMIC_FENCE: return lowerATOMIC_FENCE(Op, DAG);
  case ISD::SHL_PARTS: return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS: return lowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS: return lowerShiftRightParts(Op, DAG, false);
  case ISD::LOAD: return lowerLOAD(Op, DAG);
  case ISD::STORE: return lowerSTORE(Op, DAG);
  case ISD::EH_DWARF_CFA: return lowerEH_DWARF_CFA(Op, DAG);
  case ISD::FP_TO_SINT: return lowerFP_TO_SINT(Op, DAG);
  }
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Lower helper functions
//===----------------------------------------------------------------------===//

// addLiveIn - This helper function adds the specified physical register to the
// MachineFunction as a live in value. It also creates a corresponding
// virtual register for it.
static unsigned
addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
{
  Register VReg = MF.getRegInfo().createVirtualRegister(RC);
  MF.getRegInfo().addLiveIn(PReg, VReg);
  return VReg;
}

static MachineBasicBlock *insertDivByZeroTrap(MachineInstr &MI,
                                              MachineBasicBlock &MBB,
                                              const TargetInstrInfo &TII,
                                              bool Is64Bit, bool IsMicroMips) {
  if (NoZeroDivCheck)
    return &MBB;

  // Insert instruction "teq $divisor_reg, $zero, 7".
  MachineBasicBlock::iterator I(MI);
  MachineInstrBuilder MIB;
  MachineOperand &Divisor = MI.getOperand(2);
  MIB = BuildMI(MBB, std::next(I), MI.getDebugLoc(),
                TII.get(IsMicroMips ? Mips::TEQ_MM : Mips::TEQ))
            .addReg(Divisor.getReg(), getKillRegState(Divisor.isKill()))
            .addReg(Mips::ZERO)
            .addImm(7);

  // Use the 32-bit sub-register if this is a 64-bit division.
  if (Is64Bit)
    MIB->getOperand(0).setSubReg(Mips::sub_32);

  // Clear Divisor's kill flag.
  Divisor.setIsKill(false);

  // We would normally delete the original instruction here but in this case
  // we only needed to inject an additional instruction rather than replace it.

  return &MBB;
}
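
// Resulting sequence for a guarded 32-bit division (a sketch):
//   div $zero, $a, $b       # the original division, left in place
//   teq $b, $zero, 7        # trap with code 7 if the divisor is zero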
MachineBasicBlock *
MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case Mips::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_SWAP_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_SWAP_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwapPartword(MI, BB, 1);
  case Mips::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwapPartword(MI, BB, 2);
  case Mips::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, BB);
  case Mips::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, BB);

  case Mips::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::PseudoSDIV:
  case Mips::PseudoUDIV:
  case Mips::DIV:
  case Mips::DIVU:
  case Mips::MOD:
  case Mips::MODU:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false,
                               false);
  case Mips::SDIV_MM_Pseudo:
  case Mips::UDIV_MM_Pseudo:
  case Mips::SDIV_MM:
  case Mips::UDIV_MM:
  case Mips::DIV_MMR6:
  case Mips::DIVU_MMR6:
  case Mips::MOD_MMR6:
  case Mips::MODU_MMR6:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false, true);
  case Mips::PseudoDSDIV:
  case Mips::PseudoDUDIV:
  case Mips::DDIV:
  case Mips::DDIVU:
  case Mips::DMOD:
  case Mips::DMODU:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), true, false);

  case Mips::PseudoSELECT_I:
  case Mips::PseudoSELECT_I64:
  case Mips::PseudoSELECT_S:
  case Mips::PseudoSELECT_D32:
  case Mips::PseudoSELECT_D64:
    return emitPseudoSELECT(MI, BB, false, Mips::BNE);
  case Mips::PseudoSELECTFP_F_I:
  case Mips::PseudoSELECTFP_F_I64:
  case Mips::PseudoSELECTFP_F_S:
  case Mips::PseudoSELECTFP_F_D32:
  case Mips::PseudoSELECTFP_F_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1F);
  case Mips::PseudoSELECTFP_T_I:
  case Mips::PseudoSELECTFP_T_I64:
  case Mips::PseudoSELECTFP_T_S:
  case Mips::PseudoSELECTFP_T_D32:
  case Mips::PseudoSELECTFP_T_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1T);
  case Mips::PseudoD_SELECT_I:
  case Mips::PseudoD_SELECT_I64:
    return emitPseudoD_SELECT(MI, BB);
  case Mips::LDR_W:
    return emitLDR_W(MI, BB);
  case Mips::LDR_D:
    return emitLDR_D(MI, BB);
  case Mips::STR_W:
    return emitSTR_W(MI, BB);
  case Mips::STR_D:
    return emitSTR_D(MI, BB);
  }
}
1469 // This function also handles Mips::ATOMIC_SWAP_I32 (when BinOpcode == 0), and
1470 // Mips::ATOMIC_LOAD_NAND_I32 (when Nand == true)
1471 MachineBasicBlock *
1472 MipsTargetLowering::emitAtomicBinary(MachineInstr &MI,
1473 MachineBasicBlock *BB) const {
1475 MachineFunction *MF = BB->getParent();
1476 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1477 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1478 DebugLoc DL = MI.getDebugLoc();
1480 unsigned AtomicOp;
1481 bool NeedsAdditionalReg = false;
1482 switch (MI.getOpcode()) {
1483 case Mips::ATOMIC_LOAD_ADD_I32:
1484 AtomicOp = Mips::ATOMIC_LOAD_ADD_I32_POSTRA;
1485 break;
1486 case Mips::ATOMIC_LOAD_SUB_I32:
1487 AtomicOp = Mips::ATOMIC_LOAD_SUB_I32_POSTRA;
1488 break;
1489 case Mips::ATOMIC_LOAD_AND_I32:
1490 AtomicOp = Mips::ATOMIC_LOAD_AND_I32_POSTRA;
1491 break;
1492 case Mips::ATOMIC_LOAD_OR_I32:
1493 AtomicOp = Mips::ATOMIC_LOAD_OR_I32_POSTRA;
1494 break;
1495 case Mips::ATOMIC_LOAD_XOR_I32:
1496 AtomicOp = Mips::ATOMIC_LOAD_XOR_I32_POSTRA;
1497 break;
1498 case Mips::ATOMIC_LOAD_NAND_I32:
1499 AtomicOp = Mips::ATOMIC_LOAD_NAND_I32_POSTRA;
1500 break;
1501 case Mips::ATOMIC_SWAP_I32:
1502 AtomicOp = Mips::ATOMIC_SWAP_I32_POSTRA;
1503 break;
1504 case Mips::ATOMIC_LOAD_ADD_I64:
1505 AtomicOp = Mips::ATOMIC_LOAD_ADD_I64_POSTRA;
1506 break;
1507 case Mips::ATOMIC_LOAD_SUB_I64:
1508 AtomicOp = Mips::ATOMIC_LOAD_SUB_I64_POSTRA;
1509 break;
1510 case Mips::ATOMIC_LOAD_AND_I64:
1511 AtomicOp = Mips::ATOMIC_LOAD_AND_I64_POSTRA;
1512 break;
1513 case Mips::ATOMIC_LOAD_OR_I64:
1514 AtomicOp = Mips::ATOMIC_LOAD_OR_I64_POSTRA;
1515 break;
1516 case Mips::ATOMIC_LOAD_XOR_I64:
1517 AtomicOp = Mips::ATOMIC_LOAD_XOR_I64_POSTRA;
1518 break;
1519 case Mips::ATOMIC_LOAD_NAND_I64:
1520 AtomicOp = Mips::ATOMIC_LOAD_NAND_I64_POSTRA;
1521 break;
1522 case Mips::ATOMIC_SWAP_I64:
1523 AtomicOp = Mips::ATOMIC_SWAP_I64_POSTRA;
1524 break;
1525 case Mips::ATOMIC_LOAD_MIN_I32:
1526 AtomicOp = Mips::ATOMIC_LOAD_MIN_I32_POSTRA;
1527 NeedsAdditionalReg = true;
1528 break;
1529 case Mips::ATOMIC_LOAD_MAX_I32:
1530 AtomicOp = Mips::ATOMIC_LOAD_MAX_I32_POSTRA;
1531 NeedsAdditionalReg = true;
1532 break;
1533 case Mips::ATOMIC_LOAD_UMIN_I32:
1534 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I32_POSTRA;
1535 NeedsAdditionalReg = true;
1536 break;
1537 case Mips::ATOMIC_LOAD_UMAX_I32:
1538 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I32_POSTRA;
1539 NeedsAdditionalReg = true;
1540 break;
1541 case Mips::ATOMIC_LOAD_MIN_I64:
1542 AtomicOp = Mips::ATOMIC_LOAD_MIN_I64_POSTRA;
1543 NeedsAdditionalReg = true;
1544 break;
1545 case Mips::ATOMIC_LOAD_MAX_I64:
1546 AtomicOp = Mips::ATOMIC_LOAD_MAX_I64_POSTRA;
1547 NeedsAdditionalReg = true;
1548 break;
1549 case Mips::ATOMIC_LOAD_UMIN_I64:
1550 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I64_POSTRA;
1551 NeedsAdditionalReg = true;
1552 break;
1553 case Mips::ATOMIC_LOAD_UMAX_I64:
1554 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I64_POSTRA;
1555 NeedsAdditionalReg = true;
1556 break;
1557 default:
1558 llvm_unreachable("Unknown pseudo atomic for replacement!");
1561 Register OldVal = MI.getOperand(0).getReg();
1562 Register Ptr = MI.getOperand(1).getReg();
1563 Register Incr = MI.getOperand(2).getReg();
1564 Register Scratch = RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));
1566 MachineBasicBlock::iterator II(MI);
// The scratch registers here with the EarlyClobber | Define | Implicit
// flags are used to persuade the register allocator and the machine
// verifier to accept the usage of this register. This has to be a real
// register which has an UNDEF value but is dead after the instruction,
// and which is unique among the registers chosen for the instruction.
//
// The EarlyClobber flag has the semantic property that the operand it is
// attached to is clobbered before the rest of the inputs are read. Hence it
// must be unique among the operands to the instruction.
// The Define flag is needed to convince the machine verifier that an Undef
// value isn't a problem.
// The Dead flag is needed as the value in scratch isn't used by any other
// instruction. Kill isn't used as Dead is more precise.
// The Implicit flag is here due to the interaction between the other flags
// and the machine verifier.
// For correctness purposes, a new pseudo is introduced here. We need this
// new pseudo so that FastRegisterAllocator does not see an ll/sc sequence
// that is spread over more than one basic block. A register allocator (or,
// in fact, any codegen) that introduces a store between the linked load
// and the store conditional can violate the expectations of the hardware.
// An atomic read-modify-write sequence starts with a linked load
// instruction and ends with a store conditional instruction. The atomic
// read-modify-write sequence fails if any of the following conditions
// occur between the execution of ll and sc:
//   * A coherent store is completed by another processor or coherent I/O
//     module into the block of synchronizable physical memory containing
//     the word. The size and alignment of the block is
//     implementation-dependent.
//   * A coherent store is executed between an LL and SC sequence on the
//     same processor to the block of synchronizable physical memory
//     containing the word.
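//
// As an illustrative sketch only (the authoritative expansion is performed
// post-RA, and also handles delay slots and ISA variants), the
// ATOMIC_LOAD_ADD_I32_POSTRA pseudo becomes an ll/sc retry loop roughly of
// the form:
//
//   $loop:
//     ll    $oldval, 0($ptr)          # linked load of the current value
//     addu  $scratch, $oldval, $incr  # apply the binary operation
//     sc    $scratch, 0($ptr)         # try to store the updated value
//     beqz  $scratch, $loop           # retry if the sc failed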
1603 Register PtrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Ptr));
1604 Register IncrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Incr));
1606 BuildMI(*BB, II, DL, TII->get(Mips::COPY), IncrCopy).addReg(Incr);
1607 BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);
1609 MachineInstrBuilder MIB =
1610 BuildMI(*BB, II, DL, TII->get(AtomicOp))
1611 .addReg(OldVal, RegState::Define | RegState::EarlyClobber)
1612 .addReg(PtrCopy)
1613 .addReg(IncrCopy)
1614 .addReg(Scratch, RegState::Define | RegState::EarlyClobber |
1615 RegState::Implicit | RegState::Dead);
1616 if (NeedsAdditionalReg) {
1617 Register Scratch2 =
1618 RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));
1619 MIB.addReg(Scratch2, RegState::Define | RegState::EarlyClobber |
1620 RegState::Implicit | RegState::Dead);
1623 MI.eraseFromParent();
1625 return BB;
1628 MachineBasicBlock *MipsTargetLowering::emitSignExtendToI32InReg(
1629 MachineInstr &MI, MachineBasicBlock *BB, unsigned Size, unsigned DstReg,
1630 unsigned SrcReg) const {
1631 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1632 const DebugLoc &DL = MI.getDebugLoc();
1634 if (Subtarget.hasMips32r2() && Size == 1) {
1635 BuildMI(BB, DL, TII->get(Mips::SEB), DstReg).addReg(SrcReg);
1636 return BB;
1639 if (Subtarget.hasMips32r2() && Size == 2) {
1640 BuildMI(BB, DL, TII->get(Mips::SEH), DstReg).addReg(SrcReg);
1641 return BB;
1644 MachineFunction *MF = BB->getParent();
1645 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1646 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1647 Register ScrReg = RegInfo.createVirtualRegister(RC);
1649 assert(Size < 32);
1650 int64_t ShiftImm = 32 - (Size * 8);
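// For example (assuming Size == 1, so ShiftImm == 24), the shift pair below
// behaves like the SEB instruction on targets that lack it:
//   sll $scratch, $src, 24
//   sra $dst,     $scratch, 24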
1652 BuildMI(BB, DL, TII->get(Mips::SLL), ScrReg).addReg(SrcReg).addImm(ShiftImm);
1653 BuildMI(BB, DL, TII->get(Mips::SRA), DstReg).addReg(ScrReg).addImm(ShiftImm);
1655 return BB;
1658 MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
1659 MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
1660 assert((Size == 1 || Size == 2) &&
1661 "Unsupported size for EmitAtomicBinaryPartial.");
1663 MachineFunction *MF = BB->getParent();
1664 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1665 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1666 const bool ArePtrs64bit = ABI.ArePtrs64bit();
1667 const TargetRegisterClass *RCp =
1668 getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
1669 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1670 DebugLoc DL = MI.getDebugLoc();
1672 Register Dest = MI.getOperand(0).getReg();
1673 Register Ptr = MI.getOperand(1).getReg();
1674 Register Incr = MI.getOperand(2).getReg();
1676 Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
1677 Register ShiftAmt = RegInfo.createVirtualRegister(RC);
1678 Register Mask = RegInfo.createVirtualRegister(RC);
1679 Register Mask2 = RegInfo.createVirtualRegister(RC);
1680 Register Incr2 = RegInfo.createVirtualRegister(RC);
1681 Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
1682 Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
1683 Register MaskUpper = RegInfo.createVirtualRegister(RC);
1684 Register Scratch = RegInfo.createVirtualRegister(RC);
1685 Register Scratch2 = RegInfo.createVirtualRegister(RC);
1686 Register Scratch3 = RegInfo.createVirtualRegister(RC);
1688 unsigned AtomicOp = 0;
1689 bool NeedsAdditionalReg = false;
1690 switch (MI.getOpcode()) {
1691 case Mips::ATOMIC_LOAD_NAND_I8:
1692 AtomicOp = Mips::ATOMIC_LOAD_NAND_I8_POSTRA;
1693 break;
1694 case Mips::ATOMIC_LOAD_NAND_I16:
1695 AtomicOp = Mips::ATOMIC_LOAD_NAND_I16_POSTRA;
1696 break;
1697 case Mips::ATOMIC_SWAP_I8:
1698 AtomicOp = Mips::ATOMIC_SWAP_I8_POSTRA;
1699 break;
1700 case Mips::ATOMIC_SWAP_I16:
1701 AtomicOp = Mips::ATOMIC_SWAP_I16_POSTRA;
1702 break;
1703 case Mips::ATOMIC_LOAD_ADD_I8:
1704 AtomicOp = Mips::ATOMIC_LOAD_ADD_I8_POSTRA;
1705 break;
1706 case Mips::ATOMIC_LOAD_ADD_I16:
1707 AtomicOp = Mips::ATOMIC_LOAD_ADD_I16_POSTRA;
1708 break;
1709 case Mips::ATOMIC_LOAD_SUB_I8:
1710 AtomicOp = Mips::ATOMIC_LOAD_SUB_I8_POSTRA;
1711 break;
1712 case Mips::ATOMIC_LOAD_SUB_I16:
1713 AtomicOp = Mips::ATOMIC_LOAD_SUB_I16_POSTRA;
1714 break;
1715 case Mips::ATOMIC_LOAD_AND_I8:
1716 AtomicOp = Mips::ATOMIC_LOAD_AND_I8_POSTRA;
1717 break;
1718 case Mips::ATOMIC_LOAD_AND_I16:
1719 AtomicOp = Mips::ATOMIC_LOAD_AND_I16_POSTRA;
1720 break;
1721 case Mips::ATOMIC_LOAD_OR_I8:
1722 AtomicOp = Mips::ATOMIC_LOAD_OR_I8_POSTRA;
1723 break;
1724 case Mips::ATOMIC_LOAD_OR_I16:
1725 AtomicOp = Mips::ATOMIC_LOAD_OR_I16_POSTRA;
1726 break;
1727 case Mips::ATOMIC_LOAD_XOR_I8:
1728 AtomicOp = Mips::ATOMIC_LOAD_XOR_I8_POSTRA;
1729 break;
1730 case Mips::ATOMIC_LOAD_XOR_I16:
1731 AtomicOp = Mips::ATOMIC_LOAD_XOR_I16_POSTRA;
1732 break;
1733 case Mips::ATOMIC_LOAD_MIN_I8:
1734 AtomicOp = Mips::ATOMIC_LOAD_MIN_I8_POSTRA;
1735 NeedsAdditionalReg = true;
1736 break;
1737 case Mips::ATOMIC_LOAD_MIN_I16:
1738 AtomicOp = Mips::ATOMIC_LOAD_MIN_I16_POSTRA;
1739 NeedsAdditionalReg = true;
1740 break;
1741 case Mips::ATOMIC_LOAD_MAX_I8:
1742 AtomicOp = Mips::ATOMIC_LOAD_MAX_I8_POSTRA;
1743 NeedsAdditionalReg = true;
1744 break;
1745 case Mips::ATOMIC_LOAD_MAX_I16:
1746 AtomicOp = Mips::ATOMIC_LOAD_MAX_I16_POSTRA;
1747 NeedsAdditionalReg = true;
1748 break;
1749 case Mips::ATOMIC_LOAD_UMIN_I8:
1750 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I8_POSTRA;
1751 NeedsAdditionalReg = true;
1752 break;
1753 case Mips::ATOMIC_LOAD_UMIN_I16:
1754 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I16_POSTRA;
1755 NeedsAdditionalReg = true;
1756 break;
1757 case Mips::ATOMIC_LOAD_UMAX_I8:
1758 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I8_POSTRA;
1759 NeedsAdditionalReg = true;
1760 break;
1761 case Mips::ATOMIC_LOAD_UMAX_I16:
1762 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I16_POSTRA;
1763 NeedsAdditionalReg = true;
1764 break;
1765 default:
1766 llvm_unreachable("Unknown subword atomic pseudo for expansion!");
// Insert new blocks after the current block.
1770 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1771 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1772 MachineFunction::iterator It = ++BB->getIterator();
1773 MF->insert(It, exitMBB);
1775 // Transfer the remainder of BB and its successor edges to exitMBB.
1776 exitMBB->splice(exitMBB->begin(), BB,
1777 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1778 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
1780 BB->addSuccessor(exitMBB, BranchProbability::getOne());
1782 // thisMBB:
1783 // addiu masklsb2,$0,-4 # 0xfffffffc
1784 // and alignedaddr,ptr,masklsb2
1785 // andi ptrlsb2,ptr,3
1786 // sll shiftamt,ptrlsb2,3
1787 // ori maskupper,$0,255 # 0xff
1788 // sll mask,maskupper,shiftamt
1789 // nor mask2,$0,mask
1790 // sll incr2,incr,shiftamt
1792 int64_t MaskImm = (Size == 1) ? 255 : 65535;
1793 BuildMI(BB, DL, TII->get(ABI.GetPtrAddiuOp()), MaskLSB2)
1794 .addReg(ABI.GetNullPtr()).addImm(-4);
1795 BuildMI(BB, DL, TII->get(ABI.GetPtrAndOp()), AlignedAddr)
1796 .addReg(Ptr).addReg(MaskLSB2);
1797 BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
1798 .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
1799 if (Subtarget.isLittle()) {
1800 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
1801 } else {
1802 Register Off = RegInfo.createVirtualRegister(RC);
1803 BuildMI(BB, DL, TII->get(Mips::XORi), Off)
1804 .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
1805 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
1807 BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
1808 .addReg(Mips::ZERO).addImm(MaskImm);
1809 BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
1810 .addReg(MaskUpper).addReg(ShiftAmt);
1811 BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
1812 BuildMI(BB, DL, TII->get(Mips::SLLV), Incr2).addReg(Incr).addReg(ShiftAmt);
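// As a concrete illustration (assumed operands, not emitted literally): for
// an i16 at (ptr & 3) == 2 on a little-endian target, the sequence above
// produces ShiftAmt = 16, Mask = 0xFFFF0000, Mask2 = 0x0000FFFF and
// Incr2 = incr << 16.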
// The purposes of the flags on the scratch registers are explained in
// emitAtomicBinary. In summary, we need a scratch register which is going
// to be undef and which is unique among the registers chosen for the
// instruction.
1819 MachineInstrBuilder MIB =
1820 BuildMI(BB, DL, TII->get(AtomicOp))
1821 .addReg(Dest, RegState::Define | RegState::EarlyClobber)
1822 .addReg(AlignedAddr)
1823 .addReg(Incr2)
1824 .addReg(Mask)
1825 .addReg(Mask2)
1826 .addReg(ShiftAmt)
1827 .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
1828 RegState::Dead | RegState::Implicit)
1829 .addReg(Scratch2, RegState::EarlyClobber | RegState::Define |
1830 RegState::Dead | RegState::Implicit)
1831 .addReg(Scratch3, RegState::EarlyClobber | RegState::Define |
1832 RegState::Dead | RegState::Implicit);
1833 if (NeedsAdditionalReg) {
1834 Register Scratch4 = RegInfo.createVirtualRegister(RC);
1835 MIB.addReg(Scratch4, RegState::EarlyClobber | RegState::Define |
1836 RegState::Dead | RegState::Implicit);
1839 MI.eraseFromParent(); // The instruction is gone now.
1841 return exitMBB;
// Lower atomic compare and swap to a pseudo instruction, taking care to
// define a scratch register for the pseudo instruction's expansion. The
// instruction is expanded after the register allocator so as to prevent
// the insertion of stores between the linked load and the store conditional.
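//
// A rough sketch of the post-RA expansion for the I32 case (register names
// illustrative; delay-slot scheduling and barriers are handled by the
// expansion pass):
//
//   $loop:
//     ll   $dest, 0($ptr)
//     bne  $dest, $oldval, $exit
//     move $scratch, $newval
//     sc   $scratch, 0($ptr)
//     beqz $scratch, $loop
//   $exit: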
1849 MachineBasicBlock *
1850 MipsTargetLowering::emitAtomicCmpSwap(MachineInstr &MI,
1851 MachineBasicBlock *BB) const {
1853 assert((MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ||
1854 MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I64) &&
1855 "Unsupported atomic pseudo for EmitAtomicCmpSwap.");
1857 const unsigned Size = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ? 4 : 8;
1859 MachineFunction *MF = BB->getParent();
1860 MachineRegisterInfo &MRI = MF->getRegInfo();
1861 const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
1862 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1863 DebugLoc DL = MI.getDebugLoc();
1865 unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32
1866 ? Mips::ATOMIC_CMP_SWAP_I32_POSTRA
1867 : Mips::ATOMIC_CMP_SWAP_I64_POSTRA;
1868 Register Dest = MI.getOperand(0).getReg();
1869 Register Ptr = MI.getOperand(1).getReg();
1870 Register OldVal = MI.getOperand(2).getReg();
1871 Register NewVal = MI.getOperand(3).getReg();
1873 Register Scratch = MRI.createVirtualRegister(RC);
1874 MachineBasicBlock::iterator II(MI);
1876 // We need to create copies of the various registers and kill them at the
1877 // atomic pseudo. If the copies are not made, when the atomic is expanded
1878 // after fast register allocation, the spills will end up outside of the
1879 // blocks that their values are defined in, causing livein errors.
1881 Register PtrCopy = MRI.createVirtualRegister(MRI.getRegClass(Ptr));
1882 Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));
1883 Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));
1885 BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);
1886 BuildMI(*BB, II, DL, TII->get(Mips::COPY), OldValCopy).addReg(OldVal);
1887 BuildMI(*BB, II, DL, TII->get(Mips::COPY), NewValCopy).addReg(NewVal);
// The purposes of the flags on the scratch registers are explained in
// emitAtomicBinary. In summary, we need a scratch register which is going
// to be undef and which is unique among the registers chosen for the
// instruction.
1893 BuildMI(*BB, II, DL, TII->get(AtomicOp))
1894 .addReg(Dest, RegState::Define | RegState::EarlyClobber)
1895 .addReg(PtrCopy, RegState::Kill)
1896 .addReg(OldValCopy, RegState::Kill)
1897 .addReg(NewValCopy, RegState::Kill)
1898 .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
1899 RegState::Dead | RegState::Implicit);
1901 MI.eraseFromParent(); // The instruction is gone now.
1903 return BB;
1906 MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwapPartword(
1907 MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
1908 assert((Size == 1 || Size == 2) &&
1909 "Unsupported size for EmitAtomicCmpSwapPartial.");
1911 MachineFunction *MF = BB->getParent();
1912 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1913 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1914 const bool ArePtrs64bit = ABI.ArePtrs64bit();
1915 const TargetRegisterClass *RCp =
1916 getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
1917 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1918 DebugLoc DL = MI.getDebugLoc();
1920 Register Dest = MI.getOperand(0).getReg();
1921 Register Ptr = MI.getOperand(1).getReg();
1922 Register CmpVal = MI.getOperand(2).getReg();
1923 Register NewVal = MI.getOperand(3).getReg();
1925 Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
1926 Register ShiftAmt = RegInfo.createVirtualRegister(RC);
1927 Register Mask = RegInfo.createVirtualRegister(RC);
1928 Register Mask2 = RegInfo.createVirtualRegister(RC);
1929 Register ShiftedCmpVal = RegInfo.createVirtualRegister(RC);
1930 Register ShiftedNewVal = RegInfo.createVirtualRegister(RC);
1931 Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
1932 Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
1933 Register MaskUpper = RegInfo.createVirtualRegister(RC);
1934 Register MaskedCmpVal = RegInfo.createVirtualRegister(RC);
1935 Register MaskedNewVal = RegInfo.createVirtualRegister(RC);
1936 unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8
1937 ? Mips::ATOMIC_CMP_SWAP_I8_POSTRA
1938 : Mips::ATOMIC_CMP_SWAP_I16_POSTRA;
// The scratch registers here with the EarlyClobber | Define | Dead | Implicit
// flags are used to persuade the register allocator and the machine verifier
// to accept the usage of these registers.
// The EarlyClobber flag has the semantic property that the operand it is
// attached to is clobbered before the rest of the inputs are read. Hence it
// must be unique among the operands to the instruction.
// The Define flag is needed to convince the machine verifier that an Undef
// value isn't a problem.
// The Dead flag is needed as the value in scratch isn't used by any other
// instruction. Kill isn't used as Dead is more precise.
1950 Register Scratch = RegInfo.createVirtualRegister(RC);
1951 Register Scratch2 = RegInfo.createVirtualRegister(RC);
// Insert new blocks after the current block.
1954 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1955 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1956 MachineFunction::iterator It = ++BB->getIterator();
1957 MF->insert(It, exitMBB);
1959 // Transfer the remainder of BB and its successor edges to exitMBB.
1960 exitMBB->splice(exitMBB->begin(), BB,
1961 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1962 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
1964 BB->addSuccessor(exitMBB, BranchProbability::getOne());
1966 // thisMBB:
1967 // addiu masklsb2,$0,-4 # 0xfffffffc
1968 // and alignedaddr,ptr,masklsb2
1969 // andi ptrlsb2,ptr,3
1970 // xori ptrlsb2,ptrlsb2,3 # Only for BE
1971 // sll shiftamt,ptrlsb2,3
1972 // ori maskupper,$0,255 # 0xff
1973 // sll mask,maskupper,shiftamt
1974 // nor mask2,$0,mask
1975 // andi maskedcmpval,cmpval,255
1976 // sll shiftedcmpval,maskedcmpval,shiftamt
1977 // andi maskednewval,newval,255
1978 // sll shiftednewval,maskednewval,shiftamt
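//
// For instance (an assumed i8 CAS at (ptr & 3) == 1, little-endian):
// shiftamt = 8 and mask = 0x0000FF00, so both the expected and new values
// are masked to 8 bits and shifted into byte 1 before the ll/sc loop runs.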
1979 int64_t MaskImm = (Size == 1) ? 255 : 65535;
1980 BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::DADDiu : Mips::ADDiu), MaskLSB2)
1981 .addReg(ABI.GetNullPtr()).addImm(-4);
1982 BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::AND64 : Mips::AND), AlignedAddr)
1983 .addReg(Ptr).addReg(MaskLSB2);
1984 BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
1985 .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
1986 if (Subtarget.isLittle()) {
1987 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
1988 } else {
1989 Register Off = RegInfo.createVirtualRegister(RC);
1990 BuildMI(BB, DL, TII->get(Mips::XORi), Off)
1991 .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
1992 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
1994 BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
1995 .addReg(Mips::ZERO).addImm(MaskImm);
1996 BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
1997 .addReg(MaskUpper).addReg(ShiftAmt);
1998 BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
1999 BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedCmpVal)
2000 .addReg(CmpVal).addImm(MaskImm);
2001 BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedCmpVal)
2002 .addReg(MaskedCmpVal).addReg(ShiftAmt);
2003 BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedNewVal)
2004 .addReg(NewVal).addImm(MaskImm);
2005 BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedNewVal)
2006 .addReg(MaskedNewVal).addReg(ShiftAmt);
// The purposes of the flags on the scratch registers are explained in
// emitAtomicBinary. In summary, we need a scratch register which is going
// to be undef and which is unique among the registers chosen for the
// instruction.
2012 BuildMI(BB, DL, TII->get(AtomicOp))
2013 .addReg(Dest, RegState::Define | RegState::EarlyClobber)
2014 .addReg(AlignedAddr)
2015 .addReg(Mask)
2016 .addReg(ShiftedCmpVal)
2017 .addReg(Mask2)
2018 .addReg(ShiftedNewVal)
2019 .addReg(ShiftAmt)
2020 .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
2021 RegState::Dead | RegState::Implicit)
2022 .addReg(Scratch2, RegState::EarlyClobber | RegState::Define |
2023 RegState::Dead | RegState::Implicit);
2025 MI.eraseFromParent(); // The instruction is gone now.
2027 return exitMBB;
2030 SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
2031 // The first operand is the chain, the second is the condition, the third is
2032 // the block to branch to if the condition is true.
2033 SDValue Chain = Op.getOperand(0);
2034 SDValue Dest = Op.getOperand(2);
2035 SDLoc DL(Op);
2037 assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
2038 SDValue CondRes = createFPCmp(DAG, Op.getOperand(1));
2040 // Return if flag is not set by a floating point comparison.
2041 if (CondRes.getOpcode() != MipsISD::FPCmp)
2042 return Op;
2044 SDValue CCNode = CondRes.getOperand(2);
2045 Mips::CondCode CC =
2046 (Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue();
2047 unsigned Opc = invertFPCondCodeUser(CC) ? Mips::BRANCH_F : Mips::BRANCH_T;
2048 SDValue BrCode = DAG.getConstant(Opc, DL, MVT::i32);
2049 SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
2050 return DAG.getNode(MipsISD::FPBrcond, DL, Op.getValueType(), Chain, BrCode,
2051 FCC0, Dest, CondRes);
2054 SDValue MipsTargetLowering::
2055 lowerSELECT(SDValue Op, SelectionDAG &DAG) const
2057 assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
2058 SDValue Cond = createFPCmp(DAG, Op.getOperand(0));
2060 // Return if flag is not set by a floating point comparison.
2061 if (Cond.getOpcode() != MipsISD::FPCmp)
2062 return Op;
2064 return createCMovFP(DAG, Cond, Op.getOperand(1), Op.getOperand(2),
2065 SDLoc(Op));
2068 SDValue MipsTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const {
2069 assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
2070 SDValue Cond = createFPCmp(DAG, Op);
2072 assert(Cond.getOpcode() == MipsISD::FPCmp &&
2073 "Floating point operand expected.");
2075 SDLoc DL(Op);
2076 SDValue True = DAG.getConstant(1, DL, MVT::i32);
2077 SDValue False = DAG.getConstant(0, DL, MVT::i32);
2079 return createCMovFP(DAG, Cond, True, False, DL);
2082 SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
2083 SelectionDAG &DAG) const {
2084 EVT Ty = Op.getValueType();
2085 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2086 const GlobalValue *GV = N->getGlobal();
2088 if (!isPositionIndependent()) {
2089 const MipsTargetObjectFile *TLOF =
2090 static_cast<const MipsTargetObjectFile *>(
2091 getTargetMachine().getObjFileLowering());
2092 const GlobalObject *GO = GV->getAliaseeObject();
2093 if (GO && TLOF->IsGlobalInSmallSection(GO, getTargetMachine()))
2094 // %gp_rel relocation
2095 return getAddrGPRel(N, SDLoc(N), Ty, DAG, ABI.IsN64());
2097 // %hi/%lo relocation
2098 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2099 // %highest/%higher/%hi/%lo relocation
2100 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
// Every other architecture would use shouldAssumeDSOLocal in here, but
// Mips is special.
//  * In PIC code Mips requires GOT loads even for local statics!
//  * To save on GOT entries, for local statics the GOT entry contains the
//    page and an additional add instruction takes care of the low bits.
//  * It is legal to resolve a non-hidden undefined reference against a
//    hidden symbol, so one cannot guarantee that all accesses to a hidden
//    symbol will know it is hidden.
//  * Mips linkers don't support creating a page and a full GOT entry for
//    the same symbol.
//  * Given all that, we have to use a full GOT entry for hidden symbols :-(
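//
// For a local symbol in PIC mode, getAddrLocal therefore ends up emitting
// something like the following (O32 sketch; register name illustrative):
//   lw    $t, %got(sym)($gp)   # GOT entry holds the page address
//   addiu $t, $t, %lo(sym)     # add in the low bits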
2114 if (GV->hasLocalLinkage())
2115 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2117 if (Subtarget.useXGOT())
2118 return getAddrGlobalLargeGOT(
2119 N, SDLoc(N), Ty, DAG, MipsII::MO_GOT_HI16, MipsII::MO_GOT_LO16,
2120 DAG.getEntryNode(),
2121 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2123 return getAddrGlobal(
2124 N, SDLoc(N), Ty, DAG,
2125 (ABI.IsN32() || ABI.IsN64()) ? MipsII::MO_GOT_DISP : MipsII::MO_GOT,
2126 DAG.getEntryNode(), MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2129 SDValue MipsTargetLowering::lowerBlockAddress(SDValue Op,
2130 SelectionDAG &DAG) const {
2131 BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
2132 EVT Ty = Op.getValueType();
2134 if (!isPositionIndependent())
2135 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2136 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2138 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2141 SDValue MipsTargetLowering::
2142 lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
2144 // If the relocation model is PIC, use the General Dynamic TLS Model or
2145 // Local Dynamic TLS model, otherwise use the Initial Exec or
2146 // Local Exec TLS Model.
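//
// As a sketch (O32 PIC, general dynamic; register names illustrative), the
// call lowered below comes out as roughly:
//   addiu $a0, $gp, %tlsgd(sym)
//   lw    $t9, %call16(__tls_get_addr)($gp)
//   jalr  $t9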
2148 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2149 if (DAG.getTarget().useEmulatedTLS())
2150 return LowerToTLSEmulatedModel(GA, DAG);
2152 SDLoc DL(GA);
2153 const GlobalValue *GV = GA->getGlobal();
2154 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2156 TLSModel::Model model = getTargetMachine().getTLSModel(GV);
2158 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2159 // General Dynamic and Local Dynamic TLS Model.
2160 unsigned Flag = (model == TLSModel::LocalDynamic) ? MipsII::MO_TLSLDM
2161 : MipsII::MO_TLSGD;
2163 SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, Flag);
2164 SDValue Argument = DAG.getNode(MipsISD::Wrapper, DL, PtrVT,
2165 getGlobalReg(DAG, PtrVT), TGA);
2166 unsigned PtrSize = PtrVT.getSizeInBits();
2167 IntegerType *PtrTy = Type::getIntNTy(*DAG.getContext(), PtrSize);
2169 SDValue TlsGetAddr = DAG.getExternalSymbol("__tls_get_addr", PtrVT);
2171 ArgListTy Args;
2172 ArgListEntry Entry;
2173 Entry.Node = Argument;
2174 Entry.Ty = PtrTy;
2175 Args.push_back(Entry);
2177 TargetLowering::CallLoweringInfo CLI(DAG);
2178 CLI.setDebugLoc(DL)
2179 .setChain(DAG.getEntryNode())
2180 .setLibCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args));
2181 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
2183 SDValue Ret = CallResult.first;
2185 if (model != TLSModel::LocalDynamic)
2186 return Ret;
2188 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2189 MipsII::MO_DTPREL_HI);
2190 SDValue Hi = DAG.getNode(MipsISD::TlsHi, DL, PtrVT, TGAHi);
2191 SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2192 MipsII::MO_DTPREL_LO);
2193 SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
2194 SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Ret);
2195 return DAG.getNode(ISD::ADD, DL, PtrVT, Add, Lo);
2198 SDValue Offset;
2199 if (model == TLSModel::InitialExec) {
2200 // Initial Exec TLS Model
2201 SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2202 MipsII::MO_GOTTPREL);
2203 TGA = DAG.getNode(MipsISD::Wrapper, DL, PtrVT, getGlobalReg(DAG, PtrVT),
2204 TGA);
2205 Offset =
2206 DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), TGA, MachinePointerInfo());
2207 } else {
2208 // Local Exec TLS Model
2209 assert(model == TLSModel::LocalExec);
2210 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2211 MipsII::MO_TPREL_HI);
2212 SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2213 MipsII::MO_TPREL_LO);
2214 SDValue Hi = DAG.getNode(MipsISD::TlsHi, DL, PtrVT, TGAHi);
2215 SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
2216 Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2219 SDValue ThreadPointer = DAG.getNode(MipsISD::ThreadPointer, DL, PtrVT);
2220 return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadPointer, Offset);
2223 SDValue MipsTargetLowering::
2224 lowerJumpTable(SDValue Op, SelectionDAG &DAG) const
2226 JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
2227 EVT Ty = Op.getValueType();
2229 if (!isPositionIndependent())
2230 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2231 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2233 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2236 SDValue MipsTargetLowering::
2237 lowerConstantPool(SDValue Op, SelectionDAG &DAG) const
2239 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
2240 EVT Ty = Op.getValueType();
2242 if (!isPositionIndependent()) {
2243 const MipsTargetObjectFile *TLOF =
2244 static_cast<const MipsTargetObjectFile *>(
2245 getTargetMachine().getObjFileLowering());
2247 if (TLOF->IsConstantInSmallSection(DAG.getDataLayout(), N->getConstVal(),
2248 getTargetMachine()))
2249 // %gp_rel relocation
2250 return getAddrGPRel(N, SDLoc(N), Ty, DAG, ABI.IsN64());
2252 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2253 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2256 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2259 SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2260 MachineFunction &MF = DAG.getMachineFunction();
2261 MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
2263 SDLoc DL(Op);
2264 SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
2265 getPointerTy(MF.getDataLayout()));
2267 // vastart just stores the address of the VarArgsFrameIndex slot into the
2268 // memory location argument.
2269 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2270 return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
2271 MachinePointerInfo(SV));
2274 SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
2275 SDNode *Node = Op.getNode();
2276 EVT VT = Node->getValueType(0);
2277 SDValue Chain = Node->getOperand(0);
2278 SDValue VAListPtr = Node->getOperand(1);
2279 const Align Align =
2280 llvm::MaybeAlign(Node->getConstantOperandVal(3)).valueOrOne();
2281 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2282 SDLoc DL(Node);
2283 unsigned ArgSlotSizeInBytes = (ABI.IsN32() || ABI.IsN64()) ? 8 : 4;
2285 SDValue VAListLoad = DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL, Chain,
2286 VAListPtr, MachinePointerInfo(SV));
2287 SDValue VAList = VAListLoad;
2289 // Re-align the pointer if necessary.
2290 // It should only ever be necessary for 64-bit types on O32 since the minimum
2291 // argument alignment is the same as the maximum type alignment for N32/N64.
2293 // FIXME: We currently align too often. The code generator doesn't notice
2294 // when the pointer is still aligned from the last va_arg (or pair of
2295 // va_args for the i64 on O32 case).
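//
// For example, for an i64 va_arg on O32 (Align == 8 with 4-byte argument
// slots), the two nodes below compute VAList = (VAList + 7) & ~7.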
2296 if (Align > getMinStackArgumentAlignment()) {
2297 VAList = DAG.getNode(
2298 ISD::ADD, DL, VAList.getValueType(), VAList,
2299 DAG.getConstant(Align.value() - 1, DL, VAList.getValueType()));
2301 VAList = DAG.getNode(
2302 ISD::AND, DL, VAList.getValueType(), VAList,
2303 DAG.getConstant(-(int64_t)Align.value(), DL, VAList.getValueType()));
2306 // Increment the pointer, VAList, to the next vaarg.
2307 auto &TD = DAG.getDataLayout();
2308 unsigned ArgSizeInBytes =
2309 TD.getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
2310 SDValue Tmp3 =
2311 DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
2312 DAG.getConstant(alignTo(ArgSizeInBytes, ArgSlotSizeInBytes),
2313 DL, VAList.getValueType()));
2314 // Store the incremented VAList to the legalized pointer
2315 Chain = DAG.getStore(VAListLoad.getValue(1), DL, Tmp3, VAListPtr,
2316 MachinePointerInfo(SV));
2318 // In big-endian mode we must adjust the pointer when the load size is smaller
2319 // than the argument slot size. We must also reduce the known alignment to
2320 // match. For example in the N64 ABI, we must add 4 bytes to the offset to get
2321 // the correct half of the slot, and reduce the alignment from 8 (slot
2322 // alignment) down to 4 (type alignment).
2323 if (!Subtarget.isLittle() && ArgSizeInBytes < ArgSlotSizeInBytes) {
2324 unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;
2325 VAList = DAG.getNode(ISD::ADD, DL, VAListPtr.getValueType(), VAList,
2326 DAG.getIntPtrConstant(Adjustment, DL));
2328 // Load the actual argument out of the pointer VAList
2329 return DAG.getLoad(VT, DL, Chain, VAList, MachinePointerInfo());
2332 static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG,
2333 bool HasExtractInsert) {
2334 EVT TyX = Op.getOperand(0).getValueType();
2335 EVT TyY = Op.getOperand(1).getValueType();
2336 SDLoc DL(Op);
2337 SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);
2338 SDValue Const31 = DAG.getConstant(31, DL, MVT::i32);
2339 SDValue Res;
2341 // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
2342 // to i32.
2343 SDValue X = (TyX == MVT::f32) ?
2344 DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0)) :
2345 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
2346 Const1);
2347 SDValue Y = (TyY == MVT::f32) ?
2348 DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(1)) :
2349 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(1),
2350 Const1);
2352 if (HasExtractInsert) {
2353 // ext E, Y, 31, 1 ; extract bit31 of Y
2354 // ins X, E, 31, 1 ; insert extracted bit at bit31 of X
2355 SDValue E = DAG.getNode(MipsISD::Ext, DL, MVT::i32, Y, Const31, Const1);
2356 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32, E, Const31, Const1, X);
2357 } else {
2358 // sll SllX, X, 1
2359 // srl SrlX, SllX, 1
2360 // srl SrlY, Y, 31
2361 // sll SllY, SrlX, 31
2362 // or Or, SrlX, SllY
2363 SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
2364 SDValue SrlX = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
2365 SDValue SrlY = DAG.getNode(ISD::SRL, DL, MVT::i32, Y, Const31);
2366 SDValue SllY = DAG.getNode(ISD::SHL, DL, MVT::i32, SrlY, Const31);
2367 Res = DAG.getNode(ISD::OR, DL, MVT::i32, SrlX, SllY);
2370 if (TyX == MVT::f32)
2371 return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Res);
2373 SDValue LowX = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
2374 Op.getOperand(0),
2375 DAG.getConstant(0, DL, MVT::i32));
2376 return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
2379 static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG,
2380 bool HasExtractInsert) {
2381 unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
2382 unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
2383 EVT TyX = MVT::getIntegerVT(WidthX), TyY = MVT::getIntegerVT(WidthY);
2384 SDLoc DL(Op);
2385 SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);
2387 // Bitcast to integer nodes.
2388 SDValue X = DAG.getNode(ISD::BITCAST, DL, TyX, Op.getOperand(0));
2389 SDValue Y = DAG.getNode(ISD::BITCAST, DL, TyY, Op.getOperand(1));
2391 if (HasExtractInsert) {
2392 // ext E, Y, width(Y) - 1, 1 ; extract bit width(Y)-1 of Y
2393 // ins X, E, width(X) - 1, 1 ; insert extracted bit at bit width(X)-1 of X
2394 SDValue E = DAG.getNode(MipsISD::Ext, DL, TyY, Y,
2395 DAG.getConstant(WidthY - 1, DL, MVT::i32), Const1);
2397 if (WidthX > WidthY)
2398 E = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, E);
2399 else if (WidthY > WidthX)
2400 E = DAG.getNode(ISD::TRUNCATE, DL, TyX, E);
2402 SDValue I = DAG.getNode(MipsISD::Ins, DL, TyX, E,
2403 DAG.getConstant(WidthX - 1, DL, MVT::i32), Const1,
2405 return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), I);
2408 // (d)sll SllX, X, 1
2409 // (d)srl SrlX, SllX, 1
2410 // (d)srl SrlY, Y, width(Y)-1
2411 // (d)sll SllY, SrlX, width(Y)-1
2412 // or Or, SrlX, SllY
2413 SDValue SllX = DAG.getNode(ISD::SHL, DL, TyX, X, Const1);
2414 SDValue SrlX = DAG.getNode(ISD::SRL, DL, TyX, SllX, Const1);
2415 SDValue SrlY = DAG.getNode(ISD::SRL, DL, TyY, Y,
2416 DAG.getConstant(WidthY - 1, DL, MVT::i32));
2418 if (WidthX > WidthY)
2419 SrlY = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, SrlY);
2420 else if (WidthY > WidthX)
2421 SrlY = DAG.getNode(ISD::TRUNCATE, DL, TyX, SrlY);
2423 SDValue SllY = DAG.getNode(ISD::SHL, DL, TyX, SrlY,
2424 DAG.getConstant(WidthX - 1, DL, MVT::i32));
2425 SDValue Or = DAG.getNode(ISD::OR, DL, TyX, SrlX, SllY);
2426 return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Or);
2429 SDValue
2430 MipsTargetLowering::lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
2431 if (Subtarget.isGP64bit())
2432 return lowerFCOPYSIGN64(Op, DAG, Subtarget.hasExtractInsert());
2434 return lowerFCOPYSIGN32(Op, DAG, Subtarget.hasExtractInsert());
2437 SDValue MipsTargetLowering::lowerFABS32(SDValue Op, SelectionDAG &DAG,
2438 bool HasExtractInsert) const {
2439 SDLoc DL(Op);
2440 SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);
2442 if (DAG.getTarget().Options.NoNaNsFPMath || Subtarget.inAbs2008Mode())
2443 return DAG.getNode(MipsISD::FAbs, DL, Op.getValueType(), Op.getOperand(0));
2445 // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
2446 // to i32.
2447 SDValue X = (Op.getValueType() == MVT::f32)
2448 ? DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0))
2449 : DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
2450 Op.getOperand(0), Const1);
2452 // Clear MSB.
2453 if (HasExtractInsert)
2454 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32,
2455 DAG.getRegister(Mips::ZERO, MVT::i32),
2456 DAG.getConstant(31, DL, MVT::i32), Const1, X);
2457 else {
2458 // TODO: Provide DAG patterns which transform (and x, cst)
2459 // back to a (shl (srl x (clz cst)) (clz cst)) sequence.
2460 SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
2461 Res = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
2464 if (Op.getValueType() == MVT::f32)
2465 return DAG.getNode(ISD::BITCAST, DL, MVT::f32, Res);
// FIXME: For mips32r2, when the result is the sequence (BuildPairF64
// (ins (ExtractElementF64 Op, 1), $zero, 31, 1), (ExtractElementF64 Op, 0))
// and Op has one use, we should be able to drop the usage of mfc1/mtc1 and
// rewrite the register in place.
2471 SDValue LowX =
2472 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
2473 DAG.getConstant(0, DL, MVT::i32));
2474 return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
2477 SDValue MipsTargetLowering::lowerFABS64(SDValue Op, SelectionDAG &DAG,
2478 bool HasExtractInsert) const {
2479 SDLoc DL(Op);
2480 SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);
2482 if (DAG.getTarget().Options.NoNaNsFPMath || Subtarget.inAbs2008Mode())
2483 return DAG.getNode(MipsISD::FAbs, DL, Op.getValueType(), Op.getOperand(0));
2485 // Bitcast to integer node.
2486 SDValue X = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(0));
2488 // Clear MSB.
2489 if (HasExtractInsert)
2490 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i64,
2491 DAG.getRegister(Mips::ZERO_64, MVT::i64),
2492 DAG.getConstant(63, DL, MVT::i32), Const1, X);
2493 else {
2494 SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i64, X, Const1);
2495 Res = DAG.getNode(ISD::SRL, DL, MVT::i64, SllX, Const1);
2498 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, Res);
2501 SDValue MipsTargetLowering::lowerFABS(SDValue Op, SelectionDAG &DAG) const {
2502 if ((ABI.IsN32() || ABI.IsN64()) && (Op.getValueType() == MVT::f64))
2503 return lowerFABS64(Op, DAG, Subtarget.hasExtractInsert());
2505 return lowerFABS32(Op, DAG, Subtarget.hasExtractInsert());
2508 SDValue MipsTargetLowering::
2509 lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
// Check the depth.
2511 if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
2512 DAG.getContext()->emitError(
2513 "return address can be determined only for current frame");
2514 return SDValue();
2517 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2518 MFI.setFrameAddressIsTaken(true);
2519 EVT VT = Op.getValueType();
2520 SDLoc DL(Op);
2521 SDValue FrameAddr = DAG.getCopyFromReg(
2522 DAG.getEntryNode(), DL, ABI.IsN64() ? Mips::FP_64 : Mips::FP, VT);
2523 return FrameAddr;
2526 SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
2527 SelectionDAG &DAG) const {
2528 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
2529 return SDValue();
// Check the depth.
2532 if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
2533 DAG.getContext()->emitError(
2534 "return address can be determined only for current frame");
2535 return SDValue();
2538 MachineFunction &MF = DAG.getMachineFunction();
2539 MachineFrameInfo &MFI = MF.getFrameInfo();
2540 MVT VT = Op.getSimpleValueType();
2541 unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
2542 MFI.setReturnAddressIsTaken(true);
2544 // Return RA, which contains the return address. Mark it an implicit live-in.
2545 Register Reg = MF.addLiveIn(RA, getRegClassFor(VT));
2546 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), Reg, VT);
// An EH_RETURN is the result of lowering llvm.eh.return, which in turn is
// generated from __builtin_eh_return (offset, handler).
// The effect of this is to adjust the stack pointer by "offset"
// and then branch to "handler".
2553 SDValue MipsTargetLowering::lowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
2554 const {
2555 MachineFunction &MF = DAG.getMachineFunction();
2556 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
2558 MipsFI->setCallsEhReturn();
2559 SDValue Chain = Op.getOperand(0);
2560 SDValue Offset = Op.getOperand(1);
2561 SDValue Handler = Op.getOperand(2);
2562 SDLoc DL(Op);
2563 EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
2565 // Store stack offset in V1, store jump target in V0. Glue CopyToReg and
2566 // EH_RETURN nodes, so that instructions are emitted back-to-back.
2567 unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;
2568 unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
2569 Chain = DAG.getCopyToReg(Chain, DL, OffsetReg, Offset, SDValue());
2570 Chain = DAG.getCopyToReg(Chain, DL, AddrReg, Handler, Chain.getValue(1));
2571 return DAG.getNode(MipsISD::EH_RETURN, DL, MVT::Other, Chain,
2572 DAG.getRegister(OffsetReg, Ty),
2573 DAG.getRegister(AddrReg, getPointerTy(MF.getDataLayout())),
2574 Chain.getValue(1));
2577 SDValue MipsTargetLowering::lowerATOMIC_FENCE(SDValue Op,
2578 SelectionDAG &DAG) const {
2579 // FIXME: Need pseudo-fence for 'singlethread' fences
2580 // FIXME: Set SType for weaker fences where supported/appropriate.
2581 unsigned SType = 0;
2582 SDLoc DL(Op);
2583 return DAG.getNode(MipsISD::Sync, DL, MVT::Other, Op.getOperand(0),
2584 DAG.getConstant(SType, DL, MVT::i32));
2587 SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
2588 SelectionDAG &DAG) const {
2589 SDLoc DL(Op);
2590 MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
2592 SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
2593 SDValue Shamt = Op.getOperand(2);
2594 // if shamt < (VT.bits):
2595 // lo = (shl lo, shamt)
2596 // hi = (or (shl hi, shamt) (srl (srl lo, 1), ~shamt))
2597 // else:
2598 // lo = 0
2599 // hi = (shl lo, shamt[4:0])
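// E.g. for a 64-bit shift by shamt == 40 on a 32-bit target, the second
// branch applies: lo = 0 and hi = lo << (40 & 31) = lo << 8.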
2600 SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
2601 DAG.getConstant(-1, DL, MVT::i32));
2602 SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo,
2603 DAG.getConstant(1, DL, VT));
2604 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, Not);
2605 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
2606 SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2607 SDValue ShiftLeftLo = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
2608 SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
2609 DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32));
2610 Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2611 DAG.getConstant(0, DL, VT), ShiftLeftLo);
2612 Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftLeftLo, Or);
2614 SDValue Ops[2] = {Lo, Hi};
2615 return DAG.getMergeValues(Ops, DL);
2618 SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
2619 bool IsSRA) const {
2620 SDLoc DL(Op);
2621 SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
2622 SDValue Shamt = Op.getOperand(2);
2623 MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
2625 // if shamt < (VT.bits):
2626 // lo = (or (shl (shl hi, 1), ~shamt) (srl lo, shamt))
2627 // if isSRA:
2628 // hi = (sra hi, shamt)
2629 // else:
2630 // hi = (srl hi, shamt)
2631 // else:
2632 // if isSRA:
2633 // lo = (sra hi, shamt[4:0])
2634 // hi = (sra hi, 31)
2635 // else:
2636 // lo = (srl hi, shamt[4:0])
2637 // hi = 0
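// E.g. an arithmetic 64-bit shift right by shamt == 40 on a 32-bit target
// takes the second branch: lo = (sra hi, 8) and hi = (sra hi, 31), i.e. the
// high word is replaced by copies of the sign bit.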
2638 SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
2639 DAG.getConstant(-1, DL, MVT::i32));
2640 SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, VT, Hi,
2641 DAG.getConstant(1, DL, VT));
2642 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, ShiftLeft1Hi, Not);
2643 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
2644 SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2645 SDValue ShiftRightHi = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL,
2646 DL, VT, Hi, Shamt);
2647 SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
2648 DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32));
2649 SDValue Ext = DAG.getNode(ISD::SRA, DL, VT, Hi,
2650 DAG.getConstant(VT.getSizeInBits() - 1, DL, VT));
2652 if (!(Subtarget.hasMips4() || Subtarget.hasMips32())) {
2653 SDVTList VTList = DAG.getVTList(VT, VT);
2654 return DAG.getNode(Subtarget.isGP64bit() ? Mips::PseudoD_SELECT_I64
2655 : Mips::PseudoD_SELECT_I,
2656 DL, VTList, Cond, ShiftRightHi,
2657 IsSRA ? Ext : DAG.getConstant(0, DL, VT), Or,
2658 ShiftRightHi);
2661 Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftRightHi, Or);
2662 Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2663 IsSRA ? Ext : DAG.getConstant(0, DL, VT), ShiftRightHi);
2665 SDValue Ops[2] = {Lo, Hi};
2666 return DAG.getMergeValues(Ops, DL);
2669 static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD,
2670 SDValue Chain, SDValue Src, unsigned Offset) {
2671 SDValue Ptr = LD->getBasePtr();
2672 EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
2673 EVT BasePtrVT = Ptr.getValueType();
2674 SDLoc DL(LD);
2675 SDVTList VTList = DAG.getVTList(VT, MVT::Other);
2677 if (Offset)
2678 Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
2679 DAG.getConstant(Offset, DL, BasePtrVT));
2681 SDValue Ops[] = { Chain, Ptr, Src };
2682 return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
2683 LD->getMemOperand());
2686 // Expand an unaligned 32 or 64-bit integer load node.
2687 SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
2688 LoadSDNode *LD = cast<LoadSDNode>(Op);
2689 EVT MemVT = LD->getMemoryVT();
2691 if (Subtarget.systemSupportsUnalignedAccess())
2692 return Op;
2694 // Return if load is aligned or if MemVT is neither i32 nor i64.
2695 if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||
2696 ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
2697 return SDValue();
2699 bool IsLittle = Subtarget.isLittle();
2700 EVT VT = Op.getValueType();
2701 ISD::LoadExtType ExtType = LD->getExtensionType();
2702 SDValue Chain = LD->getChain(), Undef = DAG.getUNDEF(VT);
2704 assert((VT == MVT::i32) || (VT == MVT::i64));
2706 // Expand
2707 // (set dst, (i64 (load baseptr)))
2708 // to
2709 // (set tmp, (ldl (add baseptr, 7), undef))
2710 // (set dst, (ldr baseptr, tmp))
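//
// On little-endian targets this corresponds to the classic unaligned-load
// pair (the offsets are swapped for big-endian):
//   ldl $dst, 7($baseptr)
//   ldr $dst, 0($baseptr)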
2711 if ((VT == MVT::i64) && (ExtType == ISD::NON_EXTLOAD)) {
2712 SDValue LDL = createLoadLR(MipsISD::LDL, DAG, LD, Chain, Undef,
2713 IsLittle ? 7 : 0);
2714 return createLoadLR(MipsISD::LDR, DAG, LD, LDL.getValue(1), LDL,
2715 IsLittle ? 0 : 7);
2718 SDValue LWL = createLoadLR(MipsISD::LWL, DAG, LD, Chain, Undef,
2719 IsLittle ? 3 : 0);
2720 SDValue LWR = createLoadLR(MipsISD::LWR, DAG, LD, LWL.getValue(1), LWL,
2721 IsLittle ? 0 : 3);
2723 // Expand
2724 // (set dst, (i32 (load baseptr))) or
2725 // (set dst, (i64 (sextload baseptr))) or
2726 // (set dst, (i64 (extload baseptr)))
2727 // to
2728 // (set tmp, (lwl (add baseptr, 3), undef))
2729 // (set dst, (lwr baseptr, tmp))
2730 if ((VT == MVT::i32) || (ExtType == ISD::SEXTLOAD) ||
2731 (ExtType == ISD::EXTLOAD))
2732 return LWR;
2734 assert((VT == MVT::i64) && (ExtType == ISD::ZEXTLOAD));
2736 // Expand
2737 // (set dst, (i64 (zextload baseptr)))
2738 // to
2739 // (set tmp0, (lwl (add baseptr, 3), undef))
2740 // (set tmp1, (lwr baseptr, tmp0))
2741 // (set tmp2, (shl tmp1, 32))
2742 // (set dst, (srl tmp2, 32))
2743 SDLoc DL(LD);
2744 SDValue Const32 = DAG.getConstant(32, DL, MVT::i32);
2745 SDValue SLL = DAG.getNode(ISD::SHL, DL, MVT::i64, LWR, Const32);
2746 SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i64, SLL, Const32);
2747 SDValue Ops[] = { SRL, LWR.getValue(1) };
2748 return DAG.getMergeValues(Ops, DL);
2751 static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD,
2752 SDValue Chain, unsigned Offset) {
2753 SDValue Ptr = SD->getBasePtr(), Value = SD->getValue();
2754 EVT MemVT = SD->getMemoryVT(), BasePtrVT = Ptr.getValueType();
2755 SDLoc DL(SD);
2756 SDVTList VTList = DAG.getVTList(MVT::Other);
2758 if (Offset)
2759 Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
2760 DAG.getConstant(Offset, DL, BasePtrVT));
2762 SDValue Ops[] = { Chain, Value, Ptr };
2763 return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
2764 SD->getMemOperand());
2767 // Expand an unaligned 32 or 64-bit integer store node.
2768 static SDValue lowerUnalignedIntStore(StoreSDNode *SD, SelectionDAG &DAG,
2769 bool IsLittle) {
2770 SDValue Value = SD->getValue(), Chain = SD->getChain();
2771 EVT VT = Value.getValueType();
2773 // Expand
2774 // (store val, baseptr) or
2775 // (truncstore val, baseptr)
2776 // to
2777 // (swl val, (add baseptr, 3))
2778 // (swr val, baseptr)
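//
// i.e., assuming a little-endian target (offsets swap for big-endian):
//   swl $val, 3($baseptr)
//   swr $val, 0($baseptr)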
2779 if ((VT == MVT::i32) || SD->isTruncatingStore()) {
2780 SDValue SWL = createStoreLR(MipsISD::SWL, DAG, SD, Chain,
2781 IsLittle ? 3 : 0);
2782 return createStoreLR(MipsISD::SWR, DAG, SD, SWL, IsLittle ? 0 : 3);
2785 assert(VT == MVT::i64);
2787 // Expand
2788 // (store val, baseptr)
2789 // to
2790 // (sdl val, (add baseptr, 7))
2791 // (sdr val, baseptr)
2792 SDValue SDL = createStoreLR(MipsISD::SDL, DAG, SD, Chain, IsLittle ? 7 : 0);
2793 return createStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7);
2796 // Lower (store (fp_to_sint $fp) $ptr) to (store (TruncIntFP $fp), $ptr).
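// Doing so lets instruction selection keep the value in an FPU register and
// store it directly, e.g. (f32 -> i32 sketch; register names illustrative):
//   trunc.w.s $f0, $f12
//   swc1      $f0, 0($ptr)
// rather than moving the converted value through an integer register first.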
2797 static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG,
2798 bool SingleFloat) {
2799 SDValue Val = SD->getValue();
2801 if (Val.getOpcode() != ISD::FP_TO_SINT ||
2802 (Val.getValueSizeInBits() > 32 && SingleFloat))
2803 return SDValue();
2805 EVT FPTy = EVT::getFloatingPointVT(Val.getValueSizeInBits());
2806 SDValue Tr = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Val), FPTy,
2807 Val.getOperand(0));
2808 return DAG.getStore(SD->getChain(), SDLoc(SD), Tr, SD->getBasePtr(),
2809 SD->getPointerInfo(), SD->getAlign(),
2810 SD->getMemOperand()->getFlags());
2813 SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
2814 StoreSDNode *SD = cast<StoreSDNode>(Op);
2815 EVT MemVT = SD->getMemoryVT();
2817 // Lower unaligned integer stores.
2818 if (!Subtarget.systemSupportsUnalignedAccess() &&
2819 (SD->getAlign().value() < (MemVT.getSizeInBits() / 8)) &&
2820 ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
2821 return lowerUnalignedIntStore(SD, DAG, Subtarget.isLittle());
2823 return lowerFP_TO_SINT_STORE(SD, DAG, Subtarget.isSingleFloat());
2826 SDValue MipsTargetLowering::lowerEH_DWARF_CFA(SDValue Op,
2827 SelectionDAG &DAG) const {
2829 // Return a fixed StackObject with offset 0 which points to the old stack
2830 // pointer.
2831 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2832 EVT ValTy = Op->getValueType(0);
2833 int FI = MFI.CreateFixedObject(Op.getValueSizeInBits() / 8, 0, false);
2834 return DAG.getFrameIndex(FI, ValTy);
2837 SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
2838 SelectionDAG &DAG) const {
2839 if (Op.getValueSizeInBits() > 32 && Subtarget.isSingleFloat())
2840 return SDValue();
2842 EVT FPTy = EVT::getFloatingPointVT(Op.getValueSizeInBits());
2843 SDValue Trunc = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Op), FPTy,
2844 Op.getOperand(0));
2845 return DAG.getNode(ISD::BITCAST, SDLoc(Op), Op.getValueType(), Trunc);
2848 //===----------------------------------------------------------------------===//
2849 // Calling Convention Implementation
2850 //===----------------------------------------------------------------------===//
2852 //===----------------------------------------------------------------------===//
2853 // TODO: Implement a generic logic using tblgen that can support this.
2854 // Mips O32 ABI rules:
2855 // ---
2856 // i32 - Passed in A0, A1, A2, A3 and stack
2857 // f32 - Only passed in f32 registers if no int reg has been used yet to hold
2858 // an argument. Otherwise, passed in A1, A2, A3 and stack.
2859 // f64 - Only passed in two aliased f32 registers if no int reg has been used
2860 // yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is
2861 // not used, it must be shadowed. If only A3 is available, shadow it and
2862 // go to stack.
2863 // vXiX - Received as scalarized i32s, passed in A0 - A3 and the stack.
2864 // vXf32 - Passed in either a pair of registers {A0, A1}, {A2, A3} or {A0 - A3}
2865 // with the remainder spilled to the stack.
2866 // vXf64 - Passed in either {A0, A1, A2, A3} or {A2, A3} and in both cases
2867 // spilling the remainder to the stack.
2869 // For vararg functions, all arguments are passed in A0, A1, A2, A3 and stack.
2870 //===----------------------------------------------------------------------===//
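// For example, for void f(int a, double b) under these rules: a is passed
// in A0 and b in the aligned pair A2/A3, with A1 shadowed (left unused) to
// satisfy the 8-byte alignment requirement described above.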
2872 static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
2873 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
2874 CCState &State, ArrayRef<MCPhysReg> F64Regs) {
2875 const MipsSubtarget &Subtarget = static_cast<const MipsSubtarget &>(
2876 State.getMachineFunction().getSubtarget());
2878 static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
const MipsCCState *MipsState = static_cast<MipsCCState *>(&State);
2882 static const MCPhysReg F32Regs[] = { Mips::F12, Mips::F14 };
2884 static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };
2886 // Do not process byval args here.
2887 if (ArgFlags.isByVal())
2888 return true;
// Promote i8, i16 and i32 inreg arguments to the upper bits of the register
// on big-endian targets.
2891 if (ArgFlags.isInReg() && !Subtarget.isLittle()) {
2892 if (LocVT == MVT::i8 || LocVT == MVT::i16 || LocVT == MVT::i32) {
2893 LocVT = MVT::i32;
2894 if (ArgFlags.isSExt())
2895 LocInfo = CCValAssign::SExtUpper;
2896 else if (ArgFlags.isZExt())
2897 LocInfo = CCValAssign::ZExtUpper;
2898 else
2899 LocInfo = CCValAssign::AExtUpper;
2903 // Promote i8 and i16
2904 if (LocVT == MVT::i8 || LocVT == MVT::i16) {
2905 LocVT = MVT::i32;
2906 if (ArgFlags.isSExt())
2907 LocInfo = CCValAssign::SExt;
2908 else if (ArgFlags.isZExt())
2909 LocInfo = CCValAssign::ZExt;
2910 else
2911 LocInfo = CCValAssign::AExt;
2914 unsigned Reg;
// f32 and f64 are allocated in A0, A1, A2, A3 when any of the following
// is true: the function is vararg, the argument is the 3rd or higher, or
// there is a previous argument which is not f32 or f64.
2919 bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
2920 State.getFirstUnallocated(F32Regs) != ValNo;
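// For example, in a call such as printf("%f", x) the double x is a vararg,
// so it is passed in the integer pair A2/A3 rather than in an FPU register.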
2921 Align OrigAlign = ArgFlags.getNonZeroOrigAlign();
2922 bool isI64 = (ValVT == MVT::i32 && OrigAlign == Align(8));
2923 bool isVectorFloat = MipsState->WasOriginalArgVectorFloat(ValNo);
2925 // The MIPS vector ABI for floats passes them in a pair of registers
2926 if (ValVT == MVT::i32 && isVectorFloat) {
// This is the start of a vector that was scalarized into an unknown number
// of components. It doesn't matter how many there are. Allocate one of the
// notional 8 byte aligned registers which map onto the argument stack, and
// shadow the register lost to alignment requirements.
2931 if (ArgFlags.isSplit()) {
2932 Reg = State.AllocateReg(FloatVectorIntRegs);
2933 if (Reg == Mips::A2)
2934 State.AllocateReg(Mips::A1);
2935 else if (Reg == 0)
2936 State.AllocateReg(Mips::A3);
2937 } else {
2938 // If we're an intermediate component of the split, we can just attempt to
2939 // allocate a register directly.
2940 Reg = State.AllocateReg(IntRegs);
2942 } else if (ValVT == MVT::i32 ||
2943 (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
2944 Reg = State.AllocateReg(IntRegs);
2945 // If this is the first part of an i64 arg,
2946 // the allocated register must be either A0 or A2.
2947 if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
2948 Reg = State.AllocateReg(IntRegs);
2949 LocVT = MVT::i32;
2950 } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
2951 // Allocate int register and shadow next int register. If first
2952 // available register is Mips::A1 or Mips::A3, shadow it too.
2953 Reg = State.AllocateReg(IntRegs);
2954 if (Reg == Mips::A1 || Reg == Mips::A3)
2955 Reg = State.AllocateReg(IntRegs);
2957 if (Reg) {
2958 LocVT = MVT::i32;
2960 State.addLoc(
2961 CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2962 MCRegister HiReg = State.AllocateReg(IntRegs);
2963 assert(HiReg);
2964 State.addLoc(
2965 CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
2966 return false;
2968 } else if (ValVT.isFloatingPoint() && !AllocateFloatsInIntReg) {
2969 // We are guaranteed to find an available float register.
2970 if (ValVT == MVT::f32) {
2971 Reg = State.AllocateReg(F32Regs);
2972 // Shadow int register
2973 State.AllocateReg(IntRegs);
2974 } else {
2975 Reg = State.AllocateReg(F64Regs);
2976 // Shadow int registers
2977 unsigned Reg2 = State.AllocateReg(IntRegs);
2978 if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
2979 State.AllocateReg(IntRegs);
2980 State.AllocateReg(IntRegs);
2982 } else
2983 llvm_unreachable("Cannot handle this ValVT.");
2985 if (!Reg) {
2986 unsigned Offset = State.AllocateStack(ValVT.getStoreSize(), OrigAlign);
2987 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
2988 } else
2989 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2991 return false;
2994 static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT,
2995 MVT LocVT, CCValAssign::LocInfo LocInfo,
2996 ISD::ArgFlagsTy ArgFlags, CCState &State) {
2997 static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };
2999 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
3002 static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT,
3003 MVT LocVT, CCValAssign::LocInfo LocInfo,
3004 ISD::ArgFlagsTy ArgFlags, CCState &State) {
3005 static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };
3007 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
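// Note (explanatory, based on the O32 FPU register layout): in FR=0 mode
// (CC_MipsO32_FP32) the f64 values live in the even/odd single pairs
// D6 = ($f12, $f13) and D7 = ($f14, $f15), while in FR=1 mode
// (CC_MipsO32_FP64) D12_64 and D14_64 are true 64-bit registers overlapping
// $f12 and $f14.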
3010 static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
3011 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
3012 CCState &State) LLVM_ATTRIBUTE_UNUSED;
3014 #include "MipsGenCallingConv.inc"
3016 CCAssignFn *MipsTargetLowering::CCAssignFnForCall() const {
3017 return CC_Mips_FixedArg;
3020 CCAssignFn *MipsTargetLowering::CCAssignFnForReturn() const {
3021 return RetCC_Mips;
3023 //===----------------------------------------------------------------------===//
3024 // Call Calling Convention Implementation
3025 //===----------------------------------------------------------------------===//
3027 SDValue MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,
3028 SDValue Chain, SDValue Arg,
3029 const SDLoc &DL, bool IsTailCall,
3030 SelectionDAG &DAG) const {
3031 if (!IsTailCall) {
3032 SDValue PtrOff =
3033 DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()), StackPtr,
3034 DAG.getIntPtrConstant(Offset, DL));
3035 return DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo());
3038 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
3039 int FI = MFI.CreateFixedObject(Arg.getValueSizeInBits() / 8, Offset, false);
3040 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3041 return DAG.getStore(Chain, DL, Arg, FIN, MachinePointerInfo(), MaybeAlign(),
3042 MachineMemOperand::MOVolatile);
3045 void MipsTargetLowering::
3046 getOpndList(SmallVectorImpl<SDValue> &Ops,
3047 std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
3048 bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
3049 bool IsCallReloc, CallLoweringInfo &CLI, SDValue Callee,
3050 SDValue Chain) const {
3051 // Insert node "GP copy globalreg" before call to function.
3053 // R_MIPS_CALL* operators (emitted when non-internal functions are called
3054 // in PIC mode) allow symbols to be resolved via lazy binding.
3055 // The lazy binding stub requires GP to point to the GOT.
3056 // Note that we don't need GP to point to the GOT for indirect calls
3057 // (when R_MIPS_CALL* is not used for the call) because the Mips linker only
3058 // generates a lazy binding stub for a function when R_MIPS_CALL* are the only
3059 // relocs used for the function (that is, the Mips linker doesn't generate a
3060 // lazy binding stub for a function whose address is taken in the program).
3061 if (IsPICCall && !InternalLinkage && IsCallReloc) {
3062 unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;
3063 EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
3064 RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
3067 // Build a sequence of copy-to-reg nodes chained together with token
3068 // chain and flag operands which copy the outgoing args into registers.
3069 // The InGlue is necessary since all emitted instructions must be
3070 // glued together.
3071 SDValue InGlue;
3073 for (auto &R : RegsToPass) {
3074 Chain = CLI.DAG.getCopyToReg(Chain, CLI.DL, R.first, R.second, InGlue);
3075 InGlue = Chain.getValue(1);
3078 // Add argument registers to the end of the list so that they are
3079 // known live into the call.
3080 for (auto &R : RegsToPass)
3081 Ops.push_back(CLI.DAG.getRegister(R.first, R.second.getValueType()));
3083 // Add a register mask operand representing the call-preserved registers.
3084 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
3085 const uint32_t *Mask =
3086 TRI->getCallPreservedMask(CLI.DAG.getMachineFunction(), CLI.CallConv);
3087 assert(Mask && "Missing call preserved mask for calling convention");
3088 if (Subtarget.inMips16HardFloat()) {
3089 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(CLI.Callee)) {
3090 StringRef Sym = G->getGlobal()->getName();
3091 Function *F = G->getGlobal()->getParent()->getFunction(Sym);
3092 if (F && F->hasFnAttribute("__Mips16RetHelper")) {
3093 Mask = MipsRegisterInfo::getMips16RetHelperMask();
3097 Ops.push_back(CLI.DAG.getRegisterMask(Mask));
3099 if (InGlue.getNode())
3100 Ops.push_back(InGlue);
3103 void MipsTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
3104 SDNode *Node) const {
3105 switch (MI.getOpcode()) {
3106 default:
3107 return;
3108 case Mips::JALR:
3109 case Mips::JALRPseudo:
3110 case Mips::JALR64:
3111 case Mips::JALR64Pseudo:
3112 case Mips::JALR16_MM:
3113 case Mips::JALRC16_MMR6:
3114 case Mips::TAILCALLREG:
3115 case Mips::TAILCALLREG64:
3116 case Mips::TAILCALLR6REG:
3117 case Mips::TAILCALL64R6REG:
3118 case Mips::TAILCALLREG_MM:
3119 case Mips::TAILCALLREG_MMR6: {
3120 if (!EmitJalrReloc ||
3121 Subtarget.inMips16Mode() ||
3122 !isPositionIndependent() ||
3123 Node->getNumOperands() < 1 ||
3124 Node->getOperand(0).getNumOperands() < 2) {
3125 return;
3127 // We are looking for the callee address, which was set up by LowerCall().
3128 // If added to MI, the asm printer will emit .reloc R_MIPS_JALR for the
3129 // symbol.
3130 const SDValue TargetAddr = Node->getOperand(0).getOperand(1);
3131 StringRef Sym;
3132 if (const GlobalAddressSDNode *G =
3133 dyn_cast_or_null<const GlobalAddressSDNode>(TargetAddr)) {
3134 // We must not emit the R_MIPS_JALR relocation against data symbols
3135 // since this will cause run-time crashes if the linker replaces the
3136 // call instruction with a relative branch to the data symbol.
3137 if (!isa<Function>(G->getGlobal())) {
3138 LLVM_DEBUG(dbgs() << "Not adding R_MIPS_JALR against data symbol "
3139 << G->getGlobal()->getName() << "\n");
3140 return;
3142 Sym = G->getGlobal()->getName();
3144 else if (const ExternalSymbolSDNode *ES =
3145 dyn_cast_or_null<const ExternalSymbolSDNode>(TargetAddr)) {
3146 Sym = ES->getSymbol();
3149 if (Sym.empty())
3150 return;
3152 MachineFunction *MF = MI.getParent()->getParent();
3153 MCSymbol *S = MF->getContext().getOrCreateSymbol(Sym);
3154 LLVM_DEBUG(dbgs() << "Adding R_MIPS_JALR against " << Sym << "\n");
3155 MI.addOperand(MachineOperand::CreateMCSymbol(S, MipsII::MO_JALR));
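// A sketch of the resulting assembly (illustrative; the label name is
// hypothetical): for a PIC call through $25 to a function foo, the asm
// printer emits something like
//   .reloc .Ltmp0, R_MIPS_JALR, foo
//   .Ltmp0:
//   jalr  $25
// allowing the linker to relax the indirect call into a direct branch when
// foo turns out to be locally bound.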
3160 /// LowerCall - function arguments are copied from virtual regs to
3161 /// (physical regs)/(stack frame); CALLSEQ_START and CALLSEQ_END are emitted.
3162 SDValue
3163 MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
3164 SmallVectorImpl<SDValue> &InVals) const {
3165 SelectionDAG &DAG = CLI.DAG;
3166 SDLoc DL = CLI.DL;
3167 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
3168 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
3169 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
3170 SDValue Chain = CLI.Chain;
3171 SDValue Callee = CLI.Callee;
3172 bool &IsTailCall = CLI.IsTailCall;
3173 CallingConv::ID CallConv = CLI.CallConv;
3174 bool IsVarArg = CLI.IsVarArg;
3176 MachineFunction &MF = DAG.getMachineFunction();
3177 MachineFrameInfo &MFI = MF.getFrameInfo();
3178 const TargetFrameLowering *TFL = Subtarget.getFrameLowering();
3179 MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
3180 bool IsPIC = isPositionIndependent();
3182 // Analyze operands of the call, assigning locations to each operand.
3183 SmallVector<CCValAssign, 16> ArgLocs;
3184 MipsCCState CCInfo(
3185 CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs, *DAG.getContext(),
3186 MipsCCState::getSpecialCallingConvForCallee(Callee.getNode(), Subtarget));
3188 const ExternalSymbolSDNode *ES =
3189 dyn_cast_or_null<const ExternalSymbolSDNode>(Callee.getNode());
3191 // There is one case where CALLSEQ_START..CALLSEQ_END can be nested, which
3192 // is during the lowering of a call with a byval argument which produces
3193 // a call to memcpy. For the O32 case, this causes the caller to allocate
3194 // stack space for the reserved argument area for the callee, then recursively
3195 // again for the memcpy call. In the NEWABI case, this doesn't occur as those
3196 // ABIs mandate that the callee allocates the reserved argument area. We do
3197 // still produce nested CALLSEQ_START..CALLSEQ_END with zero space though.
3199 // If the callee has a byval argument and memcpy is used, we are mandated
3200 // to already have produced a reserved argument area for the callee for O32.
3201 // Therefore, the reserved argument area can be reused for both calls.
3203 // Other cases of calling memcpy cannot have a chain with a CALLSEQ_START
3204 // present, as we have yet to hook that node onto the chain.
3206 // Hence, the CALLSEQ_START and CALLSEQ_END nodes can be eliminated in this
3207 // case. GCC does a similar trick, in that wherever possible, it calculates
3208 // the maximum outgoing argument area (including the reserved area), and
3209 // preallocates the stack space on entry to the caller.
3211 // FIXME: We should do the same for efficiency and space.
3213 // Note: The check on the calling convention below must match
3214 // MipsABIInfo::GetCalleeAllocdArgSizeInBytes().
3215 bool MemcpyInByVal = ES &&
3216 StringRef(ES->getSymbol()) == StringRef("memcpy") &&
3217 CallConv != CallingConv::Fast &&
3218 Chain.getOpcode() == ISD::CALLSEQ_START;
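// Illustrative scenario (the source is hypothetical, following the comment
// above): for
//   void f(struct S s);   // S passed byval under O32
// lowering the call f(s) opens CALLSEQ_START for f, and copying 's' into the
// outgoing area may emit a call to memcpy while that sequence is still open.
// MemcpyInByVal recognizes exactly this nested call, so the memcpy reuses
// f's reserved argument area instead of getting CALLSEQ nodes and a stack
// adjustment of its own.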
3220 // Allocate the reserved argument area. It seems strange to do this from the
3221 // caller side but removing it breaks the frame size calculation.
3222 unsigned ReservedArgArea =
3223 MemcpyInByVal ? 0 : ABI.GetCalleeAllocdArgSizeInBytes(CallConv);
3224 CCInfo.AllocateStack(ReservedArgArea, Align(1));
3226 CCInfo.AnalyzeCallOperands(Outs, CC_Mips, CLI.getArgs(),
3227 ES ? ES->getSymbol() : nullptr);
3229 // Get a count of how many bytes are to be pushed on the stack.
3230 unsigned StackSize = CCInfo.getStackSize();
3232 // Call site info for function parameters tracking.
3233 MachineFunction::CallSiteInfo CSInfo;
3235 // Check if it's really possible to do a tail call. Restrict it to functions
3236 // that are part of this compilation unit.
3237 bool InternalLinkage = false;
3238 if (IsTailCall) {
3239 IsTailCall = isEligibleForTailCallOptimization(
3240 CCInfo, StackSize, *MF.getInfo<MipsFunctionInfo>());
3241 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3242 InternalLinkage = G->getGlobal()->hasInternalLinkage();
3243 IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||
3244 G->getGlobal()->hasPrivateLinkage() ||
3245 G->getGlobal()->hasHiddenVisibility() ||
3246 G->getGlobal()->hasProtectedVisibility());
3249 if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall())
3250 report_fatal_error("failed to perform tail call elimination on a call "
3251 "site marked musttail");
3253 if (IsTailCall)
3254 ++NumTailCalls;
3256 // Chain is the output chain of the last Load/Store or CopyToReg node.
3257 // ByValChain is the output chain of the last Memcpy node created for copying
3258 // byval arguments to the stack.
3259 unsigned StackAlignment = TFL->getStackAlignment();
3260 StackSize = alignTo(StackSize, StackAlignment);
3262 if (!(IsTailCall || MemcpyInByVal))
3263 Chain = DAG.getCALLSEQ_START(Chain, StackSize, 0, DL);
3265 SDValue StackPtr =
3266 DAG.getCopyFromReg(Chain, DL, ABI.IsN64() ? Mips::SP_64 : Mips::SP,
3267 getPointerTy(DAG.getDataLayout()));
3269 std::deque<std::pair<unsigned, SDValue>> RegsToPass;
3270 SmallVector<SDValue, 8> MemOpChains;
3272 CCInfo.rewindByValRegsInfo();
3274 // Walk the register/memloc assignments, inserting copies/loads.
3275 for (unsigned i = 0, e = ArgLocs.size(), OutIdx = 0; i != e; ++i, ++OutIdx) {
3276 SDValue Arg = OutVals[OutIdx];
3277 CCValAssign &VA = ArgLocs[i];
3278 MVT ValVT = VA.getValVT(), LocVT = VA.getLocVT();
3279 ISD::ArgFlagsTy Flags = Outs[OutIdx].Flags;
3280 bool UseUpperBits = false;
3282 // ByVal Arg.
3283 if (Flags.isByVal()) {
3284 unsigned FirstByValReg, LastByValReg;
3285 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
3286 CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
3288 assert(Flags.getByValSize() &&
3289 "ByVal args of size 0 should have been ignored by front-end.");
3290 assert(ByValIdx < CCInfo.getInRegsParamsCount());
3291 assert(!IsTailCall &&
3292 "Do not tail-call optimize if there is a byval argument.");
3293 passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
3294 FirstByValReg, LastByValReg, Flags, Subtarget.isLittle(),
3295 VA);
3296 CCInfo.nextInRegsParam();
3297 continue;
3300 // Promote the value if needed.
3301 switch (VA.getLocInfo()) {
3302 default:
3303 llvm_unreachable("Unknown loc info!");
3304 case CCValAssign::Full:
3305 if (VA.isRegLoc()) {
3306 if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
3307 (ValVT == MVT::f64 && LocVT == MVT::i64) ||
3308 (ValVT == MVT::i64 && LocVT == MVT::f64))
3309 Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
3310 else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
3311 SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
3312 Arg, DAG.getConstant(0, DL, MVT::i32));
3313 SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
3314 Arg, DAG.getConstant(1, DL, MVT::i32));
3315 if (!Subtarget.isLittle())
3316 std::swap(Lo, Hi);
3318 assert(VA.needsCustom());
3320 Register LocRegLo = VA.getLocReg();
3321 Register LocRegHigh = ArgLocs[++i].getLocReg();
3322 RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
3323 RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));
3324 continue;
3327 break;
3328 case CCValAssign::BCvt:
3329 Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
3330 break;
3331 case CCValAssign::SExtUpper:
3332 UseUpperBits = true;
3333 [[fallthrough]];
3334 case CCValAssign::SExt:
3335 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, LocVT, Arg);
3336 break;
3337 case CCValAssign::ZExtUpper:
3338 UseUpperBits = true;
3339 [[fallthrough]];
3340 case CCValAssign::ZExt:
3341 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, LocVT, Arg);
3342 break;
3343 case CCValAssign::AExtUpper:
3344 UseUpperBits = true;
3345 [[fallthrough]];
3346 case CCValAssign::AExt:
3347 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, LocVT, Arg);
3348 break;
3351 if (UseUpperBits) {
3352 unsigned ValSizeInBits = Outs[OutIdx].ArgVT.getSizeInBits();
3353 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3354 Arg = DAG.getNode(
3355 ISD::SHL, DL, VA.getLocVT(), Arg,
3356 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3359 // Arguments that can be passed on register must be kept at
3360 // RegsToPass vector
3361 if (VA.isRegLoc()) {
3362 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
3364 // If the parameter is passed through a double-precision register $D<n>,
3365 // which splits into two physical registers, avoid creating call site info.
3366 if (Mips::AFGR64RegClass.contains(VA.getLocReg()))
3367 continue;
3369 // Collect CSInfo about which register passes which parameter.
3370 const TargetOptions &Options = DAG.getTarget().Options;
3371 if (Options.SupportsDebugEntryValues)
3372 CSInfo.emplace_back(VA.getLocReg(), i);
3374 continue;
3377 // Register arguments can't get to this point; only memory locations do.
3378 assert(VA.isMemLoc());
3380 // Emit an ISD::STORE which stores the
3381 // parameter value to a stack location.
3382 MemOpChains.push_back(passArgOnStack(StackPtr, VA.getLocMemOffset(),
3383 Chain, Arg, DL, IsTailCall, DAG));
3386 // Transform all store nodes into one single node because all store
3387 // nodes are independent of each other.
3388 if (!MemOpChains.empty())
3389 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
3391 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
3392 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
3393 // node so that legalize doesn't hack it.
3395 EVT Ty = Callee.getValueType();
3396 bool GlobalOrExternal = false, IsCallReloc = false;
3398 // The long-calls feature is ignored in case of PIC.
3399 // While we do not support -mshared / -mno-shared properly,
3400 // ignore long-calls in case of -mabicalls too.
3401 if (!Subtarget.isABICalls() && !IsPIC) {
3402 // If the function should be called using "long call",
3403 // get its address into a register to prevent use
3404 // of the `jal` instruction for the direct call.
3405 if (auto *N = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3406 if (Subtarget.useLongCalls())
3407 Callee = Subtarget.hasSym32()
3408 ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
3409 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
3410 } else if (auto *N = dyn_cast<GlobalAddressSDNode>(Callee)) {
3411 bool UseLongCalls = Subtarget.useLongCalls();
3412 // If the function has a long-call/far/near attribute,
3413 // it overrides the command-line switch passed to the backend.
3414 if (auto *F = dyn_cast<Function>(N->getGlobal())) {
3415 if (F->hasFnAttribute("long-call"))
3416 UseLongCalls = true;
3417 else if (F->hasFnAttribute("short-call"))
3418 UseLongCalls = false;
3420 if (UseLongCalls)
3421 Callee = Subtarget.hasSym32()
3422 ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
3423 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
3427 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3428 if (IsPIC) {
3429 const GlobalValue *Val = G->getGlobal();
3430 InternalLinkage = Val->hasInternalLinkage();
3432 if (InternalLinkage)
3433 Callee = getAddrLocal(G, DL, Ty, DAG, ABI.IsN32() || ABI.IsN64());
3434 else if (Subtarget.useXGOT()) {
3435 Callee = getAddrGlobalLargeGOT(G, DL, Ty, DAG, MipsII::MO_CALL_HI16,
3436 MipsII::MO_CALL_LO16, Chain,
3437 FuncInfo->callPtrInfo(MF, Val));
3438 IsCallReloc = true;
3439 } else {
3440 Callee = getAddrGlobal(G, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
3441 FuncInfo->callPtrInfo(MF, Val));
3442 IsCallReloc = true;
3444 } else
3445 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL,
3446 getPointerTy(DAG.getDataLayout()), 0,
3447 MipsII::MO_NO_FLAG);
3448 GlobalOrExternal = true;
3450 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3451 const char *Sym = S->getSymbol();
3453 if (!IsPIC) // static
3454 Callee = DAG.getTargetExternalSymbol(
3455 Sym, getPointerTy(DAG.getDataLayout()), MipsII::MO_NO_FLAG);
3456 else if (Subtarget.useXGOT()) {
3457 Callee = getAddrGlobalLargeGOT(S, DL, Ty, DAG, MipsII::MO_CALL_HI16,
3458 MipsII::MO_CALL_LO16, Chain,
3459 FuncInfo->callPtrInfo(MF, Sym));
3460 IsCallReloc = true;
3461 } else { // PIC
3462 Callee = getAddrGlobal(S, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
3463 FuncInfo->callPtrInfo(MF, Sym));
3464 IsCallReloc = true;
3467 GlobalOrExternal = true;
3470 SmallVector<SDValue, 8> Ops(1, Chain);
3471 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3473 getOpndList(Ops, RegsToPass, IsPIC, GlobalOrExternal, InternalLinkage,
3474 IsCallReloc, CLI, Callee, Chain);
3476 if (IsTailCall) {
3477 MF.getFrameInfo().setHasTailCall();
3478 SDValue Ret = DAG.getNode(MipsISD::TailCall, DL, MVT::Other, Ops);
3479 DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
3480 return Ret;
3483 Chain = DAG.getNode(MipsISD::JmpLink, DL, NodeTys, Ops);
3484 SDValue InGlue = Chain.getValue(1);
3486 DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
3488 // Create the CALLSEQ_END node in the case where it is not a call to
3489 // memcpy.
3490 if (!MemcpyInByVal) {
3491 Chain = DAG.getCALLSEQ_END(Chain, StackSize, 0, InGlue, DL);
3492 InGlue = Chain.getValue(1);
3495 // Handle result values, copying them out of physregs into vregs that we
3496 // return.
3497 return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, DL, DAG,
3498 InVals, CLI);
3501 /// LowerCallResult - Lower the result values of a call into the
3502 /// appropriate copies out of appropriate physical registers.
3503 SDValue MipsTargetLowering::LowerCallResult(
3504 SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
3505 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
3506 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
3507 TargetLowering::CallLoweringInfo &CLI) const {
3508 // Assign locations to each value returned by this call.
3509 SmallVector<CCValAssign, 16> RVLocs;
3510 MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
3511 *DAG.getContext());
3513 const ExternalSymbolSDNode *ES =
3514 dyn_cast_or_null<const ExternalSymbolSDNode>(CLI.Callee.getNode());
3515 CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI.RetTy,
3516 ES ? ES->getSymbol() : nullptr);
3518 // Copy all of the result registers out of their specified physreg.
3519 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3520 CCValAssign &VA = RVLocs[i];
3521 assert(VA.isRegLoc() && "Can only return in registers!");
3523 SDValue Val = DAG.getCopyFromReg(Chain, DL, RVLocs[i].getLocReg(),
3524 RVLocs[i].getLocVT(), InGlue);
3525 Chain = Val.getValue(1);
3526 InGlue = Val.getValue(2);
3528 if (VA.isUpperBitsInLoc()) {
3529 unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();
3530 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3531 unsigned Shift =
3532 VA.getLocInfo() == CCValAssign::ZExtUpper ? ISD::SRL : ISD::SRA;
3533 Val = DAG.getNode(
3534 Shift, DL, VA.getLocVT(), Val,
3535 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3538 switch (VA.getLocInfo()) {
3539 default:
3540 llvm_unreachable("Unknown loc info!");
3541 case CCValAssign::Full:
3542 break;
3543 case CCValAssign::BCvt:
3544 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
3545 break;
3546 case CCValAssign::AExt:
3547 case CCValAssign::AExtUpper:
3548 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3549 break;
3550 case CCValAssign::ZExt:
3551 case CCValAssign::ZExtUpper:
3552 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
3553 DAG.getValueType(VA.getValVT()));
3554 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3555 break;
3556 case CCValAssign::SExt:
3557 case CCValAssign::SExtUpper:
3558 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
3559 DAG.getValueType(VA.getValVT()));
3560 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3561 break;
3564 InVals.push_back(Val);
3567 return Chain;
3570 static SDValue UnpackFromArgumentSlot(SDValue Val, const CCValAssign &VA,
3571 EVT ArgVT, const SDLoc &DL,
3572 SelectionDAG &DAG) {
3573 MVT LocVT = VA.getLocVT();
3574 EVT ValVT = VA.getValVT();
3576 // Shift into the upper bits if necessary.
3577 switch (VA.getLocInfo()) {
3578 default:
3579 break;
3580 case CCValAssign::AExtUpper:
3581 case CCValAssign::SExtUpper:
3582 case CCValAssign::ZExtUpper: {
3583 unsigned ValSizeInBits = ArgVT.getSizeInBits();
3584 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3585 unsigned Opcode =
3586 VA.getLocInfo() == CCValAssign::ZExtUpper ? ISD::SRL : ISD::SRA;
3587 Val = DAG.getNode(
3588 Opcode, DL, VA.getLocVT(), Val,
3589 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3590 break;
3594 // If this is a value smaller than the argument slot size (32-bit for O32,
3595 // 64-bit for N32/N64), it has been promoted in some way to the argument slot
3596 // size. Extract the value and insert any appropriate assertions regarding
3597 // sign/zero extension.
3598 switch (VA.getLocInfo()) {
3599 default:
3600 llvm_unreachable("Unknown loc info!");
3601 case CCValAssign::Full:
3602 break;
3603 case CCValAssign::AExtUpper:
3604 case CCValAssign::AExt:
3605 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3606 break;
3607 case CCValAssign::SExtUpper:
3608 case CCValAssign::SExt:
3609 Val = DAG.getNode(ISD::AssertSext, DL, LocVT, Val, DAG.getValueType(ValVT));
3610 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3611 break;
3612 case CCValAssign::ZExtUpper:
3613 case CCValAssign::ZExt:
3614 Val = DAG.getNode(ISD::AssertZext, DL, LocVT, Val, DAG.getValueType(ValVT));
3615 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3616 break;
3617 case CCValAssign::BCvt:
3618 Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
3619 break;
3622 return Val;
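// Worked example (illustrative): an i32 received with LocInfo SExtUpper in a
// 64-bit location is recovered as
//   Val = Val >>s 32;              // SRA by LocSize - ValSize = 32 bits
//   Val = AssertSext(Val, i32);    // the upper bits are known sign bits
//   Val = truncate(Val, i32);
// mirroring the shift-left performed on the sender's side.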
3625 //===----------------------------------------------------------------------===//
3626 // Formal Arguments Calling Convention Implementation
3627 //===----------------------------------------------------------------------===//
3628 /// LowerFormalArguments - transform physical registers into virtual registers
3629 /// and generate load operations for arguments placed on the stack.
3630 SDValue MipsTargetLowering::LowerFormalArguments(
3631 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
3632 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
3633 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3634 MachineFunction &MF = DAG.getMachineFunction();
3635 MachineFrameInfo &MFI = MF.getFrameInfo();
3636 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3638 MipsFI->setVarArgsFrameIndex(0);
3640 // Used with varargs to accumulate store chains.
3641 std::vector<SDValue> OutChains;
3643 // Assign locations to all of the incoming arguments.
3644 SmallVector<CCValAssign, 16> ArgLocs;
3645 MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
3646 *DAG.getContext());
3647 CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), Align(1));
3648 const Function &Func = DAG.getMachineFunction().getFunction();
3649 Function::const_arg_iterator FuncArg = Func.arg_begin();
3651 if (Func.hasFnAttribute("interrupt") && !Func.arg_empty())
3652 report_fatal_error(
3653 "Functions with the interrupt attribute cannot have arguments!");
3655 CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);
3656 MipsFI->setFormalArgInfo(CCInfo.getStackSize(),
3657 CCInfo.getInRegsParamsCount() > 0);
3659 unsigned CurArgIdx = 0;
3660 CCInfo.rewindByValRegsInfo();
3662 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
3663 CCValAssign &VA = ArgLocs[i];
3664 if (Ins[InsIdx].isOrigArg()) {
3665 std::advance(FuncArg, Ins[InsIdx].getOrigArgIndex() - CurArgIdx);
3666 CurArgIdx = Ins[InsIdx].getOrigArgIndex();
3668 EVT ValVT = VA.getValVT();
3669 ISD::ArgFlagsTy Flags = Ins[InsIdx].Flags;
3670 bool IsRegLoc = VA.isRegLoc();
3672 if (Flags.isByVal()) {
3673 assert(Ins[InsIdx].isOrigArg() && "Byval arguments cannot be implicit");
3674 unsigned FirstByValReg, LastByValReg;
3675 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
3676 CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
3678 assert(Flags.getByValSize() &&
3679 "ByVal args of size 0 should have been ignored by front-end.");
3680 assert(ByValIdx < CCInfo.getInRegsParamsCount());
3681 copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,
3682 FirstByValReg, LastByValReg, VA, CCInfo);
3683 CCInfo.nextInRegsParam();
3684 continue;
3687 // Arguments stored on registers
3688 if (IsRegLoc) {
3689 MVT RegVT = VA.getLocVT();
3690 Register ArgReg = VA.getLocReg();
3691 const TargetRegisterClass *RC = getRegClassFor(RegVT);
3693 // Transform the arguments stored in
3694 // physical registers into virtual ones.
3695 unsigned Reg = addLiveIn(DAG.getMachineFunction(), ArgReg, RC);
3696 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
3698 ArgValue =
3699 UnpackFromArgumentSlot(ArgValue, VA, Ins[InsIdx].ArgVT, DL, DAG);
3701 // Handle floating point arguments passed in integer registers and
3702 // long double arguments passed in floating point registers.
3703 if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
3704 (RegVT == MVT::i64 && ValVT == MVT::f64) ||
3705 (RegVT == MVT::f64 && ValVT == MVT::i64))
3706 ArgValue = DAG.getNode(ISD::BITCAST, DL, ValVT, ArgValue);
3707 else if (ABI.IsO32() && RegVT == MVT::i32 &&
3708 ValVT == MVT::f64) {
3709 assert(VA.needsCustom() && "Expected custom argument for f64 split");
3710 CCValAssign &NextVA = ArgLocs[++i];
3711 unsigned Reg2 =
3712 addLiveIn(DAG.getMachineFunction(), NextVA.getLocReg(), RC);
3713 SDValue ArgValue2 = DAG.getCopyFromReg(Chain, DL, Reg2, RegVT);
3714 if (!Subtarget.isLittle())
3715 std::swap(ArgValue, ArgValue2);
3716 ArgValue = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64,
3717 ArgValue, ArgValue2);
3720 InVals.push_back(ArgValue);
3721 } else { // !VA.isRegLoc(): the argument was passed in memory.
3722 MVT LocVT = VA.getLocVT();
3724 assert(!VA.needsCustom() && "unexpected custom memory argument");
3726 // Only arguments passed on the stack should make it here.
3727 assert(VA.isMemLoc());
3729 // The stack pointer offset is relative to the caller stack frame.
3730 int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
3731 VA.getLocMemOffset(), true);
3733 // Create load nodes to retrieve arguments from the stack
3734 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3735 SDValue ArgValue = DAG.getLoad(
3736 LocVT, DL, Chain, FIN,
3737 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3738 OutChains.push_back(ArgValue.getValue(1));
3740 ArgValue =
3741 UnpackFromArgumentSlot(ArgValue, VA, Ins[InsIdx].ArgVT, DL, DAG);
3743 InVals.push_back(ArgValue);
3747 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
3749 if (ArgLocs[i].needsCustom()) {
3750 ++i;
3751 continue;
3754 // The Mips ABIs for returning structs by value require that we copy
3755 // the sret argument into $v0 for the return. Save the argument into
3756 // a virtual register so that we can access it from the return points.
3757 if (Ins[InsIdx].Flags.isSRet()) {
3758 unsigned Reg = MipsFI->getSRetReturnReg();
3759 if (!Reg) {
3760 Reg = MF.getRegInfo().createVirtualRegister(
3761 getRegClassFor(ABI.IsN64() ? MVT::i64 : MVT::i32));
3762 MipsFI->setSRetReturnReg(Reg);
3764 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
3765 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
3766 break;
3770 if (IsVarArg)
3771 writeVarArgRegs(OutChains, Chain, DL, DAG, CCInfo);
3773 // All stores are grouped in one node to allow the matching between
3774 // the size of Ins and InVals. This only happens for vararg functions.
3775 if (!OutChains.empty()) {
3776 OutChains.push_back(Chain);
3777 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
3780 return Chain;
3783 //===----------------------------------------------------------------------===//
3784 // Return Value Calling Convention Implementation
3785 //===----------------------------------------------------------------------===//
3787 bool
3788 MipsTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
3789 MachineFunction &MF, bool IsVarArg,
3790 const SmallVectorImpl<ISD::OutputArg> &Outs,
3791 LLVMContext &Context) const {
3792 SmallVector<CCValAssign, 16> RVLocs;
3793 MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
3794 return CCInfo.CheckReturn(Outs, RetCC_Mips);
3797 bool MipsTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
3798 bool IsSigned) const {
3799 if ((ABI.IsN32() || ABI.IsN64()) && Type == MVT::i32)
3800 return true;
3802 return IsSigned;
3805 SDValue
3806 MipsTargetLowering::LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
3807 const SDLoc &DL,
3808 SelectionDAG &DAG) const {
3809 MachineFunction &MF = DAG.getMachineFunction();
3810 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3812 MipsFI->setISR();
3814 return DAG.getNode(MipsISD::ERet, DL, MVT::Other, RetOps);
3817 SDValue
3818 MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
3819 bool IsVarArg,
3820 const SmallVectorImpl<ISD::OutputArg> &Outs,
3821 const SmallVectorImpl<SDValue> &OutVals,
3822 const SDLoc &DL, SelectionDAG &DAG) const {
3823 // CCValAssign - represents the assignment of
3824 // the return value to a location.
3825 SmallVector<CCValAssign, 16> RVLocs;
3826 MachineFunction &MF = DAG.getMachineFunction();
3828 // CCState - Info about the registers and stack slot.
3829 MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
3831 // Analyze return values.
3832 CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
3834 SDValue Glue;
3835 SmallVector<SDValue, 4> RetOps(1, Chain);
3837 // Copy the result values into the output registers.
3838 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3839 SDValue Val = OutVals[i];
3840 CCValAssign &VA = RVLocs[i];
3841 assert(VA.isRegLoc() && "Can only return in registers!");
3842 bool UseUpperBits = false;
3844 switch (VA.getLocInfo()) {
3845 default:
3846 llvm_unreachable("Unknown loc info!");
3847 case CCValAssign::Full:
3848 break;
3849 case CCValAssign::BCvt:
3850 Val = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Val);
3851 break;
3852 case CCValAssign::AExtUpper:
3853 UseUpperBits = true;
3854 [[fallthrough]];
3855 case CCValAssign::AExt:
3856 Val = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Val);
3857 break;
3858 case CCValAssign::ZExtUpper:
3859 UseUpperBits = true;
3860 [[fallthrough]];
3861 case CCValAssign::ZExt:
3862 Val = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Val);
3863 break;
3864 case CCValAssign::SExtUpper:
3865 UseUpperBits = true;
3866 [[fallthrough]];
3867 case CCValAssign::SExt:
3868 Val = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Val);
3869 break;
3872 if (UseUpperBits) {
3873 unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();
3874 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3875 Val = DAG.getNode(
3876 ISD::SHL, DL, VA.getLocVT(), Val,
3877 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3880 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
3882 // Guarantee that all emitted copies are stuck together with flags.
3883 Glue = Chain.getValue(1);
3884 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
3887 // The Mips ABIs for returning structs by value require that we copy
3888 // the sret argument into $v0 for the return. We saved the argument into
3889 // a virtual register in the entry block, so now we copy the value out
3890 // and into $v0.
3891 if (MF.getFunction().hasStructRetAttr()) {
3892 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3893 unsigned Reg = MipsFI->getSRetReturnReg();
3895 if (!Reg)
3896 llvm_unreachable("sret virtual register not created in the entry block");
3897 SDValue Val =
3898 DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(DAG.getDataLayout()));
3899 unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
3901 Chain = DAG.getCopyToReg(Chain, DL, V0, Val, Glue);
3902 Glue = Chain.getValue(1);
3903 RetOps.push_back(DAG.getRegister(V0, getPointerTy(DAG.getDataLayout())));
3906 RetOps[0] = Chain; // Update chain.
3908 // Add the glue if we have it.
3909 if (Glue.getNode())
3910 RetOps.push_back(Glue);
3912 // ISRs must use "eret".
3913 if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt"))
3914 return LowerInterruptReturn(RetOps, DL, DAG);
3916 // Standard return on Mips is a "jr $ra"
3917 return DAG.getNode(MipsISD::Ret, DL, MVT::Other, RetOps);
3920 //===----------------------------------------------------------------------===//
3921 // Mips Inline Assembly Support
3922 //===----------------------------------------------------------------------===//
3924 /// getConstraintType - Given a constraint letter, return the type of
3925 /// constraint it is for this target.
3926 MipsTargetLowering::ConstraintType
3927 MipsTargetLowering::getConstraintType(StringRef Constraint) const {
3928 // Mips specific constraints
3929 // GCC config/mips/constraints.md
3931 // 'd' : An address register. Equivalent to r
3932 // unless generating MIPS16 code.
3933 // 'y' : Equivalent to r; retained for
3934 // backwards compatibility.
3935 // 'c' : A register suitable for use in an indirect
3936 // jump. This will always be $25 for -mabicalls.
3937 // 'l' : The lo register. 1 word storage.
3938 // 'x' : The hilo register pair. Double word storage.
3939 if (Constraint.size() == 1) {
3940 switch (Constraint[0]) {
3941 default : break;
3942 case 'd':
3943 case 'y':
3944 case 'f':
3945 case 'c':
3946 case 'l':
3947 case 'x':
3948 return C_RegisterClass;
3949 case 'R':
3950 return C_Memory;
3954 if (Constraint == "ZC")
3955 return C_Memory;
3957 return TargetLowering::getConstraintType(Constraint);
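// Example use of these constraints in GCC-style inline assembly (an
// illustrative sketch; the variable names are hypothetical):
//   int out, in = 5;
//   __asm__("addiu %0, %1, %2" : "=d"(out) : "d"(in), "I"(16));
// Here 'd' requests a general-purpose register and 'I' a signed 16-bit
// immediate, matching the classifications above.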
3960 /// Examine constraint type and operand type and determine a weight value.
3961 /// This object must already have been set up with the operand type
3962 /// and the current alternative constraint selected.
3963 TargetLowering::ConstraintWeight
3964 MipsTargetLowering::getSingleConstraintMatchWeight(
3965 AsmOperandInfo &info, const char *constraint) const {
3966 ConstraintWeight weight = CW_Invalid;
3967 Value *CallOperandVal = info.CallOperandVal;
3968 // If we don't have a value, we can't do a match,
3969 // but allow it at the lowest weight.
3970 if (!CallOperandVal)
3971 return CW_Default;
3972 Type *type = CallOperandVal->getType();
3973 // Look at the constraint type.
3974 switch (*constraint) {
3975 default:
3976 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3977 break;
3978 case 'd':
3979 case 'y':
3980 if (type->isIntegerTy())
3981 weight = CW_Register;
3982 break;
3983 case 'f': // FPU or MSA register
3984 if (Subtarget.hasMSA() && type->isVectorTy() &&
3985 type->getPrimitiveSizeInBits().getFixedValue() == 128)
3986 weight = CW_Register;
3987 else if (type->isFloatTy())
3988 weight = CW_Register;
3989 break;
3990 case 'c': // $25 for indirect jumps
3991 case 'l': // lo register
3992 case 'x': // hilo register pair
3993 if (type->isIntegerTy())
3994 weight = CW_SpecificReg;
3995 break;
3996 case 'I': // signed 16 bit immediate
3997 case 'J': // integer zero
3998 case 'K': // unsigned 16 bit immediate
3999 case 'L': // signed 32 bit immediate where lower 16 bits are 0
4000 case 'N': // immediate in the range of -65535 to -1 (inclusive)
4001 case 'O': // signed 15 bit immediate (+- 16383)
4002 case 'P': // immediate in the range of 1 to 65535 (inclusive)
4003 if (isa<ConstantInt>(CallOperandVal))
4004 weight = CW_Constant;
4005 break;
4006 case 'R':
4007 weight = CW_Memory;
4008 break;
4010 return weight;
4013 /// This is a helper function to parse a physical register string and split it
4014 /// into non-numeric and numeric parts (Prefix and Reg). The first boolean flag
4015 /// that is returned indicates whether parsing was successful. The second flag
4016 /// is true if the numeric part exists.
4017 static std::pair<bool, bool> parsePhysicalReg(StringRef C, StringRef &Prefix,
4018 unsigned long long &Reg) {
4019 if (C.front() != '{' || C.back() != '}')
4020 return std::make_pair(false, false);
4022 // Search for the first numeric character.
4023 StringRef::const_iterator I, B = C.begin() + 1, E = C.end() - 1;
4024 I = std::find_if(B, E, isdigit);
4026 Prefix = StringRef(B, I - B);
4028 // The second flag is set to false if no numeric characters were found.
4029 if (I == E)
4030 return std::make_pair(true, false);
4032 // Parse the numeric characters.
4033 return std::make_pair(!getAsUnsignedInteger(StringRef(I, E - I), 10, Reg),
4034 true);
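// Usage sketch (illustrative; the inputs are hypothetical):
//   StringRef Prefix;
//   unsigned long long Reg;
//   parsePhysicalReg("{$f20}", Prefix, Reg); // {true, true}, Prefix == "$f", Reg == 20
//   parsePhysicalReg("{hi}", Prefix, Reg);   // {true, false}, Prefix == "hi"
//   parsePhysicalReg("$f20", Prefix, Reg);   // {false, false}: no braces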
4037 EVT MipsTargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
4038 ISD::NodeType) const {
4039 bool Cond = !Subtarget.isABI_O32() && VT.getSizeInBits() == 32;
4040 EVT MinVT = getRegisterType(Cond ? MVT::i64 : MVT::i32);
4041 return VT.bitsLT(MinVT) ? MinVT : VT;
4044 std::pair<unsigned, const TargetRegisterClass *> MipsTargetLowering::
4045 parseRegForInlineAsmConstraint(StringRef C, MVT VT) const {
4046 const TargetRegisterInfo *TRI =
4047 Subtarget.getRegisterInfo();
4048 const TargetRegisterClass *RC;
4049 StringRef Prefix;
4050 unsigned long long Reg;
4052 std::pair<bool, bool> R = parsePhysicalReg(C, Prefix, Reg);
4054 if (!R.first)
4055 return std::make_pair(0U, nullptr);
4057 if ((Prefix == "hi" || Prefix == "lo")) { // Parse hi/lo.
4058 // No numeric characters follow "hi" or "lo".
4059 if (R.second)
4060 return std::make_pair(0U, nullptr);
4062 RC = TRI->getRegClass(Prefix == "hi" ?
4063 Mips::HI32RegClassID : Mips::LO32RegClassID);
4064 return std::make_pair(*(RC->begin()), RC);
4065 } else if (Prefix.startswith("$msa")) {
4066 // Parse $msa(ir|csr|access|save|modify|request|map|unmap)
4068 // No numeric characters follow the name.
4069 if (R.second)
4070 return std::make_pair(0U, nullptr);
4072 Reg = StringSwitch<unsigned long long>(Prefix)
4073 .Case("$msair", Mips::MSAIR)
4074 .Case("$msacsr", Mips::MSACSR)
4075 .Case("$msaaccess", Mips::MSAAccess)
4076 .Case("$msasave", Mips::MSASave)
4077 .Case("$msamodify", Mips::MSAModify)
4078 .Case("$msarequest", Mips::MSARequest)
4079 .Case("$msamap", Mips::MSAMap)
4080 .Case("$msaunmap", Mips::MSAUnmap)
4081 .Default(0);
4083 if (!Reg)
4084 return std::make_pair(0U, nullptr);
4086 RC = TRI->getRegClass(Mips::MSACtrlRegClassID);
4087 return std::make_pair(Reg, RC);
4090 if (!R.second)
4091 return std::make_pair(0U, nullptr);
4093 if (Prefix == "$f") { // Parse $f0-$f31.
4094 // If the size of FP registers is 64-bit or Reg is an even number, select
4095 // the 64-bit register class. Otherwise, select the 32-bit register class.
4096 if (VT == MVT::Other)
4097 VT = (Subtarget.isFP64bit() || !(Reg % 2)) ? MVT::f64 : MVT::f32;
4099 RC = getRegClassFor(VT);
4101 if (RC == &Mips::AFGR64RegClass) {
4102 assert(Reg % 2 == 0);
4103 Reg >>= 1;
4105 } else if (Prefix == "$fcc") // Parse $fcc0-$fcc7.
4106 RC = TRI->getRegClass(Mips::FCCRegClassID);
4107 else if (Prefix == "$w") { // Parse $w0-$w31.
4108 RC = getRegClassFor((VT == MVT::Other) ? MVT::v16i8 : VT);
4109 } else { // Parse $0-$31.
4110 assert(Prefix == "$");
4111 RC = getRegClassFor((VT == MVT::Other) ? MVT::i32 : VT);
4114 assert(Reg < RC->getNumRegs());
4115 return std::make_pair(*(RC->begin() + Reg), RC);
4118 /// Given a register class constraint, like 'r', if this corresponds directly
4119 /// to an LLVM register class, return a register of 0 and the register class
4120 /// pointer.
4121 std::pair<unsigned, const TargetRegisterClass *>
4122 MipsTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
4123 StringRef Constraint,
4124 MVT VT) const {
4125 if (Constraint.size() == 1) {
4126 switch (Constraint[0]) {
4127 case 'd': // Address register. Same as 'r' unless generating MIPS16 code.
4128 case 'y': // Same as 'r'. Exists for compatibility.
4129 case 'r':
4130 if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8 || VT == MVT::i1) {
4131 if (Subtarget.inMips16Mode())
4132 return std::make_pair(0U, &Mips::CPU16RegsRegClass);
4133 return std::make_pair(0U, &Mips::GPR32RegClass);
4135 if (VT == MVT::i64 && !Subtarget.isGP64bit())
4136 return std::make_pair(0U, &Mips::GPR32RegClass);
4137 if (VT == MVT::i64 && Subtarget.isGP64bit())
4138 return std::make_pair(0U, &Mips::GPR64RegClass);
4139 // This will generate an error message
4140 return std::make_pair(0U, nullptr);
4141 case 'f': // FPU or MSA register
4142 if (VT == MVT::v16i8)
4143 return std::make_pair(0U, &Mips::MSA128BRegClass);
4144 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
4145 return std::make_pair(0U, &Mips::MSA128HRegClass);
4146 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
4147 return std::make_pair(0U, &Mips::MSA128WRegClass);
4148 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
4149 return std::make_pair(0U, &Mips::MSA128DRegClass);
4150 else if (VT == MVT::f32)
4151 return std::make_pair(0U, &Mips::FGR32RegClass);
4152 else if ((VT == MVT::f64) && (!Subtarget.isSingleFloat())) {
4153 if (Subtarget.isFP64bit())
4154 return std::make_pair(0U, &Mips::FGR64RegClass);
4155 return std::make_pair(0U, &Mips::AFGR64RegClass);
4157 break;
4158 case 'c': // register suitable for indirect jump
4159 if (VT == MVT::i32)
4160 return std::make_pair((unsigned)Mips::T9, &Mips::GPR32RegClass);
4161 if (VT == MVT::i64)
4162 return std::make_pair((unsigned)Mips::T9_64, &Mips::GPR64RegClass);
4163 // This will generate an error message
4164 return std::make_pair(0U, nullptr);
4165 case 'l': // use the `lo` register to store values
4166 // that are no bigger than a word
4167 if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8)
4168 return std::make_pair((unsigned)Mips::LO0, &Mips::LO32RegClass);
4169 return std::make_pair((unsigned)Mips::LO0_64, &Mips::LO64RegClass);
4170 case 'x': // use the concatenated `hi` and `lo` registers
4171 // to store doubleword values
4172 // FIXME: Not triggering the use of both hi and lo.
4173 // This will generate an error message
4174 return std::make_pair(0U, nullptr);
4178 if (!Constraint.empty()) {
4179 std::pair<unsigned, const TargetRegisterClass *> R;
4180 R = parseRegForInlineAsmConstraint(Constraint, VT);
4182 if (R.second)
4183 return R;
4186 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
4189 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
4190 /// vector. If it is invalid, don't add anything to Ops.
4191 void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
4192 StringRef Constraint,
4193 std::vector<SDValue> &Ops,
4194 SelectionDAG &DAG) const {
4195 SDLoc DL(Op);
4196 SDValue Result;
4198 // Only support length 1 constraints for now.
4199 if (Constraint.size() > 1)
4200 return;
4202 char ConstraintLetter = Constraint[0];
4203 switch (ConstraintLetter) {
4204 default: break; // This will fall through to the generic implementation
4205 case 'I': // Signed 16 bit constant
4206 // If this fails, the parent routine will give an error
4207 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4208 EVT Type = Op.getValueType();
4209 int64_t Val = C->getSExtValue();
4210 if (isInt<16>(Val)) {
4211 Result = DAG.getTargetConstant(Val, DL, Type);
4212 break;
4215 return;
4216 case 'J': // integer zero
4217 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4218 EVT Type = Op.getValueType();
4219 int64_t Val = C->getZExtValue();
4220 if (Val == 0) {
4221 Result = DAG.getTargetConstant(0, DL, Type);
4222 break;
4225 return;
4226 case 'K': // unsigned 16 bit immediate
4227 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4228 EVT Type = Op.getValueType();
4229 uint64_t Val = (uint64_t)C->getZExtValue();
4230 if (isUInt<16>(Val)) {
4231 Result = DAG.getTargetConstant(Val, DL, Type);
4232 break;
4235 return;
4236 case 'L': // signed 32 bit immediate where lower 16 bits are 0
4237 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4238 EVT Type = Op.getValueType();
4239 int64_t Val = C->getSExtValue();
4240 if ((isInt<32>(Val)) && ((Val & 0xffff) == 0)){
4241 Result = DAG.getTargetConstant(Val, DL, Type);
4242 break;
4245 return;
4246 case 'N': // immediate in the range of -65535 to -1 (inclusive)
4247 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4248 EVT Type = Op.getValueType();
4249 int64_t Val = C->getSExtValue();
4250 if ((Val >= -65535) && (Val <= -1)) {
4251 Result = DAG.getTargetConstant(Val, DL, Type);
4252 break;
4255 return;
4256 case 'O': // signed 15 bit immediate
4257 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4258 EVT Type = Op.getValueType();
4259 int64_t Val = C->getSExtValue();
4260 if ((isInt<15>(Val))) {
4261 Result = DAG.getTargetConstant(Val, DL, Type);
4262 break;
4265 return;
4266 case 'P': // immediate in the range of 1 to 65535 (inclusive)
4267 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4268 EVT Type = Op.getValueType();
4269 int64_t Val = C->getSExtValue();
4270 if ((Val <= 65535) && (Val >= 1)) {
4271 Result = DAG.getTargetConstant(Val, DL, Type);
4272 break;
4275 return;
4278 if (Result.getNode()) {
4279 Ops.push_back(Result);
4280 return;
4283 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
4286 bool MipsTargetLowering::isLegalAddressingMode(const DataLayout &DL,
4287 const AddrMode &AM, Type *Ty,
4288 unsigned AS,
4289 Instruction *I) const {
4290 // No global is ever allowed as a base.
4291 if (AM.BaseGV)
4292 return false;
4294 switch (AM.Scale) {
4295 case 0: // "r+i" or just "i", depending on HasBaseReg.
4296 break;
4297 case 1:
4298 if (!AM.HasBaseReg) // allow "r+i".
4299 break;
4300 return false; // disallow "r+r" or "r+r+i".
4301 default:
4302 return false;
4305 return true;
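// Consequence of the rules above (explanatory note): "reg + imm" forms such
// as
//   lw $2, 8($sp)   # base register plus immediate offset
// are accepted, while two-register "r + r" forms are rejected, since base
// MIPS loads and stores only encode a single base register plus a signed
// 16-bit offset.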
4308 bool
4309 MipsTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4310 // The Mips target isn't yet aware of offsets.
4311 return false;
4314 EVT MipsTargetLowering::getOptimalMemOpType(
4315 const MemOp &Op, const AttributeList &FuncAttributes) const {
4316 if (Subtarget.hasMips64())
4317 return MVT::i64;
4319 return MVT::i32;
4322 bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
4323 bool ForCodeSize) const {
4324 if (VT != MVT::f32 && VT != MVT::f64)
4325 return false;
4326 if (Imm.isNegZero())
4327 return false;
4328 return Imm.isZero();
4331 unsigned MipsTargetLowering::getJumpTableEncoding() const {
4333 // FIXME: For space reasons this should be: EK_GPRel32BlockAddress.
4334 if (ABI.IsN64() && isPositionIndependent())
4335 return MachineJumpTableInfo::EK_GPRel64BlockAddress;
4337 return TargetLowering::getJumpTableEncoding();
4340 bool MipsTargetLowering::useSoftFloat() const {
4341 return Subtarget.useSoftFloat();
4344 void MipsTargetLowering::copyByValRegs(
4345 SDValue Chain, const SDLoc &DL, std::vector<SDValue> &OutChains,
4346 SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags,
4347 SmallVectorImpl<SDValue> &InVals, const Argument *FuncArg,
4348 unsigned FirstReg, unsigned LastReg, const CCValAssign &VA,
4349 MipsCCState &State) const {
4350 MachineFunction &MF = DAG.getMachineFunction();
4351 MachineFrameInfo &MFI = MF.getFrameInfo();
4352 unsigned GPRSizeInBytes = Subtarget.getGPRSizeInBytes();
4353 unsigned NumRegs = LastReg - FirstReg;
4354 unsigned RegAreaSize = NumRegs * GPRSizeInBytes;
4355 unsigned FrameObjSize = std::max(Flags.getByValSize(), RegAreaSize);
4356 int FrameObjOffset;
4357 ArrayRef<MCPhysReg> ByValArgRegs = ABI.GetByValArgRegs();
4359 if (RegAreaSize)
4360 FrameObjOffset =
4361 (int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
4362 (int)((ByValArgRegs.size() - FirstReg) * GPRSizeInBytes);
4363 else
4364 FrameObjOffset = VA.getLocMemOffset();
4366 // Create frame object.
4367 EVT PtrTy = getPointerTy(DAG.getDataLayout());
4368 // Make the fixed object being stored to mutable so that the load
4369 // instructions referencing it have their memory dependencies added.
4370 // Set the frame object as isAliased which clears the underlying objects
4371 // vector in ScheduleDAGInstrs::buildSchedGraph() resulting in addition of all
4372 // stores as dependencies for loads referencing this fixed object.
4373 int FI = MFI.CreateFixedObject(FrameObjSize, FrameObjOffset, false, true);
4374 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
4375 InVals.push_back(FIN);
4377 if (!NumRegs)
4378 return;
4380 // Copy arg registers.
4381 MVT RegTy = MVT::getIntegerVT(GPRSizeInBytes * 8);
4382 const TargetRegisterClass *RC = getRegClassFor(RegTy);
4384 for (unsigned I = 0; I < NumRegs; ++I) {
4385 unsigned ArgReg = ByValArgRegs[FirstReg + I];
4386 unsigned VReg = addLiveIn(MF, ArgReg, RC);
4387 unsigned Offset = I * GPRSizeInBytes;
4388 SDValue StorePtr = DAG.getNode(ISD::ADD, DL, PtrTy, FIN,
4389 DAG.getConstant(Offset, DL, PtrTy));
4390 SDValue Store = DAG.getStore(Chain, DL, DAG.getRegister(VReg, RegTy),
4391 StorePtr, MachinePointerInfo(FuncArg, Offset));
4392 OutChains.push_back(Store);
4396 // Copy byVal arg to registers and stack.
4397 void MipsTargetLowering::passByValArg(
4398 SDValue Chain, const SDLoc &DL,
4399 std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
4400 SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
4401 MachineFrameInfo &MFI, SelectionDAG &DAG, SDValue Arg, unsigned FirstReg,
4402 unsigned LastReg, const ISD::ArgFlagsTy &Flags, bool isLittle,
4403 const CCValAssign &VA) const {
4404 unsigned ByValSizeInBytes = Flags.getByValSize();
4405 unsigned OffsetInBytes = 0; // From beginning of struct
4406 unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
4407 Align Alignment =
4408 std::min(Flags.getNonZeroByValAlign(), Align(RegSizeInBytes));
4409 EVT PtrTy = getPointerTy(DAG.getDataLayout()),
4410 RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
4411 unsigned NumRegs = LastReg - FirstReg;
4413 if (NumRegs) {
4414 ArrayRef<MCPhysReg> ArgRegs = ABI.GetByValArgRegs();
4415 bool LeftoverBytes = (NumRegs * RegSizeInBytes > ByValSizeInBytes);
4416 unsigned I = 0;
4418 // Copy words to registers.
4419 for (; I < NumRegs - LeftoverBytes; ++I, OffsetInBytes += RegSizeInBytes) {
4420 SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
4421 DAG.getConstant(OffsetInBytes, DL, PtrTy));
4422 SDValue LoadVal = DAG.getLoad(RegTy, DL, Chain, LoadPtr,
4423 MachinePointerInfo(), Alignment);
4424 MemOpChains.push_back(LoadVal.getValue(1));
4425 unsigned ArgReg = ArgRegs[FirstReg + I];
4426 RegsToPass.push_back(std::make_pair(ArgReg, LoadVal));
4429 // Return if the struct has been fully copied.
4430 if (ByValSizeInBytes == OffsetInBytes)
4431 return;
4433 // Copy the remainder of the byval argument with sub-word loads and shifts.
4434 if (LeftoverBytes) {
4435 SDValue Val;
4437 for (unsigned LoadSizeInBytes = RegSizeInBytes / 2, TotalBytesLoaded = 0;
4438 OffsetInBytes < ByValSizeInBytes; LoadSizeInBytes /= 2) {
4439 unsigned RemainingSizeInBytes = ByValSizeInBytes - OffsetInBytes;
4441 if (RemainingSizeInBytes < LoadSizeInBytes)
4442 continue;
4444 // Load subword.
4445 SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
4446 DAG.getConstant(OffsetInBytes, DL,
4447 PtrTy));
4448 SDValue LoadVal = DAG.getExtLoad(
4449 ISD::ZEXTLOAD, DL, RegTy, Chain, LoadPtr, MachinePointerInfo(),
4450 MVT::getIntegerVT(LoadSizeInBytes * 8), Alignment);
4451 MemOpChains.push_back(LoadVal.getValue(1));
4453 // Shift the loaded value.
4454 unsigned Shamt;
4456 if (isLittle)
4457 Shamt = TotalBytesLoaded * 8;
4458 else
4459 Shamt = (RegSizeInBytes - (TotalBytesLoaded + LoadSizeInBytes)) * 8;
4461 SDValue Shift = DAG.getNode(ISD::SHL, DL, RegTy, LoadVal,
4462 DAG.getConstant(Shamt, DL, MVT::i32));
4464 if (Val.getNode())
4465 Val = DAG.getNode(ISD::OR, DL, RegTy, Val, Shift);
4466 else
4467 Val = Shift;
4469 OffsetInBytes += LoadSizeInBytes;
4470 TotalBytesLoaded += LoadSizeInBytes;
4471 Alignment = std::min(Alignment, Align(LoadSizeInBytes));
4474 unsigned ArgReg = ArgRegs[FirstReg + I];
4475 RegsToPass.push_back(std::make_pair(ArgReg, Val));
4476 return;
4480 // Copy the remainder of the byval arg to the stack with memcpy.
4481 unsigned MemCpySize = ByValSizeInBytes - OffsetInBytes;
4482 SDValue Src = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
4483 DAG.getConstant(OffsetInBytes, DL, PtrTy));
4484 SDValue Dst = DAG.getNode(ISD::ADD, DL, PtrTy, StackPtr,
4485 DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
4486 Chain = DAG.getMemcpy(
4487 Chain, DL, Dst, Src, DAG.getConstant(MemCpySize, DL, PtrTy),
4488 Align(Alignment), /*isVolatile=*/false, /*AlwaysInline=*/false,
4489 /*isTailCall=*/false, MachinePointerInfo(), MachinePointerInfo());
4490 MemOpChains.push_back(Chain);
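// Worked example (illustrative): a 7-byte byval struct under O32
// (RegSizeInBytes == 4, little-endian) with two argument registers available
// is copied as
//   bytes 0-3 -> one word load into the first register,
//   bytes 4-5 -> zero-extending halfword load, shifted left by 0,
//   byte 6    -> zero-extending byte load, shifted left by 16,
// with the two sub-word loads OR'd together to fill the second register.
// On big-endian targets the shift amounts become 16 and 8 instead.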
void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
                                         SDValue Chain, const SDLoc &DL,
                                         SelectionDAG &DAG,
                                         CCState &State) const {
  ArrayRef<MCPhysReg> ArgRegs = ABI.GetVarArgRegs();
  unsigned Idx = State.getFirstUnallocated(ArgRegs);
  unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
  MVT RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
  const TargetRegisterClass *RC = getRegClassFor(RegTy);
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  // Offset of the first variable argument from the stack pointer.
  int VaArgOffset;

  if (ArgRegs.size() == Idx)
    VaArgOffset = alignTo(State.getStackSize(), RegSizeInBytes);
  else {
    VaArgOffset =
        (int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
        (int)(RegSizeInBytes * (ArgRegs.size() - Idx));
  }

  // Record the frame index of the first variable argument, which is needed
  // to lower VASTART.
  int FI = MFI.CreateFixedObject(RegSizeInBytes, VaArgOffset, true);
  MipsFI->setVarArgsFrameIndex(FI);

  // Copy the integer registers that have not been used for argument passing
  // to the argument register save area. For O32, the save area is allocated
  // in the caller's stack frame, while for N32/64, it is allocated in the
  // callee's stack frame.
  for (unsigned I = Idx; I < ArgRegs.size();
       ++I, VaArgOffset += RegSizeInBytes) {
    unsigned Reg = addLiveIn(MF, ArgRegs[I], RC);
    SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegTy);
    FI = MFI.CreateFixedObject(RegSizeInBytes, VaArgOffset, true);
    SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    SDValue Store =
        DAG.getStore(Chain, DL, ArgValue, PtrOff, MachinePointerInfo());
    cast<StoreSDNode>(Store.getNode())->getMemOperand()->setValue(
        (Value *)nullptr);
    OutChains.push_back(Store);
  }
}
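
// Example (illustrative, assuming O32 with arg registers $4-$7 and a 16-byte
// callee-allocated area): when lowering the formal arguments of a vararg
// function such as printf(const char *, ...), only $4 is taken by the fixed
// argument, so Idx == 1 and VaArgOffset == 16 - 4 * 3 == 4. $5, $6 and $7
// are then spilled to their home slots at offsets 4, 8 and 12, and VASTART
// is lowered to point at the first of those slots.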
void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
                                     Align Alignment) const {
  const TargetFrameLowering *TFL = Subtarget.getFrameLowering();

  assert(Size && "Byval argument's size shouldn't be 0.");

  Alignment = std::min(Alignment, TFL->getStackAlign());

  unsigned FirstReg = 0;
  unsigned NumRegs = 0;

  if (State->getCallingConv() != CallingConv::Fast) {
    unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
    ArrayRef<MCPhysReg> IntArgRegs = ABI.GetByValArgRegs();
    // FIXME: The O32 case actually describes no shadow registers.
    const MCPhysReg *ShadowRegs =
        ABI.IsO32() ? IntArgRegs.data() : Mips64DPRegs;

    // We used to check the size as well but we can't do that anymore since
    // CCState::HandleByVal() rounds up the size after calling this function.
    assert(
        Alignment >= Align(RegSizeInBytes) &&
        "Byval argument's alignment should be a multiple of RegSizeInBytes.");

    FirstReg = State->getFirstUnallocated(IntArgRegs);

    // If Alignment > RegSizeInBytes, the first arg register must be even.
    // FIXME: This condition happens to do the right thing but it's not the
    // right way to test it. We want to check that the stack frame offset
    // of the register is aligned.
    if ((Alignment > RegSizeInBytes) && (FirstReg % 2)) {
      State->AllocateReg(IntArgRegs[FirstReg], ShadowRegs[FirstReg]);
      ++FirstReg;
    }

    // Mark the registers allocated.
    Size = alignTo(Size, RegSizeInBytes);
    for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());
         Size -= RegSizeInBytes, ++I, ++NumRegs)
      State->AllocateReg(IntArgRegs[I], ShadowRegs[I]);
  }

  State->addInRegsParamInfo(FirstReg, FirstReg + NumRegs);
}
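
// Example (illustrative, assuming O32, 4-byte GPRs, arg registers $4-$7): a
// 12-byte byval struct with 8-byte alignment arriving when only $4 is taken
// first allocates-and-skips $5 (so the struct starts at an 8-byte-aligned
// offset), then allocates $6 and $7, leaving the final 4 bytes to be passed
// on the stack. The recorded [FirstReg, FirstReg + NumRegs) range is what
// the byval copying code above later consumes.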
MachineBasicBlock *MipsTargetLowering::emitPseudoSELECT(MachineInstr &MI,
                                                        MachineBasicBlock *BB,
                                                        bool isFPCmp,
                                                        unsigned Opc) const {
  assert(!(Subtarget.hasMips4() || Subtarget.hasMips32()) &&
         "Subtarget already supports SELECT nodes with the use of "
         "conditional-move instructions.");

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  // To "insert" a SELECT instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  // thisMBB:
  //  ...
  //   TrueVal = ...
  //   setcc r1, r2, r3
  //   bNE   r1, r0, sinkMBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  if (isFPCmp) {
    // bc1[tf] cc, sinkMBB
    BuildMI(BB, DL, TII->get(Opc))
        .addReg(MI.getOperand(1).getReg())
        .addMBB(sinkMBB);
  } else {
    // bne rs, $0, sinkMBB
    BuildMI(BB, DL, TII->get(Opc))
        .addReg(MI.getOperand(1).getReg())
        .addReg(Mips::ZERO)
        .addMBB(sinkMBB);
  }

  // copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //   %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
  //  ...
  BB = sinkMBB;

  BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(2).getReg())
      .addMBB(thisMBB)
      .addReg(MI.getOperand(3).getReg())
      .addMBB(copy0MBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.

  return BB;
}
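
// The resulting control flow, sketched for illustration:
//
//   thisMBB:   ...; b<cond> sinkMBB      (TrueValue is available here)
//      |   \
//      |    copy0MBB:                    (FalseValue path, falls through)
//      |   /
//   sinkMBB:  %Result = PHI [TrueValue, thisMBB], [FalseValue, copy0MBB]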
MachineBasicBlock *
MipsTargetLowering::emitPseudoD_SELECT(MachineInstr &MI,
                                       MachineBasicBlock *BB) const {
  assert(!(Subtarget.hasMips4() || Subtarget.hasMips32()) &&
         "Subtarget already supports SELECT nodes with the use of "
         "conditional-move instructions.");

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  // D_SELECT substitutes two SELECT nodes that appear one after another and
  // share the same condition operand. On machines without a conditional-move
  // instruction, expanding the pair as a single diamond avoids the redundant
  // branches that two separate SELECT expansions would produce.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  // thisMBB:
  //  ...
  //   TrueVal = ...
  //   setcc r1, r2, r3
  //   bNE   r1, r0, sinkMBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  // bne rs, $0, sinkMBB
  BuildMI(BB, DL, TII->get(Mips::BNE))
      .addReg(MI.getOperand(2).getReg())
      .addReg(Mips::ZERO)
      .addMBB(sinkMBB);

  // copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //   %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
  //  ...
  BB = sinkMBB;

  // Use two PHI nodes to select the two results.
  BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(3).getReg())
      .addMBB(thisMBB)
      .addReg(MI.getOperand(5).getReg())
      .addMBB(copy0MBB);
  BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(1).getReg())
      .addReg(MI.getOperand(4).getReg())
      .addMBB(thisMBB)
      .addReg(MI.getOperand(6).getReg())
      .addMBB(copy0MBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.

  return BB;
}
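
// For reference, the D_SELECT operand layout as used above: operand 2 is the
// shared condition; operands 3 and 4 are the values selected when it is
// non-zero (arriving from thisMBB), operands 5 and 6 the values selected
// otherwise (arriving via copy0MBB); operands 0 and 1 receive the two
// results through the PHIs in sinkMBB.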
// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register
MipsTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                      const MachineFunction &MF) const {
  // The Linux kernel uses $28 and sp.
  if (Subtarget.isGP64bit()) {
    Register Reg = StringSwitch<Register>(RegName)
                       .Case("$28", Mips::GP_64)
                       .Case("sp", Mips::SP_64)
                       .Default(Register());
    if (Reg)
      return Reg;
  } else {
    Register Reg = StringSwitch<Register>(RegName)
                       .Case("$28", Mips::GP)
                       .Case("sp", Mips::SP)
                       .Default(Register());
    if (Reg)
      return Reg;
  }
  report_fatal_error("Invalid register name global variable");
}
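
// This hook serves the llvm.read_register / llvm.write_register intrinsics
// that named-register global variables are lowered to; an illustrative IR
// use (hypothetical, not from this file):
//   %gp = call i64 @llvm.read_register.i64(metadata !0)
//   !0 = !{!"$28"}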
MachineBasicBlock *MipsTargetLowering::emitLDR_W(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const bool IsLittle = Subtarget.isLittle();
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Address = MI.getOperand(1).getReg();
  unsigned Imm = MI.getOperand(2).getImm();

  MachineBasicBlock::iterator I(MI);

  if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
    // MIPS release 6 can load from an address that is not naturally aligned.
    Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(*BB, I, DL, TII->get(Mips::LW))
        .addDef(Temp)
        .addUse(Address)
        .addImm(Imm);
    BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Dest).addUse(Temp);
  } else {
    // MIPS release 5 needs to use instructions that can load from an
    // unaligned memory address.
    Register LoadHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LoadFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register Undef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(*BB, I, DL, TII->get(Mips::IMPLICIT_DEF)).addDef(Undef);
    BuildMI(*BB, I, DL, TII->get(Mips::LWR))
        .addDef(LoadHalf)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 0 : 3))
        .addUse(Undef);
    BuildMI(*BB, I, DL, TII->get(Mips::LWL))
        .addDef(LoadFull)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 3 : 0))
        .addUse(LoadHalf);
    BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Dest).addUse(LoadFull);
  }

  MI.eraseFromParent();
  return BB;
}
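
// Illustrative byte math for the pre-R6 path (little-endian): to load the
// word at unaligned address A = 0x1001, LWR at A merges bytes 0x1001-0x1003
// into the low 24 bits and LWL at A + 3 (0x1004) supplies the remaining
// high byte, so the pair assembles the full word without an alignment trap.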
MachineBasicBlock *MipsTargetLowering::emitLDR_D(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const bool IsLittle = Subtarget.isLittle();
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Address = MI.getOperand(1).getReg();
  unsigned Imm = MI.getOperand(2).getImm();

  MachineBasicBlock::iterator I(MI);

  if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
    // MIPS release 6 can load from an address that is not naturally aligned.
    if (Subtarget.isGP64bit()) {
      Register Temp = MRI.createVirtualRegister(&Mips::GPR64RegClass);
      BuildMI(*BB, I, DL, TII->get(Mips::LD))
          .addDef(Temp)
          .addUse(Address)
          .addImm(Imm);
      BuildMI(*BB, I, DL, TII->get(Mips::FILL_D)).addDef(Dest).addUse(Temp);
    } else {
      Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
      Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      BuildMI(*BB, I, DL, TII->get(Mips::LW))
          .addDef(Lo)
          .addUse(Address)
          .addImm(Imm + (IsLittle ? 0 : 4));
      BuildMI(*BB, I, DL, TII->get(Mips::LW))
          .addDef(Hi)
          .addUse(Address)
          .addImm(Imm + (IsLittle ? 4 : 0));
      BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Wtemp).addUse(Lo);
      BuildMI(*BB, I, DL, TII->get(Mips::INSERT_W), Dest)
          .addUse(Wtemp)
          .addUse(Hi)
          .addImm(1);
    }
  } else {
    // MIPS release 5 needs to use instructions that can load from an
    // unaligned memory address.
    Register LoHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LoFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LoUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register HiHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register HiFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register HiUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
    BuildMI(*BB, I, DL, TII->get(Mips::IMPLICIT_DEF)).addDef(LoUndef);
    BuildMI(*BB, I, DL, TII->get(Mips::LWR))
        .addDef(LoHalf)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 0 : 7))
        .addUse(LoUndef);
    BuildMI(*BB, I, DL, TII->get(Mips::LWL))
        .addDef(LoFull)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 3 : 4))
        .addUse(LoHalf);
    BuildMI(*BB, I, DL, TII->get(Mips::IMPLICIT_DEF)).addDef(HiUndef);
    BuildMI(*BB, I, DL, TII->get(Mips::LWR))
        .addDef(HiHalf)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 4 : 3))
        .addUse(HiUndef);
    BuildMI(*BB, I, DL, TII->get(Mips::LWL))
        .addDef(HiFull)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 7 : 0))
        .addUse(HiHalf);
    BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Wtemp).addUse(LoFull);
    BuildMI(*BB, I, DL, TII->get(Mips::INSERT_W), Dest)
        .addUse(Wtemp)
        .addUse(HiFull)
        .addImm(1);
  }

  MI.eraseFromParent();
  return BB;
}
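
// On 32-bit pre-R6 targets the 64-bit element is thus assembled from two
// unaligned word loads (covering bytes Imm+0..3 and Imm+4..7), then placed
// into the MSA register as element 0 via FILL_W and element 1 via INSERT_W.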
MachineBasicBlock *MipsTargetLowering::emitSTR_W(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const bool IsLittle = Subtarget.isLittle();
  DebugLoc DL = MI.getDebugLoc();

  Register StoreVal = MI.getOperand(0).getReg();
  Register Address = MI.getOperand(1).getReg();
  unsigned Imm = MI.getOperand(2).getImm();

  MachineBasicBlock::iterator I(MI);

  if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
    // MIPS release 6 can store to an address that is not naturally aligned.
    Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
    Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(*BB, I, DL, TII->get(Mips::COPY)).addDef(BitcastW).addUse(StoreVal);
    BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
        .addDef(Tmp)
        .addUse(BitcastW)
        .addImm(0);
    BuildMI(*BB, I, DL, TII->get(Mips::SW))
        .addUse(Tmp)
        .addUse(Address)
        .addImm(Imm);
  } else {
    // MIPS release 5 needs to use instructions that can store to an
    // unaligned memory address.
    Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
        .addDef(Tmp)
        .addUse(StoreVal)
        .addImm(0);
    BuildMI(*BB, I, DL, TII->get(Mips::SWR))
        .addUse(Tmp)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 0 : 3));
    BuildMI(*BB, I, DL, TII->get(Mips::SWL))
        .addUse(Tmp)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 3 : 0));
  }

  MI.eraseFromParent();

  return BB;
}
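
// This mirrors emitLDR_W: COPY_S_W extracts element 0 of the vector into a
// GPR, and on pre-R6 targets the SWR/SWL pair writes it to the unaligned
// address in the same byte order that the LWR/LWL pair reads it.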
MachineBasicBlock *MipsTargetLowering::emitSTR_D(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const bool IsLittle = Subtarget.isLittle();
  DebugLoc DL = MI.getDebugLoc();

  Register StoreVal = MI.getOperand(0).getReg();
  Register Address = MI.getOperand(1).getReg();
  unsigned Imm = MI.getOperand(2).getImm();

  MachineBasicBlock::iterator I(MI);

  if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
    // MIPS release 6 can store to an address that is not naturally aligned.
    if (Subtarget.isGP64bit()) {
      Register BitcastD = MRI.createVirtualRegister(&Mips::MSA128DRegClass);
      Register Lo = MRI.createVirtualRegister(&Mips::GPR64RegClass);
      BuildMI(*BB, I, DL, TII->get(Mips::COPY))
          .addDef(BitcastD)
          .addUse(StoreVal);
      BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_D))
          .addDef(Lo)
          .addUse(BitcastD)
          .addImm(0);
      BuildMI(*BB, I, DL, TII->get(Mips::SD))
          .addUse(Lo)
          .addUse(Address)
          .addImm(Imm);
    } else {
      Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
      Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      BuildMI(*BB, I, DL, TII->get(Mips::COPY))
          .addDef(BitcastW)
          .addUse(StoreVal);
      BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
          .addDef(Lo)
          .addUse(BitcastW)
          .addImm(0);
      BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
          .addDef(Hi)
          .addUse(BitcastW)
          .addImm(1);
      BuildMI(*BB, I, DL, TII->get(Mips::SW))
          .addUse(Lo)
          .addUse(Address)
          .addImm(Imm + (IsLittle ? 0 : 4));
      BuildMI(*BB, I, DL, TII->get(Mips::SW))
          .addUse(Hi)
          .addUse(Address)
          .addImm(Imm + (IsLittle ? 4 : 0));
    }
  } else {
    // MIPS release 5 needs to use instructions that can store to an
    // unaligned memory address.
    Register Bitcast = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
    Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(*BB, I, DL, TII->get(Mips::COPY)).addDef(Bitcast).addUse(StoreVal);
    BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
        .addDef(Lo)
        .addUse(Bitcast)
        .addImm(0);
    BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
        .addDef(Hi)
        .addUse(Bitcast)
        .addImm(1);
    BuildMI(*BB, I, DL, TII->get(Mips::SWR))
        .addUse(Lo)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 0 : 3));
    BuildMI(*BB, I, DL, TII->get(Mips::SWL))
        .addUse(Lo)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 3 : 0));
    BuildMI(*BB, I, DL, TII->get(Mips::SWR))
        .addUse(Hi)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 4 : 7));
    BuildMI(*BB, I, DL, TII->get(Mips::SWL))
        .addUse(Hi)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 7 : 4));
  }

  MI.eraseFromParent();
  return BB;
}
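
// Note: like the other emitLDR_* and emitSTR_* helpers above, this expansion
// is reached via EmitInstrWithCustomInserter once instruction selection has
// produced the corresponding pseudo, which is why it builds machine IR
// directly rather than SelectionDAG nodes.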