//===- ARMLegalizerInfo.cpp --------------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// This file implements the targeting of the MachineLegalizer class for ARM.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//
#include "ARMLegalizerInfo.h"
#include "ARMCallLowering.h"
#include "ARMSubtarget.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"

using namespace llvm;
using namespace LegalizeActions;

/// FIXME: The following static functions are SizeChangeStrategy functions
/// that are meant to temporarily mimic the behaviour of the old legalization
/// based on doubling/halving non-legal types as closely as possible. This is
/// not entirely possible, as legalizing only the types whose size is exactly
/// a power of 2 times a legal size would require specifying all of those
/// sizes explicitly.
/// In practice, not specifying those isn't a problem, and the functions below
/// should disappear quickly as we add further support for legalizing
/// non-power-of-2 sized types.
static void addAndInterleaveWithUnsupported(
    LegacyLegalizerInfo::SizeAndActionsVec &result,
    const LegacyLegalizerInfo::SizeAndActionsVec &v) {
  for (unsigned i = 0; i < v.size(); ++i) {
    result.push_back(v[i]);
    if (i + 1 < v[i].first && i + 1 < v.size() &&
        v[i + 1].first != v[i].first + 1)
      result.push_back({v[i].first + 1, LegacyLegalizeActions::Unsupported});
  }
}

static LegacyLegalizerInfo::SizeAndActionsVec
widen_8_16(const LegacyLegalizerInfo::SizeAndActionsVec &v) {
  assert(v.size() >= 1);
  assert(v[0].first > 17);
  LegacyLegalizerInfo::SizeAndActionsVec result = {
      {1, LegacyLegalizeActions::Unsupported},
      {8, LegacyLegalizeActions::WidenScalar},
      {9, LegacyLegalizeActions::Unsupported},
      {16, LegacyLegalizeActions::WidenScalar},
      {17, LegacyLegalizeActions::Unsupported}};
  addAndInterleaveWithUnsupported(result, v);
  auto Largest = result.back().first;
  result.push_back({Largest + 1, LegacyLegalizeActions::Unsupported});
  return result;
}
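
// Illustrative example (editor's note, not part of the upstream code): for a
// hypothetical input of {{32, Lower}}, widen_8_16 produces
//   {1, Unsupported}, {8, WidenScalar}, {9, Unsupported},
//   {16, WidenScalar}, {17, Unsupported}, {32, Lower}, {33, Unsupported}
// i.e. 8-bit and 16-bit scalars are widened, the caller's 32-bit action is
// kept, and all other sizes are rejected.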

static bool AEABI(const ARMSubtarget &ST) {
  return ST.isTargetAEABI() || ST.isTargetGNUAEABI() || ST.isTargetMuslAEABI();
}

ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
  using namespace TargetOpcode;

  const LLT p0 = LLT::pointer(0, 32);

  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  auto &LegacyInfo = getLegacyLegalizerInfo();
  if (ST.isThumb1Only()) {
    // Thumb1 is not supported yet.
    LegacyInfo.computeTables();
    verify(*ST.getInstrInfo());
    return;
  }

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
      .legalForCartesianProduct({s8, s16, s32}, {s1, s8, s16});

  getActionDefinitionsBuilder(G_SEXT_INREG).lower();

  getActionDefinitionsBuilder({G_MUL, G_AND, G_OR, G_XOR})
      .legalFor({s32})
      .clampScalar(0, s32, s32);

  if (ST.hasNEON())
    getActionDefinitionsBuilder({G_ADD, G_SUB})
        .legalFor({s32, s64})
        .minScalar(0, s32);
  else
    getActionDefinitionsBuilder({G_ADD, G_SUB})
        .legalFor({s32})
        .minScalar(0, s32);

  getActionDefinitionsBuilder({G_ASHR, G_LSHR, G_SHL})
      .legalFor({{s32, s32}})
      .minScalar(0, s32)
      .clampScalar(1, s32, s32);

  bool HasHWDivide = (!ST.isThumb() && ST.hasDivideInARMMode()) ||
                     (ST.isThumb() && ST.hasDivideInThumbMode());
  if (HasHWDivide)
    getActionDefinitionsBuilder({G_SDIV, G_UDIV})
        .legalFor({s32})
        .clampScalar(0, s32, s32);
  else
    getActionDefinitionsBuilder({G_SDIV, G_UDIV})
        .libcallFor({s32})
        .clampScalar(0, s32, s32);

  for (unsigned Op : {G_SREM, G_UREM}) {
    LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(Op, 0, widen_8_16);
    if (HasHWDivide)
      LegacyInfo.setAction({Op, s32}, LegacyLegalizeActions::Lower);
    else if (AEABI(ST))
      LegacyInfo.setAction({Op, s32}, LegacyLegalizeActions::Custom);
    else
      LegacyInfo.setAction({Op, s32}, LegacyLegalizeActions::Libcall);
  }

  getActionDefinitionsBuilder(G_INTTOPTR)
      .legalFor({{p0, s32}})
      .minScalar(1, s32);
  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalFor({{s32, p0}})
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({s32, p0})
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder(G_ICMP)
      .legalForCartesianProduct({s1}, {s32, p0})
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_SELECT)
      .legalForCartesianProduct({s32, p0}, {s1})
      .minScalar(0, s32);

  // We're keeping these builders around because we'll want to add support for
  // floating point to them.
  auto &LoadStoreBuilder = getActionDefinitionsBuilder({G_LOAD, G_STORE})
                               .legalForTypesWithMemDesc({{s8, p0, s8, 8},
                                                          {s16, p0, s16, 8},
                                                          {s32, p0, s32, 8},
                                                          {p0, p0, p0, 8}})
                               .unsupportedIfMemSizeNotPow2();

  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});
  getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor({p0});

  auto &PhiBuilder =
      getActionDefinitionsBuilder(G_PHI)
          .legalFor({s32, p0})
          .minScalar(0, s32);

  getActionDefinitionsBuilder(G_PTR_ADD)
      .legalFor({{p0, s32}})
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_BRCOND).legalFor({s1});

  if (!ST.useSoftFloat() && ST.hasVFP2Base()) {
    getActionDefinitionsBuilder(
        {G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FCONSTANT, G_FNEG})
        .legalFor({s32, s64});

    LoadStoreBuilder
        .legalForTypesWithMemDesc({{s64, p0, s64, 32}})
        .maxScalar(0, s32);
    PhiBuilder.legalFor({s64});

    getActionDefinitionsBuilder(G_FCMP).legalForCartesianProduct({s1},
                                                                 {s32, s64});

    getActionDefinitionsBuilder(G_MERGE_VALUES).legalFor({{s64, s32}});
    getActionDefinitionsBuilder(G_UNMERGE_VALUES).legalFor({{s32, s64}});

    getActionDefinitionsBuilder(G_FPEXT).legalFor({{s64, s32}});
    getActionDefinitionsBuilder(G_FPTRUNC).legalFor({{s32, s64}});

    getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
        .legalForCartesianProduct({s32}, {s32, s64});
    getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
        .legalForCartesianProduct({s32, s64}, {s32});
  } else {
    getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV})
        .libcallFor({s32, s64});

    LoadStoreBuilder.maxScalar(0, s32);

    for (auto Ty : {s32, s64})
      LegacyInfo.setAction({G_FNEG, Ty}, LegacyLegalizeActions::Lower);

    getActionDefinitionsBuilder(G_FCONSTANT).customFor({s32, s64});

    getActionDefinitionsBuilder(G_FCMP).customForCartesianProduct({s1},
                                                                  {s32, s64});

    if (AEABI(ST))
      setFCmpLibcallsAEABI();
    else
      setFCmpLibcallsGNU();

    getActionDefinitionsBuilder(G_FPEXT).libcallFor({{s64, s32}});
    getActionDefinitionsBuilder(G_FPTRUNC).libcallFor({{s32, s64}});

    getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
        .libcallForCartesianProduct({s32}, {s32, s64});
    getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
        .libcallForCartesianProduct({s32, s64}, {s32});
  }

  // Just expand whatever loads and stores are left.
  LoadStoreBuilder.lower();

  if (!ST.useSoftFloat() && ST.hasVFP4Base())
    getActionDefinitionsBuilder(G_FMA).legalFor({s32, s64});
  else
    getActionDefinitionsBuilder(G_FMA).libcallFor({s32, s64});

  getActionDefinitionsBuilder({G_FREM, G_FPOW}).libcallFor({s32, s64});

  if (ST.hasV5TOps()) {
    getActionDefinitionsBuilder(G_CTLZ)
        .legalFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
    getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF)
        .lowerFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
  } else {
    getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF)
        .libcallFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
    getActionDefinitionsBuilder(G_CTLZ)
        .lowerFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
  }

  LegacyInfo.computeTables();
  verify(*ST.getInstrInfo());
}

void ARMLegalizerInfo::setFCmpLibcallsAEABI() {
  // FCMP_TRUE and FCMP_FALSE don't need libcalls, they should be
  // default-initialized.
  FCmp32Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp32Libcalls[CmpInst::FCMP_OEQ] = {
      {RTLIB::OEQ_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OGE] = {
      {RTLIB::OGE_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OGT] = {
      {RTLIB::OGT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OLE] = {
      {RTLIB::OLE_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OLT] = {
      {RTLIB::OLT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::UO_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UNO] = {
      {RTLIB::UO_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_ONE] = {
      {RTLIB::OGT_F32, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::OLT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_UEQ] = {
      {RTLIB::OEQ_F32, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::UO_F32, CmpInst::BAD_ICMP_PREDICATE}};

  FCmp64Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp64Libcalls[CmpInst::FCMP_OEQ] = {
      {RTLIB::OEQ_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OGE] = {
      {RTLIB::OGE_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OGT] = {
      {RTLIB::OGT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OLE] = {
      {RTLIB::OLE_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OLT] = {
      {RTLIB::OLT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::UO_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UNO] = {
      {RTLIB::UO_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_ONE] = {
      {RTLIB::OGT_F64, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::OLT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_UEQ] = {
      {RTLIB::OEQ_F64, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::UO_F64, CmpInst::BAD_ICMP_PREDICATE}};
}

void ARMLegalizerInfo::setFCmpLibcallsGNU() {
  // FCMP_TRUE and FCMP_FALSE don't need libcalls, they should be
  // default-initialized.
  FCmp32Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp32Libcalls[CmpInst::FCMP_OEQ] = {{RTLIB::OEQ_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_OGE] = {{RTLIB::OGE_F32, CmpInst::ICMP_SGE}};
  FCmp32Libcalls[CmpInst::FCMP_OGT] = {{RTLIB::OGT_F32, CmpInst::ICMP_SGT}};
  FCmp32Libcalls[CmpInst::FCMP_OLE] = {{RTLIB::OLE_F32, CmpInst::ICMP_SLE}};
  FCmp32Libcalls[CmpInst::FCMP_OLT] = {{RTLIB::OLT_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::UO_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F32, CmpInst::ICMP_SGE}};
  FCmp32Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F32, CmpInst::ICMP_SGT}};
  FCmp32Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F32, CmpInst::ICMP_SLE}};
  FCmp32Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F32, CmpInst::ICMP_NE}};
  FCmp32Libcalls[CmpInst::FCMP_UNO] = {{RTLIB::UO_F32, CmpInst::ICMP_NE}};
  FCmp32Libcalls[CmpInst::FCMP_ONE] = {{RTLIB::OGT_F32, CmpInst::ICMP_SGT},
                                       {RTLIB::OLT_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_UEQ] = {{RTLIB::OEQ_F32, CmpInst::ICMP_EQ},
                                       {RTLIB::UO_F32, CmpInst::ICMP_NE}};

  FCmp64Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp64Libcalls[CmpInst::FCMP_OEQ] = {{RTLIB::OEQ_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_OGE] = {{RTLIB::OGE_F64, CmpInst::ICMP_SGE}};
  FCmp64Libcalls[CmpInst::FCMP_OGT] = {{RTLIB::OGT_F64, CmpInst::ICMP_SGT}};
  FCmp64Libcalls[CmpInst::FCMP_OLE] = {{RTLIB::OLE_F64, CmpInst::ICMP_SLE}};
  FCmp64Libcalls[CmpInst::FCMP_OLT] = {{RTLIB::OLT_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::UO_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F64, CmpInst::ICMP_SGE}};
  FCmp64Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F64, CmpInst::ICMP_SGT}};
  FCmp64Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F64, CmpInst::ICMP_SLE}};
  FCmp64Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F64, CmpInst::ICMP_NE}};
  FCmp64Libcalls[CmpInst::FCMP_UNO] = {{RTLIB::UO_F64, CmpInst::ICMP_NE}};
  FCmp64Libcalls[CmpInst::FCMP_ONE] = {{RTLIB::OGT_F64, CmpInst::ICMP_SGT},
                                       {RTLIB::OLT_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_UEQ] = {{RTLIB::OEQ_F64, CmpInst::ICMP_EQ},
                                       {RTLIB::UO_F64, CmpInst::ICMP_NE}};
}

ARMLegalizerInfo::FCmpLibcallsList
ARMLegalizerInfo::getFCmpLibcalls(CmpInst::Predicate Predicate,
                                  unsigned Size) const {
  assert(CmpInst::isFPPredicate(Predicate) && "Unsupported FCmp predicate");
  if (Size == 32)
    return FCmp32Libcalls[Predicate];
  if (Size == 64)
    return FCmp64Libcalls[Predicate];
  llvm_unreachable("Unsupported size for FCmp predicate");
}
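
// Usage sketch (editor's illustration, not upstream code): with the GNU
// mappings above, getFCmpLibcalls(CmpInst::FCMP_ONE, 32) yields two entries,
// {RTLIB::OGT_F32, ICMP_SGT} and {RTLIB::OLT_F32, ICMP_SLT}; legalizeCustom
// below emits one libcall per entry and ORs the resulting 1-bit values.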

bool ARMLegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
                                      MachineInstr &MI) const {
  using namespace TargetOpcode;

  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();

  switch (MI.getOpcode()) {
  default:
    return false;
  case G_SREM:
  case G_UREM: {
    Register OriginalResult = MI.getOperand(0).getReg();
    auto Size = MRI.getType(OriginalResult).getSizeInBits();
    if (Size != 32)
      return false;

    auto Libcall =
        MI.getOpcode() == G_SREM ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;

    // Our divmod libcalls return a struct containing the quotient and the
    // remainder. Create a new, unused register for the quotient and use the
    // destination of the original instruction for the remainder.
    Type *ArgTy = Type::getInt32Ty(Ctx);
    StructType *RetTy = StructType::get(Ctx, {ArgTy, ArgTy}, /* Packed */ true);
    Register RetRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          OriginalResult};
    auto Status = createLibcall(MIRBuilder, Libcall, {RetRegs, RetTy, 0},
                                {{MI.getOperand(1).getReg(), ArgTy, 0},
                                 {MI.getOperand(2).getReg(), ArgTy, 0}});
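    // Editor's note (an assumption about the usual runtime, not checked by
    // this file): on AEABI targets RTLIB::SDIVREM_I32 / RTLIB::UDIVREM_I32
    // typically resolve to __aeabi_idivmod / __aeabi_uidivmod, whose
    // quotient/remainder pair matches the packed {i32, i32} struct built
    // above.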
    if (Status != LegalizerHelper::Legalized)
      return false;
    break;
  }
  case G_FCMP: {
    assert(MRI.getType(MI.getOperand(2).getReg()) ==
               MRI.getType(MI.getOperand(3).getReg()) &&
           "Mismatched operands for G_FCMP");
    auto OpSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();

    auto OriginalResult = MI.getOperand(0).getReg();
    auto Predicate =
        static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
    auto Libcalls = getFCmpLibcalls(Predicate, OpSize);

    if (Libcalls.empty()) {
      assert((Predicate == CmpInst::FCMP_TRUE ||
              Predicate == CmpInst::FCMP_FALSE) &&
             "Predicate needs libcalls, but none specified");
      MIRBuilder.buildConstant(OriginalResult,
                               Predicate == CmpInst::FCMP_TRUE ? 1 : 0);
      MI.eraseFromParent();
      return true;
    }

    assert((OpSize == 32 || OpSize == 64) && "Unsupported operand size");
    auto *ArgTy = OpSize == 32 ? Type::getFloatTy(Ctx) : Type::getDoubleTy(Ctx);
    auto *RetTy = Type::getInt32Ty(Ctx);

    SmallVector<Register, 2> Results;
    for (auto Libcall : Libcalls) {
      auto LibcallResult = MRI.createGenericVirtualRegister(LLT::scalar(32));
      auto Status = createLibcall(MIRBuilder, Libcall.LibcallID,
                                  {LibcallResult, RetTy, 0},
                                  {{MI.getOperand(2).getReg(), ArgTy, 0},
                                   {MI.getOperand(3).getReg(), ArgTy, 0}});

      if (Status != LegalizerHelper::Legalized)
        return false;

      auto ProcessedResult =
          Libcalls.size() == 1
              ? OriginalResult
              : MRI.createGenericVirtualRegister(MRI.getType(OriginalResult));

      // We have a result, but we need to transform it into a proper 1-bit 0 or
      // 1, taking into account the different peculiarities of the values
      // returned by the comparison functions.
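      // For example (editor's illustration, assuming the GNU mappings above):
      // FCMP_OGE on s32 uses {RTLIB::OGE_F32, ICMP_SGE}, so the libcall's
      // integer result is checked as "result >= 0"; AEABI entries carrying
      // BAD_ICMP_PREDICATE already produce 0 or 1 and only need the
      // truncation below.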
      CmpInst::Predicate ResultPred = Libcall.Predicate;
      if (ResultPred == CmpInst::BAD_ICMP_PREDICATE) {
        // We have a nice 0 or 1, and we just need to truncate it back to 1 bit
        // to keep the types consistent.
        MIRBuilder.buildTrunc(ProcessedResult, LibcallResult);
      } else {
        // We need to compare against 0.
        assert(CmpInst::isIntPredicate(ResultPred) && "Unsupported predicate");
        auto Zero = MIRBuilder.buildConstant(LLT::scalar(32), 0);
        MIRBuilder.buildICmp(ResultPred, ProcessedResult, LibcallResult, Zero);
      }
      Results.push_back(ProcessedResult);
    }

    if (Results.size() != 1) {
      assert(Results.size() == 2 && "Unexpected number of results");
      MIRBuilder.buildOr(OriginalResult, Results[0], Results[1]);
    }
    break;
  }
  case G_FCONSTANT: {
    // Convert to integer constants, while preserving the binary representation.
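    // For instance (editor's illustrative note), an s32 G_FCONSTANT holding
    // 1.0f becomes a G_CONSTANT with the IEEE-754 bit pattern 0x3F800000.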
    auto AsInteger =
        MI.getOperand(1).getFPImm()->getValueAPF().bitcastToAPInt();
    MIRBuilder.buildConstant(MI.getOperand(0),
                             *ConstantInt::get(Ctx, AsInteger));
    break;
  }
  }

  MI.eraseFromParent();
  return true;
}