//===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

static cl::opt<bool> JumpIsExpensiveOverride(
    "jump-is-expensive", cl::init(false),
    cl::desc("Do not create extra branches to split comparison logic."),
    cl::Hidden);

static cl::opt<unsigned> MinimumJumpTableEntries
  ("min-jump-table-entries", cl::init(4), cl::Hidden,
   cl::desc("Set minimum number of entries to use a jump table."));

static cl::opt<unsigned> MaximumJumpTableSize
  ("max-jump-table-size", cl::init(UINT_MAX), cl::Hidden,
   cl::desc("Set maximum size of jump tables."));

/// Minimum jump table density for normal functions.
static cl::opt<unsigned>
    JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
                     cl::desc("Minimum density for building a jump table in "
                              "a normal function"));

/// Minimum jump table density for -Os or -Oz functions.
static cl::opt<unsigned> OptsizeJumpTableDensity(
    "optsize-jump-table-density", cl::init(40), cl::Hidden,
    cl::desc("Minimum density for building a jump table in "
             "an optsize function"));

// FIXME: This option exists only to test whether strict fp operations are
// processed correctly, by preventing mutation of strict fp operations into
// normal fp operations during development. Once the backend supports strict
// float operations, this option will be meaningless.
static cl::opt<bool> DisableStrictNodeMutation("disable-strictnode-mutation",
       cl::desc("Don't mutate strict-float node to a legalize node"),
       cl::init(false), cl::Hidden);

static bool darwinHasSinCos(const Triple &TT) {
  assert(TT.isOSDarwin() && "should be called with darwin triple");
  // Don't bother with 32 bit x86.
  if (TT.getArch() == Triple::x86)
    return false;
  // macOS < 10.9 has no sincos_stret.
  if (TT.isMacOSX())
    return !TT.isMacOSXVersionLT(10, 9) && TT.isArch64Bit();
  // iOS < 7.0 has no sincos_stret.
  if (TT.isiOS())
    return !TT.isOSVersionLT(7, 0);
  // Any other Darwin OS, such as watchOS or tvOS, is new enough.
  return true;
}

void TargetLoweringBase::InitLibcalls(const Triple &TT) {
#define HANDLE_LIBCALL(code, name) \
  setLibcallName(RTLIB::code, name);
#include "llvm/IR/RuntimeLibcalls.def"
#undef HANDLE_LIBCALL
  // Initialize calling conventions to their default.
  for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
    setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);

  // For IEEE quad-precision libcall names, PPC uses "kf" instead of "tf".
  if (TT.isPPC()) {
    setLibcallName(RTLIB::ADD_F128, "__addkf3");
    setLibcallName(RTLIB::SUB_F128, "__subkf3");
    setLibcallName(RTLIB::MUL_F128, "__mulkf3");
    setLibcallName(RTLIB::DIV_F128, "__divkf3");
    setLibcallName(RTLIB::POWI_F128, "__powikf2");
    setLibcallName(RTLIB::FPEXT_F32_F128, "__extendsfkf2");
    setLibcallName(RTLIB::FPEXT_F64_F128, "__extenddfkf2");
    setLibcallName(RTLIB::FPROUND_F128_F32, "__trunckfsf2");
    setLibcallName(RTLIB::FPROUND_F128_F64, "__trunckfdf2");
    setLibcallName(RTLIB::FPTOSINT_F128_I32, "__fixkfsi");
    setLibcallName(RTLIB::FPTOSINT_F128_I64, "__fixkfdi");
    setLibcallName(RTLIB::FPTOSINT_F128_I128, "__fixkfti");
    setLibcallName(RTLIB::FPTOUINT_F128_I32, "__fixunskfsi");
    setLibcallName(RTLIB::FPTOUINT_F128_I64, "__fixunskfdi");
    setLibcallName(RTLIB::FPTOUINT_F128_I128, "__fixunskfti");
    setLibcallName(RTLIB::SINTTOFP_I32_F128, "__floatsikf");
    setLibcallName(RTLIB::SINTTOFP_I64_F128, "__floatdikf");
    setLibcallName(RTLIB::SINTTOFP_I128_F128, "__floattikf");
    setLibcallName(RTLIB::UINTTOFP_I32_F128, "__floatunsikf");
    setLibcallName(RTLIB::UINTTOFP_I64_F128, "__floatundikf");
    setLibcallName(RTLIB::UINTTOFP_I128_F128, "__floatuntikf");
    setLibcallName(RTLIB::OEQ_F128, "__eqkf2");
    setLibcallName(RTLIB::UNE_F128, "__nekf2");
    setLibcallName(RTLIB::OGE_F128, "__gekf2");
    setLibcallName(RTLIB::OLT_F128, "__ltkf2");
    setLibcallName(RTLIB::OLE_F128, "__lekf2");
    setLibcallName(RTLIB::OGT_F128, "__gtkf2");
    setLibcallName(RTLIB::UO_F128, "__unordkf2");
  }

  // A few names are different on particular architectures or environments.
  if (TT.isOSDarwin()) {
    // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
    // of the gnueabi-style __gnu_*_ieee.
    // FIXME: What about other targets?
    setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

    // Some darwins have an optimized __bzero/bzero function.
    switch (TT.getArch()) {
    case Triple::x86:
    case Triple::x86_64:
      if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
        setLibcallName(RTLIB::BZERO, "__bzero");
      break;
    case Triple::aarch64:
    case Triple::aarch64_32:
      setLibcallName(RTLIB::BZERO, "bzero");
      break;
    default:
      break;
    }

    if (darwinHasSinCos(TT)) {
      setLibcallName(RTLIB::SINCOS_STRET_F32, "__sincosf_stret");
      setLibcallName(RTLIB::SINCOS_STRET_F64, "__sincos_stret");
      if (TT.isWatchABI()) {
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F32,
                              CallingConv::ARM_AAPCS_VFP);
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F64,
                              CallingConv::ARM_AAPCS_VFP);
      }
    }
  } else {
    setLibcallName(RTLIB::FPEXT_F16_F32, "__gnu_h2f_ieee");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
  }

  if (TT.isGNUEnvironment() || TT.isOSFuchsia() ||
      (TT.isAndroid() && !TT.isAndroidVersionLT(9))) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
    setLibcallName(RTLIB::SINCOS_F80, "sincosl");
    setLibcallName(RTLIB::SINCOS_F128, "sincosl");
    setLibcallName(RTLIB::SINCOS_PPCF128, "sincosl");
  }

  if (TT.isPS()) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
  }

  if (TT.isOSOpenBSD()) {
    setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL, nullptr);
  }

  if (TT.isOSWindows() && !TT.isOSCygMing()) {
    setLibcallName(RTLIB::LDEXP_F32, nullptr);
    setLibcallName(RTLIB::LDEXP_F80, nullptr);
    setLibcallName(RTLIB::LDEXP_F128, nullptr);
    setLibcallName(RTLIB::LDEXP_PPCF128, nullptr);

    setLibcallName(RTLIB::FREXP_F32, nullptr);
    setLibcallName(RTLIB::FREXP_F80, nullptr);
    setLibcallName(RTLIB::FREXP_F128, nullptr);
    setLibcallName(RTLIB::FREXP_PPCF128, nullptr);
  }
}

/// GetFPLibCall - Helper to return the right libcall for the given floating
/// point type, or UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPLibCall(EVT VT,
                                   RTLIB::Libcall Call_F32,
                                   RTLIB::Libcall Call_F64,
                                   RTLIB::Libcall Call_F80,
                                   RTLIB::Libcall Call_F128,
                                   RTLIB::Libcall Call_PPCF128) {
  return
    VT == MVT::f32 ? Call_F32 :
    VT == MVT::f64 ? Call_F64 :
    VT == MVT::f80 ? Call_F80 :
    VT == MVT::f128 ? Call_F128 :
    VT == MVT::ppcf128 ? Call_PPCF128 :
    RTLIB::UNKNOWN_LIBCALL;
}

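// Illustrative usage sketch (not part of the upstream file): a lowering
// routine typically passes one enumerator per supported FP type and lets
// getFPLibCall pick the one matching the node's value type, e.g. for FSIN:
//
//   RTLIB::Libcall LC =
//       RTLIB::getFPLibCall(VT, RTLIB::SIN_F32, RTLIB::SIN_F64,
//                           RTLIB::SIN_F80, RTLIB::SIN_F128,
//                           RTLIB::SIN_PPCF128);
//   // VT == MVT::f64 yields RTLIB::SIN_F64, i.e. the "sin" libcall.
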
/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::f32)
      return FPEXT_F16_F32;
    if (RetVT == MVT::f64)
      return FPEXT_F16_F64;
    if (RetVT == MVT::f80)
      return FPEXT_F16_F80;
    if (RetVT == MVT::f128)
      return FPEXT_F16_F128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F32_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F32_PPCF128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::f128)
      return FPEXT_F64_F128;
    else if (RetVT == MVT::ppcf128)
      return FPEXT_F64_PPCF128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::f128)
      return FPEXT_F80_F128;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_F16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_F16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F16;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F16;
  } else if (RetVT == MVT::bf16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_BF16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_BF16;
  } else if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  } else if (RetVT == MVT::f80) {
    if (OpVT == MVT::f128)
      return FPROUND_F128_F80;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

RTLIB::Libcall RTLIB::getPOWI(EVT RetVT) {
  return getFPLibCall(RetVT, POWI_F32, POWI_F64, POWI_F80, POWI_F128,
                      POWI_PPCF128);
}

RTLIB::Libcall RTLIB::getLDEXP(EVT RetVT) {
  return getFPLibCall(RetVT, LDEXP_F32, LDEXP_F64, LDEXP_F80, LDEXP_F128,
                      LDEXP_PPCF128);
}

RTLIB::Libcall RTLIB::getFREXP(EVT RetVT) {
  return getFPLibCall(RetVT, FREXP_F32, FREXP_F64, FREXP_F80, FREXP_F128,
                      FREXP_PPCF128);
}

RTLIB::Libcall RTLIB::getOutlineAtomicHelper(const Libcall (&LC)[5][4],
                                             AtomicOrdering Order,
                                             uint64_t MemSize) {
  unsigned ModeN, ModelN;
  switch (MemSize) {
  case 1:
    ModeN = 0;
    break;
  case 2:
    ModeN = 1;
    break;
  case 4:
    ModeN = 2;
    break;
  case 8:
    ModeN = 3;
    break;
  case 16:
    ModeN = 4;
    break;
  default:
    return RTLIB::UNKNOWN_LIBCALL;
  }

  switch (Order) {
  case AtomicOrdering::Monotonic:
    ModelN = 0;
    break;
  case AtomicOrdering::Acquire:
    ModelN = 1;
    break;
  case AtomicOrdering::Release:
    ModelN = 2;
    break;
  case AtomicOrdering::AcquireRelease:
  case AtomicOrdering::SequentiallyConsistent:
    ModelN = 3;
    break;
  default:
    return UNKNOWN_LIBCALL;
  }

  return LC[ModeN][ModelN];
}

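// Illustrative sketch (not part of the upstream file): the [5][4] table is
// indexed as LC[ModeN][ModelN], where ModeN encodes the access size (1, 2, 4,
// 8, 16 bytes map to 0..4) and ModelN the memory order (relaxed, acquire,
// release, acq_rel-or-seq_cst map to 0..3). A 4-byte acquire operation
// therefore selects LC[2][1]:
//
//   RTLIB::Libcall Call = RTLIB::getOutlineAtomicHelper(
//       LC, AtomicOrdering::Acquire, /*MemSize=*/4);
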
RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
                                        MVT VT) {
  if (!VT.isScalarInteger())
    return UNKNOWN_LIBCALL;
  uint64_t MemSize = VT.getScalarSizeInBits() / 8;

#define LCALLS(A, B) \
  { A##B##_RELAX, A##B##_ACQ, A##B##_REL, A##B##_ACQ_REL }
#define LCALL5(A) \
  LCALLS(A, 1), LCALLS(A, 2), LCALLS(A, 4), LCALLS(A, 8), LCALLS(A, 16)
  switch (Opc) {
  case ISD::ATOMIC_CMP_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_CAS)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_SWP)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_ADD: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDADD)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_OR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDSET)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_CLR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDCLR)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_XOR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDEOR)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  default:
    return UNKNOWN_LIBCALL;
  }
#undef LCALLS
#undef LCALL5
}

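// Illustrative sketch (not part of the upstream file): this is how targets
// with outlined atomics (e.g. AArch64 under -moutline-atomics) pick a helper:
//
//   RTLIB::Libcall LC = RTLIB::getOUTLINE_ATOMIC(
//       ISD::ATOMIC_CMP_SWAP, AtomicOrdering::Acquire, MVT::i32);
//   // -> RTLIB::OUTLINE_ATOMIC_CAS4_ACQ, i.e. the __aarch64_cas4_acq helper.
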
RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum) \
  case Name: \
    switch (VT.SimpleTy) { \
    default: \
      return UNKNOWN_LIBCALL; \
    case MVT::i8: \
      return Enum##_1; \
    case MVT::i16: \
      return Enum##_2; \
    case MVT::i32: \
      return Enum##_4; \
    case MVT::i64: \
      return Enum##_8; \
    case MVT::i128: \
      return Enum##_16; \
    }

  switch (Opc) {
  OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
  OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
  }

#undef OP_TO_LIBCALL

  return UNKNOWN_LIBCALL;
}

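// Illustrative sketch (not part of the upstream file):
//
//   RTLIB::Libcall LC = RTLIB::getSYNC(ISD::ATOMIC_LOAD_ADD, MVT::i32);
//   // -> RTLIB::SYNC_FETCH_AND_ADD_4, the __sync_fetch_and_add_4 libcall
//   //    used when a target cannot expand the atomic RMW inline.
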
RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

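// Illustrative sketch (not part of the upstream file): these getters map the
// element size of an @llvm.mem*.element.unordered.atomic intrinsic to its
// runtime helper, e.g.
//
//   RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(4)
//   // -> MEMCPY_ELEMENT_UNORDERED_ATOMIC_4, by default lowered to the
//   //    __llvm_memcpy_element_unordered_atomic_4 libcall.
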
/// InitCmpLibcallCCs - Set default comparison libcall CC.
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  std::fill(CCs, CCs + RTLIB::UNKNOWN_LIBCALL, ISD::SETCC_INVALID);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
  CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::UNE_F128] = ISD::SETNE;
  CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OGE_F128] = ISD::SETGE;
  CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLT_F128] = ISD::SETLT;
  CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OLE_F128] = ISD::SETLE;
  CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::OGT_F128] = ISD::SETGT;
  CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::UO_F128] = ISD::SETNE;
  CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
}

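// Illustrative sketch (not part of the upstream file): when a soft-float
// comparison is lowered to a libcall (e.g. OLT_F64 -> __ltdf2), the call
// returns an integer, and the condition code registered here tells the
// legalizer how to test that integer against zero:
//
//   ISD::CondCode CC = getCmpLibcallCC(RTLIB::OLT_F64);  // ISD::SETLT
//   // so the FP compare "a < b" becomes "__ltdf2(a, b) < 0".
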
/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
  initActions();

  // Perform these initializations only once.
  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
      MaxLoadsPerMemcmp = 8;
  MaxGluedStoresPerMemcpy = 0;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
      MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
  HasMultipleConditionRegisters = false;
  HasExtractBitsInsn = false;
  JumpIsExpensive = JumpIsExpensiveOverride;
  PredictableSelectIsExpensive = false;
  EnableExtLdPromotion = false;
  StackPointerRegisterToSaveRestore = 0;
  BooleanContents = UndefinedBooleanContent;
  BooleanFloatContents = UndefinedBooleanContent;
  BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  GatherAllAliasesMaxDepth = 18;
  IsStrictFPEnabled = DisableStrictNodeMutation;
  MaxBytesForAlignment = 0;
  MaxAtomicSizeInBitsSupported = 0;

  // Assume that even with libcalls, no target supports wider than 128 bit
  // division.
  MaxDivRemBitWidthSupported = 128;

  MaxLargeFPConvertBitWidthSupported = llvm::IntegerType::MAX_INT_BITS;

  MinCmpXchgSizeInBits = 0;
  SupportsUnalignedAtomics = false;

  std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames), nullptr);

  InitLibcalls(TM.getTargetTriple());
  InitCmpLibcallCCs(CmpLibcallCCs);
}

void TargetLoweringBase::initActions() {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));
  std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
  std::fill(std::begin(TargetDAGCombineArray),
            std::end(TargetDAGCombineArray), 0);

  // We're somewhat special casing MVT::i2 and MVT::i4. Ideally we want to
  // remove this and targets should individually set these types if not legal.
  for (ISD::NodeType NT : enum_seq(ISD::DELETED_NODE, ISD::BUILTIN_OP_END,
                                   force_iteration_on_noniterable_enum)) {
    for (MVT VT : {MVT::i2, MVT::i4})
      OpActions[(unsigned)VT.SimpleTy][NT] = Expand;
  }
  for (MVT AVT : MVT::all_valuetypes()) {
    for (MVT VT : {MVT::i2, MVT::i4, MVT::v128i2, MVT::v64i4}) {
      setTruncStoreAction(AVT, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, AVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, AVT, VT, Expand);
    }
  }
  for (unsigned IM = (unsigned)ISD::PRE_INC;
       IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
    for (MVT VT : {MVT::i2, MVT::i4}) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }
  }

  for (MVT VT : MVT::fp_valuetypes()) {
    MVT IntVT = MVT::getIntegerVT(VT.getFixedSizeInBits());
    if (IntVT.isValid()) {
      setOperationAction(ISD::ATOMIC_SWAP, VT, Promote);
      AddPromotedToType(ISD::ATOMIC_SWAP, VT, IntVT);
    }
  }

  // Set default actions for various operations.
  for (MVT VT : MVT::all_valuetypes()) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }

    // Most backends expect to see the node which just returns the value loaded.
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);

    // These operations default to expand.
    setOperationAction({ISD::FGETSIGN, ISD::CONCAT_VECTORS,
                        ISD::FMINNUM, ISD::FMAXNUM,
                        ISD::FMINNUM_IEEE, ISD::FMAXNUM_IEEE,
                        ISD::FMINIMUM, ISD::FMAXIMUM,
                        ISD::FMAD, ISD::SMIN,
                        ISD::SMAX, ISD::UMIN,
                        ISD::UMAX, ISD::ABS,
                        ISD::FSHL, ISD::FSHR,
                        ISD::SADDSAT, ISD::UADDSAT,
                        ISD::SSUBSAT, ISD::USUBSAT,
                        ISD::SSHLSAT, ISD::USHLSAT,
                        ISD::SMULFIX, ISD::SMULFIXSAT,
                        ISD::UMULFIX, ISD::UMULFIXSAT,
                        ISD::SDIVFIX, ISD::SDIVFIXSAT,
                        ISD::UDIVFIX, ISD::UDIVFIXSAT,
                        ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT,
                        ISD::IS_FPCLASS},
                       VT, Expand);

    // Overflow operations default to expand.
    setOperationAction({ISD::SADDO, ISD::SSUBO, ISD::UADDO, ISD::USUBO,
                        ISD::SMULO, ISD::UMULO},
                       VT, Expand);

    // Carry-using overflow operations default to expand.
    setOperationAction({ISD::UADDO_CARRY, ISD::USUBO_CARRY, ISD::SETCCCARRY,
                        ISD::SADDO_CARRY, ISD::SSUBO_CARRY},
                       VT, Expand);

    // ADDC/ADDE/SUBC/SUBE default to expand.
    setOperationAction({ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}, VT,
                       Expand);

    // Halving adds default to expand.
    setOperationAction(
        {ISD::AVGFLOORS, ISD::AVGFLOORU, ISD::AVGCEILS, ISD::AVGCEILU}, VT,
        Expand);

    // Absolute difference defaults to expand.
    setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Expand);

    // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
    setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
                       Expand);

    setOperationAction({ISD::BITREVERSE, ISD::PARITY}, VT, Expand);

    // These library functions default to expand.
    setOperationAction({ISD::FROUND, ISD::FPOWI, ISD::FLDEXP, ISD::FFREXP}, VT,
                       Expand);

    // These operations default to expand for vector types.
    if (VT.isVector())
      setOperationAction(
          {ISD::FCOPYSIGN, ISD::SIGN_EXTEND_INREG, ISD::ANY_EXTEND_VECTOR_INREG,
           ISD::SIGN_EXTEND_VECTOR_INREG, ISD::ZERO_EXTEND_VECTOR_INREG,
           ISD::SPLAT_VECTOR, ISD::LRINT, ISD::LLRINT},
          VT, Expand);

    // Constrained floating-point operations default to expand.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
    setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
#include "llvm/IR/ConstrainedOps.def"

    // For most targets @llvm.get.dynamic.area.offset just returns 0.
    setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);

    // Vector reductions default to expand.
    setOperationAction(
        {ISD::VECREDUCE_FADD, ISD::VECREDUCE_FMUL, ISD::VECREDUCE_ADD,
         ISD::VECREDUCE_MUL, ISD::VECREDUCE_AND, ISD::VECREDUCE_OR,
         ISD::VECREDUCE_XOR, ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
         ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN, ISD::VECREDUCE_FMAX,
         ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAXIMUM, ISD::VECREDUCE_FMINIMUM,
         ISD::VECREDUCE_SEQ_FADD, ISD::VECREDUCE_SEQ_FMUL},
        VT, Expand);

    // Named vector shuffles default to expand.
    setOperationAction(ISD::VECTOR_SPLICE, VT, Expand);

    // VP operations default to expand.
#define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...) \
    setOperationAction(ISD::SDOPC, VT, Expand);
#include "llvm/IR/VPIntrinsics.def"

    // FP environment operations default to expand.
    setOperationAction(ISD::GET_FPENV, VT, Expand);
    setOperationAction(ISD::SET_FPENV, VT, Expand);
    setOperationAction(ISD::RESET_FPENV, VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);

  // ConstantFP nodes default to expand. Targets can either change this to
  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  // to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP,
                     {MVT::bf16, MVT::f16, MVT::f32, MVT::f64, MVT::f80, MVT::f128},
                     Expand);

  // These library functions default to expand.
  setOperationAction({ISD::FCBRT, ISD::FLOG, ISD::FLOG2, ISD::FLOG10, ISD::FEXP,
                      ISD::FEXP2, ISD::FEXP10, ISD::FFLOOR, ISD::FNEARBYINT,
                      ISD::FCEIL, ISD::FRINT, ISD::FTRUNC, ISD::LROUND,
                      ISD::LLROUND, ISD::LRINT, ISD::LLRINT, ISD::FROUNDEVEN},
                     {MVT::f32, MVT::f64, MVT::f128}, Expand);

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
  // here is to inform the DAG Legalizer to replace DEBUGTRAP with TRAP.
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);

  setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);

  setOperationAction(ISD::GET_FPENV_MEM, MVT::Other, Expand);
  setOperationAction(ISD::SET_FPENV_MEM, MVT::Other, Expand);

  for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
    setOperationAction(ISD::GET_FPMODE, VT, Expand);
    setOperationAction(ISD::SET_FPMODE, VT, Expand);
  }
  setOperationAction(ISD::RESET_FPMODE, MVT::Other, Expand);
}

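// Illustrative sketch (not part of the upstream file): a concrete target opts
// back in by overriding these defaults in its own TargetLowering constructor,
// e.g.
//
//   setOperationAction(ISD::FMINNUM, MVT::f32, Legal);  // native instruction
//   setOperationAction(ISD::SMULO, MVT::i32, Custom);   // custom DAG lowering
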
MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
                                               EVT) const {
  return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
}

EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                                         bool LegalTypes) const {
  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
  if (LHSTy.isVector())
    return LHSTy;
  MVT ShiftVT =
      LegalTypes ? getScalarShiftAmountTy(DL, LHSTy) : getPointerTy(DL);
  // If any possible shift value won't fit in the preferred type, just use
  // something safe. Assume it will be legalized when the shift is expanded.
  if (ShiftVT.getSizeInBits() < Log2_32_Ceil(LHSTy.getSizeInBits()))
    ShiftVT = MVT::i32;
  assert(ShiftVT.getSizeInBits() >= Log2_32_Ceil(LHSTy.getSizeInBits()) &&
         "ShiftVT is still too small!");
  return ShiftVT;
}

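// Illustrative worked example (not part of the upstream file): an i128 shift
// needs Log2_32_Ceil(128) == 7 bits to encode every shift amount, so even an
// i8 shift-amount type would suffice; the MVT::i32 fallback only triggers for
// types so wide that the preferred type cannot count their bits.
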
bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}

bool TargetLoweringBase::isFreeAddrSpaceCast(unsigned SrcAS,
                                             unsigned DestAS) const {
  return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
}

void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
  // If the command-line option was specified, ignore this request.
  if (!JumpIsExpensiveOverride.getNumOccurrences())
    JumpIsExpensive = isExpensive;
}

TargetLoweringBase::LegalizeKind
TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
  // If this is a simple type, use the ComputeRegisterProp mechanism.
  if (VT.isSimple()) {
    MVT SVT = VT.getSimpleVT();
    assert((unsigned)SVT.SimpleTy < std::size(TransformToType));
    MVT NVT = TransformToType[SVT.SimpleTy];
    LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

    assert((LA == TypeLegal || LA == TypeSoftenFloat ||
            LA == TypeSoftPromoteHalf ||
            (NVT.isVector() ||
             ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)) &&
           "Promote may not follow Expand or Promote");

    if (LA == TypeSplitVector)
      return LegalizeKind(LA, EVT(SVT).getHalfNumVectorElementsVT(Context));
    if (LA == TypeScalarizeVector)
      return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
  }

  // Handle Extended Scalar Types.
  if (!VT.isVector()) {
    assert(VT.isInteger() && "Float types must be simple");
    unsigned BitSize = VT.getSizeInBits();
    // First promote to a power-of-two size, then expand if necessary.
    if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
      EVT NVT = VT.getRoundIntegerType(Context);
      assert(NVT != VT && "Unable to round integer VT");
      LegalizeKind NextStep = getTypeConversion(Context, NVT);
      // Avoid multi-step promotion.
      if (NextStep.first == TypePromoteInteger)
        return NextStep;
      // Return rounded integer type.
      return LegalizeKind(TypePromoteInteger, NVT);
    }

    return LegalizeKind(TypeExpandInteger,
                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  }

  // Handle vector types.
  ElementCount NumElts = VT.getVectorElementCount();
  EVT EltVT = VT.getVectorElementType();

  // Vectors with only one element are always scalarized.
  if (NumElts.isScalar())
    return LegalizeKind(TypeScalarizeVector, EltVT);

  // Try to widen vector elements until the element type is a power of two and
  // promote it to a legal type later on, for example:
  // <3 x i8> -> <4 x i8> -> <4 x i32>
  if (EltVT.isInteger()) {
    // Vectors with a number of elements that is not a power of two are always
    // widened, for example <3 x i8> -> <4 x i8>.
    if (!VT.isPow2VectorType()) {
      NumElts = NumElts.coefficientNextPowerOf2();
      EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Examine the element type.
    LegalizeKind LK = getTypeConversion(Context, EltVT);

    // If type is to be expanded, split the vector.
    // <4 x i140> -> <2 x i140>
    if (LK.first == TypeExpandInteger) {
      if (VT.getVectorElementCount().isScalable())
        return LegalizeKind(TypeScalarizeScalableVector, EltVT);
      return LegalizeKind(TypeSplitVector,
                          VT.getHalfNumVectorElementsVT(Context));
    }

    // Promote the integer element types until a legal vector type is found
    // or until the element integer type is too big. If a legal type was not
    // found, fallback to the usual mechanism of widening/splitting the
    // vector.
    EVT OldEltVT = EltVT;
    while (true) {
      // Increase the bitwidth of the element to the next pow-of-two
      // (which is greater than 8 bits).
      EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
                  .getRoundIntegerType(Context);

      // Stop trying when getting a non-simple element type.
      // Note that vector elements may be greater than legal vector element
      // types. Example: X86 XMM registers hold 64bit element on 32bit
      // systems.
      if (!EltVT.isSimple())
        break;

      // Build a new vector type and check if it is legal.
      MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      // Found a legal promoted vector type.
      if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
        return LegalizeKind(TypePromoteInteger,
                            EVT::getVectorVT(Context, EltVT, NumElts));
    }

    // Reset the type to the unexpanded type if we did not find a legal vector
    // type with a promoted vector element type.
    EltVT = OldEltVT;
  }

  // Try to widen the vector until a legal type is found.
  // If there is no wider legal type, split the vector.
  while (true) {
    // Round up to the next power of 2.
    NumElts = NumElts.coefficientNextPowerOf2();

    // If there is no simple vector type with this many elements then there
    // cannot be a larger legal vector type. Note that this assumes that
    // there are no skipped intermediate vector types in the simple types.
    if (!EltVT.isSimple())
      break;
    MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
    if (LargerVector == MVT())
      break;

    // If this type is legal then widen the vector.
    if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
      return LegalizeKind(TypeWidenVector, LargerVector);
  }

  // Widen odd vectors to next power of two.
  if (!VT.isPow2VectorType()) {
    EVT NVT = VT.getPow2VectorType(Context);
    return LegalizeKind(TypeWidenVector, NVT);
  }

  if (VT.getVectorElementCount() == ElementCount::getScalable(1))
    return LegalizeKind(TypeScalarizeScalableVector, EltVT);

  // Vectors with illegal element types are expanded.
  EVT NVT = EVT::getVectorVT(Context, EltVT,
                             VT.getVectorElementCount().divideCoefficientBy(2));
  return LegalizeKind(TypeSplitVector, NVT);
}

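// Illustrative sketch (not part of the upstream file): each call returns a
// single legalization step, and the type legalizer re-queries until it
// reaches TypeLegal. On a target where v4i32 is legal, the chain from the
// comment above plays out as:
//
//   getTypeConversion(Ctx, <3 x i8>) -> (TypeWidenVector,    <4 x i8>)
//   getTypeConversion(Ctx, <4 x i8>) -> (TypePromoteInteger, <4 x i32>)
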
static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
  // Figure out the right, legal destination reg to copy into.
  ElementCount EC = VT.getVectorElementCount();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so splitting or widening is
  // required.
  if (VT.isScalableVector() && !isPowerOf2_32(EC.getKnownMinValue()))
    llvm_unreachable(
        "Splitting or widening of non-power-of-2 MVTs is not implemented.");

  // FIXME: We don't support non-power-of-2-sized vectors for now.
  // Ideally we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EC.getKnownMinValue())) {
    // Split EC to unit size (scalable property is preserved).
    NumVectorRegs = EC.getKnownMinValue();
    EC = ElementCount::getFixed(1);
  }

  // Divide the input until we get to a supported size. This will
  // always end up with an EC that represents a scalar or a scalable
  // scalar.
  while (EC.getKnownMinValue() > 1 &&
         !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
    EC = EC.divideCoefficientBy(2);
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, EC);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned LaneSizeInBits = NewVT.getScalarSizeInBits();

  // Convert sizes such as i33 to i64.
  LaneSizeInBits = llvm::bit_ceil(LaneSizeInBits);

  MVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs * (LaneSizeInBits / DestVT.getScalarSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

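// Illustrative worked example (not part of the upstream file): with only
// 128-bit vectors legal (e.g. SSE1-style), breaking down MVT::v8f32 halves
// the element count once:
//
//   MVT IntermediateVT, RegisterVT;
//   unsigned NumIntermediates;
//   unsigned NumRegs = getVectorTypeBreakdownMVT(
//       MVT::v8f32, IntermediateVT, NumIntermediates, RegisterVT, this);
//   // NumRegs == 2, IntermediateVT == RegisterVT == MVT::v4f32.
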
/// isLegalRC - Return true if the value types that can be represented by the
/// specified register class are all legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
                                   const TargetRegisterClass &RC) const {
  for (const auto *I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
    if (isTypeLegal(*I))
      return true;
  return false;
}

/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineBasicBlock *
TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
                                   MachineBasicBlock *MBB) const {
  MachineInstr *MI = &InitialMI;
  MachineFunction &MF = *MI->getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // We're handling multiple types of operands here:
  // PATCHPOINT MetaArgs - live-in, read only, direct
  // STATEPOINT Deopt Spill - live-through, read only, indirect
  // STATEPOINT Deopt Alloca - live-through, read only, direct
  // (We're currently conservative and mark the deopt slots read/write in
  // practice.)
  // STATEPOINT GC Spill - live-through, read/write, indirect
  // STATEPOINT GC Alloca - live-through, read/write, direct
  // The live-in vs live-through is handled already (the live-through ones are
  // all stack slots), but we need to handle the different types of stackmap
  // operands and memory effects here.

  if (llvm::none_of(MI->operands(),
                    [](MachineOperand &Operand) { return Operand.isFI(); }))
    return MBB;

  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());

  // Inherit previous memory operands.
  MIB.cloneMemRefs(*MI);

  for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isFI()) {
      // Index of the Def operand this Use is tied to.
      // Since Defs come before Uses, if this Use is tied, the index of its
      // Def must be smaller than the index of the Use.
      // Also, Defs preserve their position in the new MI.
      unsigned TiedTo = i;
      if (MO.isReg() && MO.isTied())
        TiedTo = MI->findTiedOperandIdx(i);
      MIB.add(MO);
      if (TiedTo < i)
        MIB->tieOperands(TiedTo, MIB->getNumOperands() - 1);
      continue;
    }

    // foldMemoryOperand builds a new MI after replacing a single FI operand
    // with the canonical set of five x86 addressing-mode operands.
    int FI = MO.getIndex();

    // Add frame index operands recognized by stackmaps.cpp
    if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
      // indirect-mem-ref tag, size, #FI, offset.
      // Used for spills inserted by StatepointLowering. This codepath is not
      // used for patchpoints/stackmaps at all; for those, spilling is done
      // via the foldMemoryOperand callback only.
      assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(MFI.getObjectSize(FI));
      MIB.add(MO);
      MIB.addImm(0);
    } else {
      // direct-mem-ref tag, #FI, offset.
      // Used by patchpoint, and direct alloca arguments to statepoints.
      MIB.addImm(StackMaps::DirectMemRefOp);
      MIB.add(MO);
      MIB.addImm(0);
    }

    assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");

    // Add a new memory operand for this FI.
    assert(MFI.getObjectOffset(FI) != -1);

    // Note: STATEPOINT MMOs are added during SelectionDAG. STACKMAP, and
    // PATCHPOINT should be updated to do the same. (TODO)
    if (MI->getOpcode() != TargetOpcode::STATEPOINT) {
      auto Flags = MachineMemOperand::MOLoad;
      MachineMemOperand *MMO = MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, FI), Flags,
          MF.getDataLayout().getPointerSize(), MFI.getObjectAlign(FI));
      MIB->addMemOperand(MF, MMO);
    }
  }
  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI->eraseFromParent();
  return MBB;
}

/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
// This function is in TargetLowering because it uses RegClassForVT which would
// need to be moved to TargetRegisterInfo and would necessitate moving
// isTypeLegal over as well - a massive change that would just require
// TargetLowering having a TargetRegisterInfo class member that it would use.
std::pair<const TargetRegisterClass *, uint8_t>
TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                            MVT VT) const {
  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
  if (!RC)
    return std::make_pair(RC, 0);

  // Compute the set of all super-register classes.
  BitVector SuperRegRC(TRI->getNumRegClasses());
  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
    SuperRegRC.setBitsInMask(RCI.getMask());

  // Find the first legal register class with the largest spill size.
  const TargetRegisterClass *BestRC = RC;
  for (unsigned i : SuperRegRC.set_bits()) {
    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
    // We want the largest possible spill size.
    if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
      continue;
    if (!isLegalRC(*TRI, *SuperRC))
      continue;
    BestRC = SuperRC;
  }
  return std::make_pair(BestRC, 1);
}

/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLoweringBase::computeRegisterProperties(
    const TargetRegisterInfo *TRI) {
  static_assert(MVT::VALUETYPE_SIZE <= MVT::MAX_ALLOWED_VALUETYPE,
                "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (unsigned ExpandedReg = LargestIntReg + 1;
       ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
    NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
                                   TypeExpandInteger);
  }

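  // Illustrative sketch (not part of the upstream file): on a target whose
  // largest integer register is i32, the loop above records i64 -> 2 x i32
  // and i128 -> 4 x i32, with TransformToType stepping each type down one
  // level (i128 -> i64 -> i32) so expansion proceeds one halving at a time.
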
  // Inspect all of the ValueTypes smaller than the largest integer
  // register to see which ones need promotion.
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    MVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
          (MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
    }
  }

  // ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
      TransformToType[MVT::ppcf128] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
    } else {
      NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
      RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
      TransformToType[MVT::ppcf128] = MVT::i128;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
    }
  }

  // Decide how to handle f128. If the target does not have native f128 support,
  // expand it to i128 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f128)) {
    NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
    RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
    TransformToType[MVT::f128] = MVT::i128;
    ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
  }

  // Decide how to handle f80. If the target does not have native f80 support,
  // expand it to i96 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f80)) {
    NumRegistersForVT[MVT::f80] = 3*NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f80] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f80] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f80, TypeSoftenFloat);
  }

  // Decide how to handle f64. If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
  }

  // Decide how to handle f32. If the target does not have native f32 support,
  // expand it to i32 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f32)) {
    NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f32] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
  }

  // Decide how to handle f16. If the target does not have native f16 support,
  // promote it to f32, because there are no f16 library calls (except for
  // conversions).
  if (!isTypeLegal(MVT::f16)) {
    // Allow targets to control how we legalize half.
    if (softPromoteHalfType()) {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
      TransformToType[MVT::f16] = MVT::f32;
      ValueTypeActions.setTypeAction(MVT::f16, TypeSoftPromoteHalf);
    } else {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
      TransformToType[MVT::f16] = MVT::f32;
      ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
    }
  }

  // Decide how to handle bf16. If the target does not have native bf16 support,
  // promote it to f32, because there are no bf16 library calls (except for
  // converting from f32 to bf16).
  if (!isTypeLegal(MVT::bf16)) {
    NumRegistersForVT[MVT::bf16] = NumRegistersForVT[MVT::f32];
    RegisterTypeForVT[MVT::bf16] = RegisterTypeForVT[MVT::f32];
    TransformToType[MVT::bf16] = MVT::f32;
    ValueTypeActions.setTypeAction(MVT::bf16, TypeSoftPromoteHalf);
  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType) i;
    if (isTypeLegal(VT))
      continue;

    MVT EltVT = VT.getVectorElementType();
    ElementCount EC = VT.getVectorElementCount();
    bool IsLegalWiderType = false;
    bool IsScalable = VT.isScalableVector();
    LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
    switch (PreferredAction) {
    case TypePromoteInteger: {
      MVT::SimpleValueType EndVT = IsScalable ?
                                   MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE :
                                   MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE;
      // Try to promote the elements of integer vectors. If no legal
      // promotion was found, fall through to the widen-vector method.
      for (unsigned nVT = i + 1;
           (MVT::SimpleValueType)nVT <= EndVT; ++nVT) {
        MVT SVT = (MVT::SimpleValueType) nVT;
        // Promote vectors of integers to vectors with the same number
        // of elements, with a wider element type.
        if (SVT.getScalarSizeInBits() > EltVT.getFixedSizeInBits() &&
            SVT.getVectorElementCount() == EC && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      [[fallthrough]];
    }

    case TypeWidenVector:
      if (isPowerOf2_32(EC.getKnownMinValue())) {
        // Try to widen the vector.
        for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
          MVT SVT = (MVT::SimpleValueType) nVT;
          if (SVT.getVectorElementType() == EltVT &&
              SVT.isScalableVector() == IsScalable &&
              SVT.getVectorElementCount().getKnownMinValue() >
                  EC.getKnownMinValue() &&
              isTypeLegal(SVT)) {
            TransformToType[i] = SVT;
            RegisterTypeForVT[i] = SVT;
            NumRegistersForVT[i] = 1;
            ValueTypeActions.setTypeAction(VT, TypeWidenVector);
            IsLegalWiderType = true;
            break;
          }
        }
        if (IsLegalWiderType)
          break;
      } else {
        // Only widen to the next power of 2 to keep consistency with EVT.
        MVT NVT = VT.getPow2VectorType();
        if (isTypeLegal(NVT)) {
          TransformToType[i] = NVT;
          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          RegisterTypeForVT[i] = NVT;
          NumRegistersForVT[i] = 1;
          break;
        }
      }
      [[fallthrough]];

    case TypeSplitVector:
    case TypeScalarizeVector: {
      MVT IntermediateVT;
      MVT RegisterVT;
      unsigned NumIntermediates;
      unsigned NumRegisters = getVectorTypeBreakdownMVT(VT, IntermediateVT,
          NumIntermediates, RegisterVT, this);
      NumRegistersForVT[i] = NumRegisters;
      assert(NumRegistersForVT[i] == NumRegisters &&
             "NumRegistersForVT size cannot represent NumRegisters!");
      RegisterTypeForVT[i] = RegisterVT;

      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
        // Type is already a power of 2. The default action is to split.
        TransformToType[i] = MVT::Other;
        if (PreferredAction == TypeScalarizeVector)
          ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
        else if (PreferredAction == TypeSplitVector)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else if (EC.getKnownMinValue() > 1)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else
          ValueTypeActions.setTypeAction(VT, EC.isScalable()
                                                 ? TypeScalarizeScalableVector
                                                 : TypeScalarizeVector);
      } else {
        TransformToType[i] = NVT;
        ValueTypeActions.setTypeAction(VT, TypeWidenVector);
      }
      break;
    }
    default:
      llvm_unreachable("Unknown vector legalization action!");
    }
  }

  // Determine the 'representative' register class for each value type.
  // A representative register class is the largest (meaning one which is
  // not a sub-register class / subreg register class) legal register class for
  // a group of value types. For example, on i386 the representative for i8,
  // i16, and i32 would be GR32; while on x86_64 it's GR64.
  for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
    const TargetRegisterClass* RRC;
    uint8_t Cost;
    std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
    RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}

1579 EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1580 EVT VT) const {
1581 assert(!VT.isVector() && "No default SetCC type for vectors!");
1582 return getPointerTy(DL).SimpleTy;
1583 }
1585 MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
1586 return MVT::i32; // return the default value
1587 }
1589 /// getVectorTypeBreakdown - Vector types are broken down into some number of
1590 /// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
1591 /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
1592 /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
1593 ///
1594 /// This method returns the number of registers needed, and the VT for each
1595 /// register. It also returns the VT and quantity of the intermediate values
1596 /// before they are promoted/expanded.
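///
/// A hypothetical usage sketch (not from this file; "TLI" and "Ctx" stand in
/// for a TargetLowering reference and an LLVMContext), using the SSE1-style
/// case from the example above where v4f32 is the widest legal vector type:
///   EVT IntermediateVT;
///   unsigned NumIntermediates;
///   MVT RegisterVT;
///   unsigned NumRegs = TLI.getVectorTypeBreakdown(
///       Ctx, MVT::v8f32, IntermediateVT, NumIntermediates, RegisterVT);
///   // Here NumRegs == 2 and IntermediateVT == RegisterVT == MVT::v4f32.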
1597 unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context,
1598 EVT VT, EVT &IntermediateVT,
1599 unsigned &NumIntermediates,
1600 MVT &RegisterVT) const {
1601 ElementCount EltCnt = VT.getVectorElementCount();
1603 // If there is a wider vector type with the same element type as this one,
1604 // or a promoted vector type with the same number of elements but wider
1605 // element types, then we should convert to that legal vector type.
1606 // This handles things like <2 x float> -> <4 x float> and
1607 // <4 x i1> -> <4 x i32>.
1608 LegalizeTypeAction TA = getTypeAction(Context, VT);
1609 if (!EltCnt.isScalar() &&
1610 (TA == TypeWidenVector || TA == TypePromoteInteger)) {
1611 EVT RegisterEVT = getTypeToTransformTo(Context, VT);
1612 if (isTypeLegal(RegisterEVT)) {
1613 IntermediateVT = RegisterEVT;
1614 RegisterVT = RegisterEVT.getSimpleVT();
1615 NumIntermediates = 1;
1616 return 1;
1617 }
1618 }
1620 // Figure out the right, legal destination reg to copy into.
1621 EVT EltTy = VT.getVectorElementType();
1623 unsigned NumVectorRegs = 1;
1625 // Scalable vectors cannot be scalarized, so handle the legalization of
1626 // these types as is done elsewhere in SelectionDAG.
1627 if (EltCnt.isScalable()) {
1628 LegalizeKind LK;
1629 EVT PartVT = VT;
1630 do {
1631 // Iterate until we've found a legal (part) type to hold VT.
1632 LK = getTypeConversion(Context, PartVT);
1633 PartVT = LK.second;
1634 } while (LK.first != TypeLegal);
1636 if (!PartVT.isVector()) {
1637 report_fatal_error(
1638 "Don't know how to legalize this scalable vector type");
1641 NumIntermediates =
1642 divideCeil(VT.getVectorElementCount().getKnownMinValue(),
1643 PartVT.getVectorElementCount().getKnownMinValue());
1644 IntermediateVT = PartVT;
1645 RegisterVT = getRegisterType(Context, IntermediateVT);
1646 return NumIntermediates;
1647 }
1649 // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally
1650 // we could break down into LHS/RHS like LegalizeDAG does.
1651 if (!isPowerOf2_32(EltCnt.getKnownMinValue())) {
1652 NumVectorRegs = EltCnt.getKnownMinValue();
1653 EltCnt = ElementCount::getFixed(1);
1654 }
1656 // Divide the input until we get to a supported size. This will always
1657 // end with a scalar if the target doesn't support vectors.
1658 while (EltCnt.getKnownMinValue() > 1 &&
1659 !isTypeLegal(EVT::getVectorVT(Context, EltTy, EltCnt))) {
1660 EltCnt = EltCnt.divideCoefficientBy(2);
1661 NumVectorRegs <<= 1;
1662 }
1664 NumIntermediates = NumVectorRegs;
1666 EVT NewVT = EVT::getVectorVT(Context, EltTy, EltCnt);
1667 if (!isTypeLegal(NewVT))
1668 NewVT = EltTy;
1669 IntermediateVT = NewVT;
1671 MVT DestVT = getRegisterType(Context, NewVT);
1672 RegisterVT = DestVT;
1674 if (EVT(DestVT).bitsLT(NewVT)) { // Value is expanded, e.g. i64 -> i16.
1675 TypeSize NewVTSize = NewVT.getSizeInBits();
1676 // Convert sizes such as i33 to i64.
1677 if (!llvm::has_single_bit<uint32_t>(NewVTSize.getKnownMinValue()))
1678 NewVTSize = NewVTSize.coefficientNextPowerOf2();
1679 return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
1680 }
1682 // Otherwise, promotion or legal types use the same number of registers as
1683 // the vector decimated to the appropriate level.
1684 return NumVectorRegs;
1685 }
1687 bool TargetLoweringBase::isSuitableForJumpTable(const SwitchInst *SI,
1688 uint64_t NumCases,
1689 uint64_t Range,
1690 ProfileSummaryInfo *PSI,
1691 BlockFrequencyInfo *BFI) const {
1692 // FIXME: This function checks the maximum table size and density, but the
1693 // minimum size is not checked. It would be nice if the minimum size check
1694 // were also combined into this function. Currently, the minimum size check is
1695 // performed in findJumpTable() in SelectionDAGBuilder and
1696 // getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
1697 const bool OptForSize =
1698 SI->getParent()->getParent()->hasOptSize() ||
1699 llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI);
1700 const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
1701 const unsigned MaxJumpTableSize = getMaximumJumpTableSize();
1703 // Check whether the number of cases is small enough and
1704 // the range is dense enough for a jump table.
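// As a worked example, assuming the default minimum density of 10 for a
// normal function: 12 cases over a range of 100 satisfy
// 12 * 100 >= 100 * 10 and may get a jump table, while 8 cases over the
// same range are too sparse.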
1705 return (OptForSize || Range <= MaxJumpTableSize) &&
1706 (NumCases * 100 >= Range * MinDensity);
1707 }
1709 MVT TargetLoweringBase::getPreferredSwitchConditionType(LLVMContext &Context,
1710 EVT ConditionVT) const {
1711 return getRegisterType(Context, ConditionVT);
1712 }
1714 /// Get the EVTs and ArgFlags collections that represent the legalized return
1715 /// type of the given function. This does not require a DAG or a return value,
1716 /// and is suitable for use before any DAGs for the function are constructed.
1717 /// TODO: Move this out of TargetLowering.cpp.
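/// For example, a function returning i1 with the zeroext attribute has its
/// value type raised below to at least the register type used for i32, and a
/// return value that legalizes to N register parts emits N ISD::OutputArg
/// entries.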
1718 void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
1719 AttributeList attr,
1720 SmallVectorImpl<ISD::OutputArg> &Outs,
1721 const TargetLowering &TLI, const DataLayout &DL) {
1722 SmallVector<EVT, 4> ValueVTs;
1723 ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
1724 unsigned NumValues = ValueVTs.size();
1725 if (NumValues == 0) return;
1727 for (unsigned j = 0, f = NumValues; j != f; ++j) {
1728 EVT VT = ValueVTs[j];
1729 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1731 if (attr.hasRetAttr(Attribute::SExt))
1732 ExtendKind = ISD::SIGN_EXTEND;
1733 else if (attr.hasRetAttr(Attribute::ZExt))
1734 ExtendKind = ISD::ZERO_EXTEND;
1736 // FIXME: C calling convention requires the return type to be promoted to
1737 // at least 32-bit. But this is not necessary for non-C calling
1738 // conventions. The frontend should mark functions whose return values
1739 // require promoting with signext or zeroext attributes.
1740 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
1741 MVT MinVT = TLI.getRegisterType(MVT::i32);
1742 if (VT.bitsLT(MinVT))
1743 VT = MinVT;
1744 }
1746 unsigned NumParts =
1747 TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT);
1748 MVT PartVT =
1749 TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT);
1751 // 'inreg' on function refers to return value
1752 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1753 if (attr.hasRetAttr(Attribute::InReg))
1754 Flags.setInReg();
1756 // Propagate extension type if any
1757 if (attr.hasRetAttr(Attribute::SExt))
1758 Flags.setSExt();
1759 else if (attr.hasRetAttr(Attribute::ZExt))
1760 Flags.setZExt();
1762 for (unsigned i = 0; i < NumParts; ++i)
1763 Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isfixed=*/true, 0, 0));
1764 }
1765 }
1767 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1768 /// function arguments in the caller parameter area. This is the actual
1769 /// alignment, not its logarithm.
1770 uint64_t TargetLoweringBase::getByValTypeAlignment(Type *Ty,
1771 const DataLayout &DL) const {
1772 return DL.getABITypeAlign(Ty).value();
1773 }
1775 bool TargetLoweringBase::allowsMemoryAccessForAlignment(
1776 LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
1777 Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const {
1778 // Check if the specified alignment is sufficient based on the data layout.
1779 // TODO: While using the data layout works in practice, a better solution
1780 // would be to implement this check directly (make this a virtual function).
1781 // For example, the ABI alignment may change based on software platform while
1782 // this function should only be affected by hardware implementation.
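// For example, an i32 access with Align(2) on a target whose data layout
// assigns i32 an ABI alignment of 4 is treated as misaligned and defers to
// allowsMisalignedMemoryAccesses() below.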
1783 Type *Ty = VT.getTypeForEVT(Context);
1784 if (VT.isZeroSized() || Alignment >= DL.getABITypeAlign(Ty)) {
1785 // Assume that an access that meets the ABI-specified alignment is fast.
1786 if (Fast != nullptr)
1787 *Fast = 1;
1788 return true;
1789 }
1791 // This is a misaligned access.
1792 return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
1793 }
1795 bool TargetLoweringBase::allowsMemoryAccessForAlignment(
1796 LLVMContext &Context, const DataLayout &DL, EVT VT,
1797 const MachineMemOperand &MMO, unsigned *Fast) const {
1798 return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
1799 MMO.getAlign(), MMO.getFlags(), Fast);
1800 }
1802 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
1803 const DataLayout &DL, EVT VT,
1804 unsigned AddrSpace, Align Alignment,
1805 MachineMemOperand::Flags Flags,
1806 unsigned *Fast) const {
1807 return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment,
1808 Flags, Fast);
1809 }
1811 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
1812 const DataLayout &DL, EVT VT,
1813 const MachineMemOperand &MMO,
1814 unsigned *Fast) const {
1815 return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
1816 MMO.getFlags(), Fast);
1817 }
1819 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
1820 const DataLayout &DL, LLT Ty,
1821 const MachineMemOperand &MMO,
1822 unsigned *Fast) const {
1823 EVT VT = getApproximateEVTForLLT(Ty, DL, Context);
1824 return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
1825 MMO.getFlags(), Fast);
1826 }
1828 //===----------------------------------------------------------------------===//
1829 // TargetTransformInfo Helpers
1830 //===----------------------------------------------------------------------===//
1832 int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
1833 enum InstructionOpcodes {
1834 #define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
1835 #define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
1836 #include "llvm/IR/Instruction.def"
1837 };
1838 switch (static_cast<InstructionOpcodes>(Opcode)) {
1839 case Ret: return 0;
1840 case Br: return 0;
1841 case Switch: return 0;
1842 case IndirectBr: return 0;
1843 case Invoke: return 0;
1844 case CallBr: return 0;
1845 case Resume: return 0;
1846 case Unreachable: return 0;
1847 case CleanupRet: return 0;
1848 case CatchRet: return 0;
1849 case CatchPad: return 0;
1850 case CatchSwitch: return 0;
1851 case CleanupPad: return 0;
1852 case FNeg: return ISD::FNEG;
1853 case Add: return ISD::ADD;
1854 case FAdd: return ISD::FADD;
1855 case Sub: return ISD::SUB;
1856 case FSub: return ISD::FSUB;
1857 case Mul: return ISD::MUL;
1858 case FMul: return ISD::FMUL;
1859 case UDiv: return ISD::UDIV;
1860 case SDiv: return ISD::SDIV;
1861 case FDiv: return ISD::FDIV;
1862 case URem: return ISD::UREM;
1863 case SRem: return ISD::SREM;
1864 case FRem: return ISD::FREM;
1865 case Shl: return ISD::SHL;
1866 case LShr: return ISD::SRL;
1867 case AShr: return ISD::SRA;
1868 case And: return ISD::AND;
1869 case Or: return ISD::OR;
1870 case Xor: return ISD::XOR;
1871 case Alloca: return 0;
1872 case Load: return ISD::LOAD;
1873 case Store: return ISD::STORE;
1874 case GetElementPtr: return 0;
1875 case Fence: return 0;
1876 case AtomicCmpXchg: return 0;
1877 case AtomicRMW: return 0;
1878 case Trunc: return ISD::TRUNCATE;
1879 case ZExt: return ISD::ZERO_EXTEND;
1880 case SExt: return ISD::SIGN_EXTEND;
1881 case FPToUI: return ISD::FP_TO_UINT;
1882 case FPToSI: return ISD::FP_TO_SINT;
1883 case UIToFP: return ISD::UINT_TO_FP;
1884 case SIToFP: return ISD::SINT_TO_FP;
1885 case FPTrunc: return ISD::FP_ROUND;
1886 case FPExt: return ISD::FP_EXTEND;
1887 case PtrToInt: return ISD::BITCAST;
1888 case IntToPtr: return ISD::BITCAST;
1889 case BitCast: return ISD::BITCAST;
1890 case AddrSpaceCast: return ISD::ADDRSPACECAST;
1891 case ICmp: return ISD::SETCC;
1892 case FCmp: return ISD::SETCC;
1893 case PHI: return 0;
1894 case Call: return 0;
1895 case Select: return ISD::SELECT;
1896 case UserOp1: return 0;
1897 case UserOp2: return 0;
1898 case VAArg: return 0;
1899 case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
1900 case InsertElement: return ISD::INSERT_VECTOR_ELT;
1901 case ShuffleVector: return ISD::VECTOR_SHUFFLE;
1902 case ExtractValue: return ISD::MERGE_VALUES;
1903 case InsertValue: return ISD::MERGE_VALUES;
1904 case LandingPad: return 0;
1905 case Freeze: return ISD::FREEZE;
1906 }
1908 llvm_unreachable("Unknown instruction type encountered!");
1909 }
1911 Value *
1912 TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
1913 bool UseTLS) const {
1914 // compiler-rt provides a variable with a magic name. Targets that do not
1915 // link with compiler-rt may also provide such a variable.
1916 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1917 const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
1918 auto UnsafeStackPtr =
1919 dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));
1921 Type *StackPtrTy = PointerType::getUnqual(M->getContext());
1923 if (!UnsafeStackPtr) {
1924 auto TLSModel = UseTLS ?
1925 GlobalValue::InitialExecTLSModel :
1926 GlobalValue::NotThreadLocal;
1927 // The global variable is not defined yet, define it ourselves.
1928 // We use the initial-exec TLS model because we do not support the
1929 // variable living anywhere other than in the main executable.
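// When UseTLS is set, the declaration created below corresponds roughly to
// this IR (a sketch, not emitted verbatim by this function):
//   @__safestack_unsafe_stack_ptr = external thread_local(initialexec) global ptr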
1930 UnsafeStackPtr = new GlobalVariable(
1931 *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
1932 UnsafeStackPtrVar, nullptr, TLSModel);
1933 } else {
1934 // The variable exists, check its type and attributes.
1935 if (UnsafeStackPtr->getValueType() != StackPtrTy)
1936 report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
1937 if (UseTLS != UnsafeStackPtr->isThreadLocal())
1938 report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
1939 (UseTLS ? "" : "not ") + "be thread-local");
1940 }
1941 return UnsafeStackPtr;
1942 }
1944 Value *
1945 TargetLoweringBase::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
1946 if (!TM.getTargetTriple().isAndroid())
1947 return getDefaultSafeStackPointerLocation(IRB, true);
1949 // Android provides a libc function to retrieve the address of the current
1950 // thread's unsafe stack pointer.
1951 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1952 auto *PtrTy = PointerType::getUnqual(M->getContext());
1953 FunctionCallee Fn =
1954 M->getOrInsertFunction("__safestack_pointer_address", PtrTy);
1955 return IRB.CreateCall(Fn);
1956 }
1958 //===----------------------------------------------------------------------===//
1959 // Loop Strength Reduction hooks
1960 //===----------------------------------------------------------------------===//
1962 /// isLegalAddressingMode - Return true if the addressing mode represented
1963 /// by AM is legal for this target, for a load/store of the specified type.
1964 bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
1965 const AddrMode &AM, Type *Ty,
1966 unsigned AS, Instruction *I) const {
1967 // The default implementation supports a conservative, RISC-style r+r and
1968 // r+i addressing mode.
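// For example, "r", "r + imm16", "r + r", and "2*r" are accepted by the
// checks below, while "r + r + imm" and "3*r" are rejected.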
1970 // Allows a sign-extended 16-bit immediate field.
1971 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
1972 return false;
1974 // No global is ever allowed as a base.
1975 if (AM.BaseGV)
1976 return false;
1978 // Only support r+r,
1979 switch (AM.Scale) {
1980 case 0: // "r+i" or just "i", depending on HasBaseReg.
1981 break;
1982 case 1:
1983 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
1984 return false;
1985 // Otherwise we have r+r or r+i.
1986 break;
1987 case 2:
1988 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
1989 return false;
1990 // Allow 2*r as r+r.
1991 break;
1992 default: // Don't allow n * r
1993 return false;
1994 }
1996 return true;
1997 }
1999 //===----------------------------------------------------------------------===//
2000 // Stack Protector
2001 //===----------------------------------------------------------------------===//
2003 // For OpenBSD return its special guard variable. Otherwise return nullptr,
2004 // so that SelectionDAG handles SSP.
2005 Value *TargetLoweringBase::getIRStackGuard(IRBuilderBase &IRB) const {
2006 if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
2007 Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
2008 PointerType *PtrTy = PointerType::getUnqual(M.getContext());
2009 Constant *C = M.getOrInsertGlobal("__guard_local", PtrTy);
2010 if (GlobalVariable *G = dyn_cast_or_null<GlobalVariable>(C))
2011 G->setVisibility(GlobalValue::HiddenVisibility);
2012 return C;
2013 }
2014 return nullptr;
2015 }
2017 // Currently only support "standard" __stack_chk_guard.
2018 // TODO: add LOAD_STACK_GUARD support.
2019 void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
2020 if (!M.getNamedValue("__stack_chk_guard")) {
2021 auto *GV = new GlobalVariable(M, PointerType::getUnqual(M.getContext()),
2022 false, GlobalVariable::ExternalLinkage,
2023 nullptr, "__stack_chk_guard");
2025 // FreeBSD has "__stack_chk_guard" defined externally in libc.so.
2026 if (M.getDirectAccessExternalData() &&
2027 !TM.getTargetTriple().isWindowsGNUEnvironment() &&
2028 !TM.getTargetTriple().isOSFreeBSD() &&
2029 (!TM.getTargetTriple().isOSDarwin() ||
2030 TM.getRelocationModel() == Reloc::Static))
2031 GV->setDSOLocal(true);
2032 }
2033 }
2035 // Currently only support "standard" __stack_chk_guard.
2036 // TODO: add LOAD_STACK_GUARD support.
2037 Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
2038 return M.getNamedValue("__stack_chk_guard");
2039 }
2041 Function *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
2042 return nullptr;
2043 }
2045 unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
2046 return MinimumJumpTableEntries;
2047 }
2049 void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
2050 MinimumJumpTableEntries = Val;
2051 }
2053 unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
2054 return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
2055 }
2057 unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
2058 return MaximumJumpTableSize;
2059 }
2061 void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
2062 MaximumJumpTableSize = Val;
2063 }
2065 bool TargetLoweringBase::isJumpTableRelative() const {
2066 return getTargetMachine().isPositionIndependent();
2067 }
2069 Align TargetLoweringBase::getPrefLoopAlignment(MachineLoop *ML) const {
2070 if (TM.Options.LoopAlignment)
2071 return Align(TM.Options.LoopAlignment);
2072 return PrefLoopAlignment;
2073 }
2075 unsigned TargetLoweringBase::getMaxPermittedBytesForAlignment(
2076 MachineBasicBlock *MBB) const {
2077 return MaxBytesForAlignment;
2078 }
2080 //===----------------------------------------------------------------------===//
2081 // Reciprocal Estimates
2082 //===----------------------------------------------------------------------===//
2084 /// Get the reciprocal estimate attribute string for a function that will
2085 /// override the target defaults.
2086 static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
2087 const Function &F = MF.getFunction();
2088 return F.getFnAttribute("reciprocal-estimates").getValueAsString();
2089 }
2091 /// Construct a string for the given reciprocal operation of the given type.
2092 /// This string should match the corresponding option to the front-end's
2093 /// "-mrecip" flag assuming those strings have been passed through in an
2094 /// attribute string. For example, "vec-divf" for a division of a vXf32.
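/// Further samples of the mapping implemented below: scalar f64 division ->
/// "divd", vXf32 square root -> "vec-sqrtf", scalar f16 square root -> "sqrth".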
2095 static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
2096 std::string Name = VT.isVector() ? "vec-" : "";
2098 Name += IsSqrt ? "sqrt" : "div";
2100 // TODO: Handle other float types?
2101 if (VT.getScalarType() == MVT::f64) {
2102 Name += "d";
2103 } else if (VT.getScalarType() == MVT::f16) {
2104 Name += "h";
2105 } else {
2106 assert(VT.getScalarType() == MVT::f32 &&
2107 "Unexpected FP type for reciprocal estimate");
2108 Name += "f";
2111 return Name;
2112 }
2114 /// Return true and set the character position and value (a single numeric
2115 /// character) of the customized refinement step count in the input string, if
2116 /// one exists. Return false if there is no customized refinement step count.
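/// For example, given the token "vec-divf:2", Position is set to the offset
/// of ':' and Value to 2; given "vec-divf" alone, the function returns false.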
2117 static bool parseRefinementStep(StringRef In, size_t &Position,
2118 uint8_t &Value) {
2119 const char RefStepToken = ':';
2120 Position = In.find(RefStepToken);
2121 if (Position == StringRef::npos)
2122 return false;
2124 StringRef RefStepString = In.substr(Position + 1);
2125 // Allow exactly one numeric character for the additional refinement
2126 // step parameter.
2127 if (RefStepString.size() == 1) {
2128 char RefStepChar = RefStepString[0];
2129 if (isDigit(RefStepChar)) {
2130 Value = RefStepChar - '0';
2131 return true;
2132 }
2133 }
2134 report_fatal_error("Invalid refinement step for -recip.");
2135 }
2137 /// For the input attribute string, return one of the ReciprocalEstimate enum
2138 /// status values (enabled, disabled, or not specified) for this operation on
2139 /// the specified data type.
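/// For example, with the attribute string "!sqrtf,vec-divf", a scalar f32
/// square root is Disabled, a vXf32 division is Enabled, and every other
/// operation is Unspecified.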
2140 static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
2141 if (Override.empty())
2142 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2144 SmallVector<StringRef, 4> OverrideVector;
2145 Override.split(OverrideVector, ',');
2146 unsigned NumArgs = OverrideVector.size();
2148 // Check if "all", "none", or "default" was specified.
2149 if (NumArgs == 1) {
2150 // Look for an optional setting of the number of refinement steps needed
2151 // for this type of reciprocal operation.
2152 size_t RefPos;
2153 uint8_t RefSteps;
2154 if (parseRefinementStep(Override, RefPos, RefSteps)) {
2155 // Split the string for further processing.
2156 Override = Override.substr(0, RefPos);
2157 }
2159 // All reciprocal types are enabled.
2160 if (Override == "all")
2161 return TargetLoweringBase::ReciprocalEstimate::Enabled;
2163 // All reciprocal types are disabled.
2164 if (Override == "none")
2165 return TargetLoweringBase::ReciprocalEstimate::Disabled;
2167 // Target defaults for enablement are used.
2168 if (Override == "default")
2169 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2170 }
2172 // The attribute string may omit the size suffix ('f'/'d').
2173 std::string VTName = getReciprocalOpName(IsSqrt, VT);
2174 std::string VTNameNoSize = VTName;
2175 VTNameNoSize.pop_back();
2176 static const char DisabledPrefix = '!';
2178 for (StringRef RecipType : OverrideVector) {
2179 size_t RefPos;
2180 uint8_t RefSteps;
2181 if (parseRefinementStep(RecipType, RefPos, RefSteps))
2182 RecipType = RecipType.substr(0, RefPos);
2184 // Ignore the disablement token for string matching.
2185 bool IsDisabled = RecipType[0] == DisabledPrefix;
2186 if (IsDisabled)
2187 RecipType = RecipType.substr(1);
2189 if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
2190 return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
2191 : TargetLoweringBase::ReciprocalEstimate::Enabled;
2192 }
2194 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2195 }
2197 /// For the input attribute string, return the customized refinement step count
2198 /// for this operation on the specified data type. If the step count does not
2199 /// exist, return the ReciprocalEstimate enum value for unspecified.
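/// For example, "vec-sqrtf:3" requests 3 refinement steps (typically
/// Newton-Raphson iterations) for a vXf32 square root, and "all:1" requests a
/// single step for every reciprocal operation.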
2200 static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
2201 if (Override.empty())
2202 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2204 SmallVector<StringRef, 4> OverrideVector;
2205 Override.split(OverrideVector, ',');
2206 unsigned NumArgs = OverrideVector.size();
2208 // Check if "all", "default", or "none" was specified.
2209 if (NumArgs == 1) {
2210 // Look for an optional setting of the number of refinement steps needed
2211 // for this type of reciprocal operation.
2212 size_t RefPos;
2213 uint8_t RefSteps;
2214 if (!parseRefinementStep(Override, RefPos, RefSteps))
2215 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2217 // Split the string for further processing.
2218 Override = Override.substr(0, RefPos);
2219 assert(Override != "none" &&
2220 "Disabled reciprocals, but specifed refinement steps?");
2222 // If this is a general override, return the specified number of steps.
2223 if (Override == "all" || Override == "default")
2224 return RefSteps;
2225 }
2227 // The attribute string may omit the size suffix ('f'/'d').
2228 std::string VTName = getReciprocalOpName(IsSqrt, VT);
2229 std::string VTNameNoSize = VTName;
2230 VTNameNoSize.pop_back();
2232 for (StringRef RecipType : OverrideVector) {
2233 size_t RefPos;
2234 uint8_t RefSteps;
2235 if (!parseRefinementStep(RecipType, RefPos, RefSteps))
2236 continue;
2238 RecipType = RecipType.substr(0, RefPos);
2239 if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
2240 return RefSteps;
2241 }
2243 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2244 }
2246 int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
2247 MachineFunction &MF) const {
2248 return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
2249 }
2251 int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
2252 MachineFunction &MF) const {
2253 return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
2254 }
2256 int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
2257 MachineFunction &MF) const {
2258 return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
2259 }
2261 int TargetLoweringBase::getDivRefinementSteps(EVT VT,
2262 MachineFunction &MF) const {
2263 return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
2264 }
2266 bool TargetLoweringBase::isLoadBitCastBeneficial(
2267 EVT LoadVT, EVT BitcastVT, const SelectionDAG &DAG,
2268 const MachineMemOperand &MMO) const {
2269 // Single-element vectors are scalarized, so we should generally avoid having
2270 // any memory operations on such types, as they would get scalarized too.
2271 if (LoadVT.isFixedLengthVector() && BitcastVT.isFixedLengthVector() &&
2272 BitcastVT.getVectorNumElements() == 1)
2273 return false;
2275 // Don't do this if we could do an indexed load on the original type, but
2276 // not on the new one.
2277 if (!LoadVT.isSimple() || !BitcastVT.isSimple())
2278 return true;
2280 MVT LoadMVT = LoadVT.getSimpleVT();
2282 // Don't bother doing this if it's just going to be promoted again later, as
2283 // doing so might interfere with other combines.
2284 if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
2285 getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
2286 return false;
2288 unsigned Fast = 0;
2289 return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), BitcastVT,
2290 MMO, &Fast) &&
2291 Fast;
2292 }
2294 void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
2295 MF.getRegInfo().freezeReservedRegs(MF);
2296 }
2298 MachineMemOperand::Flags TargetLoweringBase::getLoadMemOperandFlags(
2299 const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC,
2300 const TargetLibraryInfo *LibInfo) const {
2301 MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad;
2302 if (LI.isVolatile())
2303 Flags |= MachineMemOperand::MOVolatile;
2305 if (LI.hasMetadata(LLVMContext::MD_nontemporal))
2306 Flags |= MachineMemOperand::MONonTemporal;
2308 if (LI.hasMetadata(LLVMContext::MD_invariant_load))
2309 Flags |= MachineMemOperand::MOInvariant;
2311 if (isDereferenceableAndAlignedPointer(LI.getPointerOperand(), LI.getType(),
2312 LI.getAlign(), DL, &LI, AC,
2313 /*DT=*/nullptr, LibInfo))
2314 Flags |= MachineMemOperand::MODereferenceable;
2316 Flags |= getTargetMMOFlags(LI);
2317 return Flags;
2318 }
2320 MachineMemOperand::Flags
2321 TargetLoweringBase::getStoreMemOperandFlags(const StoreInst &SI,
2322 const DataLayout &DL) const {
2323 MachineMemOperand::Flags Flags = MachineMemOperand::MOStore;
2325 if (SI.isVolatile())
2326 Flags |= MachineMemOperand::MOVolatile;
2328 if (SI.hasMetadata(LLVMContext::MD_nontemporal))
2329 Flags |= MachineMemOperand::MONonTemporal;
2331 // FIXME: Not preserving dereferenceable
2332 Flags |= getTargetMMOFlags(SI);
2333 return Flags;
2334 }
2336 MachineMemOperand::Flags
2337 TargetLoweringBase::getAtomicMemOperandFlags(const Instruction &AI,
2338 const DataLayout &DL) const {
2339 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
2341 if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&AI)) {
2342 if (RMW->isVolatile())
2343 Flags |= MachineMemOperand::MOVolatile;
2344 } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(&AI)) {
2345 if (CmpX->isVolatile())
2346 Flags |= MachineMemOperand::MOVolatile;
2347 } else
2348 llvm_unreachable("not an atomic instruction");
2350 // FIXME: Not preserving dereferenceable
2351 Flags |= getTargetMMOFlags(AI);
2352 return Flags;
2353 }
2355 Instruction *TargetLoweringBase::emitLeadingFence(IRBuilderBase &Builder,
2356 Instruction *Inst,
2357 AtomicOrdering Ord) const {
2358 if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
2359 return Builder.CreateFence(Ord);
2360 else
2361 return nullptr;
2362 }
2364 Instruction *TargetLoweringBase::emitTrailingFence(IRBuilderBase &Builder,
2365 Instruction *Inst,
2366 AtomicOrdering Ord) const {
2367 if (isAcquireOrStronger(Ord))
2368 return Builder.CreateFence(Ord);
2369 else
2370 return nullptr;
2371 }
2373 //===----------------------------------------------------------------------===//
2374 // GlobalISel Hooks
2375 //===----------------------------------------------------------------------===//
2377 bool TargetLoweringBase::shouldLocalize(const MachineInstr &MI,
2378 const TargetTransformInfo *TTI) const {
2379 auto &MF = *MI.getMF();
2380 auto &MRI = MF.getRegInfo();
2381 // Assuming a spill and reload of a value has a cost of 1 instruction each,
2382 // this helper function computes the maximum number of uses we should consider
2383 // for remat. E.g. on arm64 global addresses take 2 insts to materialize. We
2384 // break even in terms of code size when the original MI has 2 users vs
2385 // choosing to potentially spill. Any more than 2 users and we have a net code
2386 // size increase. This doesn't take into account register pressure though.
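// As a worked example of the break-even math: with RematCost == 2,
// rematerializing for 2 users costs 4 instructions, the same as one
// definition plus a spill and reload, so maxUses(2) below is 2; a costlier
// remat only pays off for a single user.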
2387 auto maxUses = [](unsigned RematCost) {
2388 // A cost of 1 means remats are basically free.
2389 if (RematCost == 1)
2390 return std::numeric_limits<unsigned>::max();
2391 if (RematCost == 2)
2392 return 2U;
2394 // Remat is too expensive, only sink if there's one user.
2395 if (RematCost > 2)
2396 return 1U;
2397 llvm_unreachable("Unexpected remat cost");
2398 };
2400 switch (MI.getOpcode()) {
2401 default:
2402 return false;
2403 // Constant-like instructions should be close to their users.
2404 // We don't want long live-ranges for them.
2405 case TargetOpcode::G_CONSTANT:
2406 case TargetOpcode::G_FCONSTANT:
2407 case TargetOpcode::G_FRAME_INDEX:
2408 case TargetOpcode::G_INTTOPTR:
2409 return true;
2410 case TargetOpcode::G_GLOBAL_VALUE: {
2411 unsigned RematCost = TTI->getGISelRematGlobalCost();
2412 Register Reg = MI.getOperand(0).getReg();
2413 unsigned MaxUses = maxUses(RematCost);
2414 if (MaxUses == UINT_MAX)
2415 return true; // Remats are "free" so always localize.
2416 return MRI.hasAtMostUserInstrs(Reg, MaxUses);