[llvm-exegesis] Fix missing std::move.
[llvm-complete.git] / lib / Target / X86 / X86LegalizerInfo.cpp
blob4f59e0f79a72cc76042f107e3c476371576df0ae
1 //===- X86LegalizerInfo.cpp --------------------------------------*- C++ -*-==//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 /// \file
10 /// This file implements the targeting of the MachineLegalizer class for X86.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
14 #include "X86LegalizerInfo.h"
15 #include "X86Subtarget.h"
16 #include "X86TargetMachine.h"
17 #include "llvm/CodeGen/TargetOpcodes.h"
18 #include "llvm/CodeGen/ValueTypes.h"
19 #include "llvm/IR/DerivedTypes.h"
20 #include "llvm/IR/Type.h"
22 using namespace llvm;
23 using namespace TargetOpcode;
24 using namespace LegalizeActions;
26 /// FIXME: The following static functions are SizeChangeStrategy functions
27 /// that are meant to temporarily mimic the behaviour of the old legalization
28 /// based on doubling/halving non-legal types as closely as possible. This is
29 /// not entirely possible as only legalizing the types that are exactly a power
30 /// of 2 times the size of the legal types would require specifying all those
31 /// sizes explicitly.
32 /// In practice, not specifying those isn't a problem, and the below functions
33 /// should disappear quickly as we add support for legalizing non-power-of-2
34 /// sized types further.
35 static void
36 addAndInterleaveWithUnsupported(LegalizerInfo::SizeAndActionsVec &result,
37 const LegalizerInfo::SizeAndActionsVec &v) {
38 for (unsigned i = 0; i < v.size(); ++i) {
39 result.push_back(v[i]);
40 if (i + 1 < v[i].first && i + 1 < v.size() &&
41 v[i + 1].first != v[i].first + 1)
42 result.push_back({v[i].first + 1, Unsupported});
46 static LegalizerInfo::SizeAndActionsVec
47 widen_1(const LegalizerInfo::SizeAndActionsVec &v) {
48 assert(v.size() >= 1);
49 assert(v[0].first > 1);
50 LegalizerInfo::SizeAndActionsVec result = {{1, WidenScalar},
51 {2, Unsupported}};
52 addAndInterleaveWithUnsupported(result, v);
53 auto Largest = result.back().first;
54 result.push_back({Largest + 1, Unsupported});
55 return result;
58 X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
59 const X86TargetMachine &TM)
60 : Subtarget(STI), TM(TM) {
62 setLegalizerInfo32bit();
63 setLegalizerInfo64bit();
64 setLegalizerInfoSSE1();
65 setLegalizerInfoSSE2();
66 setLegalizerInfoSSE41();
67 setLegalizerInfoAVX();
68 setLegalizerInfoAVX2();
69 setLegalizerInfoAVX512();
70 setLegalizerInfoAVX512DQ();
71 setLegalizerInfoAVX512BW();
73 setLegalizeScalarToDifferentSizeStrategy(G_PHI, 0, widen_1);
74 for (unsigned BinOp : {G_SUB, G_MUL, G_AND, G_OR, G_XOR})
75 setLegalizeScalarToDifferentSizeStrategy(BinOp, 0, widen_1);
76 for (unsigned MemOp : {G_LOAD, G_STORE})
77 setLegalizeScalarToDifferentSizeStrategy(MemOp, 0,
78 narrowToSmallerAndWidenToSmallest);
79 setLegalizeScalarToDifferentSizeStrategy(
80 G_GEP, 1, widenToLargerTypesUnsupportedOtherwise);
81 setLegalizeScalarToDifferentSizeStrategy(
82 G_CONSTANT, 0, widenToLargerTypesAndNarrowToLargest);
84 computeTables();
85 verify(*STI.getInstrInfo());
88 void X86LegalizerInfo::setLegalizerInfo32bit() {
90 const LLT p0 = LLT::pointer(0, TM.getPointerSizeInBits(0));
91 const LLT s1 = LLT::scalar(1);
92 const LLT s8 = LLT::scalar(8);
93 const LLT s16 = LLT::scalar(16);
94 const LLT s32 = LLT::scalar(32);
95 const LLT s64 = LLT::scalar(64);
96 const LLT s128 = LLT::scalar(128);
98 for (auto Ty : {p0, s1, s8, s16, s32})
99 setAction({G_IMPLICIT_DEF, Ty}, Legal);
101 for (auto Ty : {s8, s16, s32, p0})
102 setAction({G_PHI, Ty}, Legal);
104 for (unsigned BinOp : {G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
105 for (auto Ty : {s8, s16, s32})
106 setAction({BinOp, Ty}, Legal);
108 for (unsigned Op : {G_UADDE}) {
109 setAction({Op, s32}, Legal);
110 setAction({Op, 1, s1}, Legal);
113 for (unsigned MemOp : {G_LOAD, G_STORE}) {
114 for (auto Ty : {s8, s16, s32, p0})
115 setAction({MemOp, Ty}, Legal);
117 // And everything's fine in addrspace 0.
118 setAction({MemOp, 1, p0}, Legal);
121 // Pointer-handling
122 setAction({G_FRAME_INDEX, p0}, Legal);
123 setAction({G_GLOBAL_VALUE, p0}, Legal);
125 setAction({G_GEP, p0}, Legal);
126 setAction({G_GEP, 1, s32}, Legal);
128 if (!Subtarget.is64Bit()) {
129 getActionDefinitionsBuilder(G_PTRTOINT)
130 .legalForCartesianProduct({s1, s8, s16, s32}, {p0})
131 .maxScalar(0, s32)
132 .widenScalarToNextPow2(0, /*Min*/ 8);
133 getActionDefinitionsBuilder(G_INTTOPTR).legalFor({{p0, s32}});
135 // Shifts and SDIV
136 getActionDefinitionsBuilder(
137 {G_SHL, G_LSHR, G_ASHR, G_SDIV, G_SREM, G_UDIV, G_UREM})
138 .legalFor({s8, s16, s32})
139 .clampScalar(0, s8, s32);
142 // Control-flow
143 setAction({G_BRCOND, s1}, Legal);
145 // Constants
146 for (auto Ty : {s8, s16, s32, p0})
147 setAction({TargetOpcode::G_CONSTANT, Ty}, Legal);
149 // Extensions
150 for (auto Ty : {s8, s16, s32}) {
151 setAction({G_ZEXT, Ty}, Legal);
152 setAction({G_SEXT, Ty}, Legal);
153 setAction({G_ANYEXT, Ty}, Legal);
155 setAction({G_ANYEXT, s128}, Legal);
157 // Comparison
158 setAction({G_ICMP, s1}, Legal);
160 for (auto Ty : {s8, s16, s32, p0})
161 setAction({G_ICMP, 1, Ty}, Legal);
163 // Merge/Unmerge
164 for (const auto &Ty : {s16, s32, s64}) {
165 setAction({G_MERGE_VALUES, Ty}, Legal);
166 setAction({G_UNMERGE_VALUES, 1, Ty}, Legal);
168 for (const auto &Ty : {s8, s16, s32}) {
169 setAction({G_MERGE_VALUES, 1, Ty}, Legal);
170 setAction({G_UNMERGE_VALUES, Ty}, Legal);
174 void X86LegalizerInfo::setLegalizerInfo64bit() {
176 if (!Subtarget.is64Bit())
177 return;
179 const LLT p0 = LLT::pointer(0, TM.getPointerSizeInBits(0));
180 const LLT s1 = LLT::scalar(1);
181 const LLT s8 = LLT::scalar(8);
182 const LLT s16 = LLT::scalar(16);
183 const LLT s32 = LLT::scalar(32);
184 const LLT s64 = LLT::scalar(64);
185 const LLT s128 = LLT::scalar(128);
187 setAction({G_IMPLICIT_DEF, s64}, Legal);
188 // Need to have that, as tryFoldImplicitDef will create this pattern:
189 // s128 = EXTEND (G_IMPLICIT_DEF s32/s64) -> s128 = G_IMPLICIT_DEF
190 setAction({G_IMPLICIT_DEF, s128}, Legal);
192 setAction({G_PHI, s64}, Legal);
194 for (unsigned BinOp : {G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
195 setAction({BinOp, s64}, Legal);
197 for (unsigned MemOp : {G_LOAD, G_STORE})
198 setAction({MemOp, s64}, Legal);
200 // Pointer-handling
201 setAction({G_GEP, 1, s64}, Legal);
202 getActionDefinitionsBuilder(G_PTRTOINT)
203 .legalForCartesianProduct({s1, s8, s16, s32, s64}, {p0})
204 .maxScalar(0, s64)
205 .widenScalarToNextPow2(0, /*Min*/ 8);
206 getActionDefinitionsBuilder(G_INTTOPTR).legalFor({{p0, s64}});
208 // Constants
209 setAction({TargetOpcode::G_CONSTANT, s64}, Legal);
211 // Extensions
212 for (unsigned extOp : {G_ZEXT, G_SEXT, G_ANYEXT}) {
213 setAction({extOp, s64}, Legal);
216 getActionDefinitionsBuilder(G_SITOFP)
217 .legalForCartesianProduct({s32, s64})
218 .clampScalar(1, s32, s64)
219 .widenScalarToNextPow2(1)
220 .clampScalar(0, s32, s64)
221 .widenScalarToNextPow2(0);
223 getActionDefinitionsBuilder(G_FPTOSI)
224 .legalForCartesianProduct({s32, s64})
225 .clampScalar(1, s32, s64)
226 .widenScalarToNextPow2(0)
227 .clampScalar(0, s32, s64)
228 .widenScalarToNextPow2(1);
230 // Comparison
231 setAction({G_ICMP, 1, s64}, Legal);
233 getActionDefinitionsBuilder(G_FCMP)
234 .legalForCartesianProduct({s8}, {s32, s64})
235 .clampScalar(0, s8, s8)
236 .clampScalar(1, s32, s64)
237 .widenScalarToNextPow2(1);
239 // Shifts and SDIV
240 getActionDefinitionsBuilder(
241 {G_SHL, G_LSHR, G_ASHR, G_SDIV, G_SREM, G_UDIV, G_UREM})
242 .legalFor({s8, s16, s32, s64})
243 .clampScalar(0, s8, s64);
245 // Merge/Unmerge
246 setAction({G_MERGE_VALUES, s128}, Legal);
247 setAction({G_UNMERGE_VALUES, 1, s128}, Legal);
248 setAction({G_MERGE_VALUES, 1, s128}, Legal);
249 setAction({G_UNMERGE_VALUES, s128}, Legal);
252 void X86LegalizerInfo::setLegalizerInfoSSE1() {
253 if (!Subtarget.hasSSE1())
254 return;
256 const LLT s32 = LLT::scalar(32);
257 const LLT s64 = LLT::scalar(64);
258 const LLT v4s32 = LLT::vector(4, 32);
259 const LLT v2s64 = LLT::vector(2, 64);
261 for (unsigned BinOp : {G_FADD, G_FSUB, G_FMUL, G_FDIV})
262 for (auto Ty : {s32, v4s32})
263 setAction({BinOp, Ty}, Legal);
265 for (unsigned MemOp : {G_LOAD, G_STORE})
266 for (auto Ty : {v4s32, v2s64})
267 setAction({MemOp, Ty}, Legal);
269 // Constants
270 setAction({TargetOpcode::G_FCONSTANT, s32}, Legal);
272 // Merge/Unmerge
273 for (const auto &Ty : {v4s32, v2s64}) {
274 setAction({G_MERGE_VALUES, Ty}, Legal);
275 setAction({G_UNMERGE_VALUES, 1, Ty}, Legal);
277 setAction({G_MERGE_VALUES, 1, s64}, Legal);
278 setAction({G_UNMERGE_VALUES, s64}, Legal);
281 void X86LegalizerInfo::setLegalizerInfoSSE2() {
282 if (!Subtarget.hasSSE2())
283 return;
285 const LLT s32 = LLT::scalar(32);
286 const LLT s64 = LLT::scalar(64);
287 const LLT v16s8 = LLT::vector(16, 8);
288 const LLT v8s16 = LLT::vector(8, 16);
289 const LLT v4s32 = LLT::vector(4, 32);
290 const LLT v2s64 = LLT::vector(2, 64);
292 const LLT v32s8 = LLT::vector(32, 8);
293 const LLT v16s16 = LLT::vector(16, 16);
294 const LLT v8s32 = LLT::vector(8, 32);
295 const LLT v4s64 = LLT::vector(4, 64);
297 for (unsigned BinOp : {G_FADD, G_FSUB, G_FMUL, G_FDIV})
298 for (auto Ty : {s64, v2s64})
299 setAction({BinOp, Ty}, Legal);
301 for (unsigned BinOp : {G_ADD, G_SUB})
302 for (auto Ty : {v16s8, v8s16, v4s32, v2s64})
303 setAction({BinOp, Ty}, Legal);
305 setAction({G_MUL, v8s16}, Legal);
307 setAction({G_FPEXT, s64}, Legal);
308 setAction({G_FPEXT, 1, s32}, Legal);
310 setAction({G_FPTRUNC, s32}, Legal);
311 setAction({G_FPTRUNC, 1, s64}, Legal);
313 // Constants
314 setAction({TargetOpcode::G_FCONSTANT, s64}, Legal);
316 // Merge/Unmerge
317 for (const auto &Ty :
318 {v16s8, v32s8, v8s16, v16s16, v4s32, v8s32, v2s64, v4s64}) {
319 setAction({G_MERGE_VALUES, Ty}, Legal);
320 setAction({G_UNMERGE_VALUES, 1, Ty}, Legal);
322 for (const auto &Ty : {v16s8, v8s16, v4s32, v2s64}) {
323 setAction({G_MERGE_VALUES, 1, Ty}, Legal);
324 setAction({G_UNMERGE_VALUES, Ty}, Legal);
328 void X86LegalizerInfo::setLegalizerInfoSSE41() {
329 if (!Subtarget.hasSSE41())
330 return;
332 const LLT v4s32 = LLT::vector(4, 32);
334 setAction({G_MUL, v4s32}, Legal);
337 void X86LegalizerInfo::setLegalizerInfoAVX() {
338 if (!Subtarget.hasAVX())
339 return;
341 const LLT v16s8 = LLT::vector(16, 8);
342 const LLT v8s16 = LLT::vector(8, 16);
343 const LLT v4s32 = LLT::vector(4, 32);
344 const LLT v2s64 = LLT::vector(2, 64);
346 const LLT v32s8 = LLT::vector(32, 8);
347 const LLT v64s8 = LLT::vector(64, 8);
348 const LLT v16s16 = LLT::vector(16, 16);
349 const LLT v32s16 = LLT::vector(32, 16);
350 const LLT v8s32 = LLT::vector(8, 32);
351 const LLT v16s32 = LLT::vector(16, 32);
352 const LLT v4s64 = LLT::vector(4, 64);
353 const LLT v8s64 = LLT::vector(8, 64);
355 for (unsigned MemOp : {G_LOAD, G_STORE})
356 for (auto Ty : {v8s32, v4s64})
357 setAction({MemOp, Ty}, Legal);
359 for (auto Ty : {v32s8, v16s16, v8s32, v4s64}) {
360 setAction({G_INSERT, Ty}, Legal);
361 setAction({G_EXTRACT, 1, Ty}, Legal);
363 for (auto Ty : {v16s8, v8s16, v4s32, v2s64}) {
364 setAction({G_INSERT, 1, Ty}, Legal);
365 setAction({G_EXTRACT, Ty}, Legal);
367 // Merge/Unmerge
368 for (const auto &Ty :
369 {v32s8, v64s8, v16s16, v32s16, v8s32, v16s32, v4s64, v8s64}) {
370 setAction({G_MERGE_VALUES, Ty}, Legal);
371 setAction({G_UNMERGE_VALUES, 1, Ty}, Legal);
373 for (const auto &Ty :
374 {v16s8, v32s8, v8s16, v16s16, v4s32, v8s32, v2s64, v4s64}) {
375 setAction({G_MERGE_VALUES, 1, Ty}, Legal);
376 setAction({G_UNMERGE_VALUES, Ty}, Legal);
380 void X86LegalizerInfo::setLegalizerInfoAVX2() {
381 if (!Subtarget.hasAVX2())
382 return;
384 const LLT v32s8 = LLT::vector(32, 8);
385 const LLT v16s16 = LLT::vector(16, 16);
386 const LLT v8s32 = LLT::vector(8, 32);
387 const LLT v4s64 = LLT::vector(4, 64);
389 const LLT v64s8 = LLT::vector(64, 8);
390 const LLT v32s16 = LLT::vector(32, 16);
391 const LLT v16s32 = LLT::vector(16, 32);
392 const LLT v8s64 = LLT::vector(8, 64);
394 for (unsigned BinOp : {G_ADD, G_SUB})
395 for (auto Ty : {v32s8, v16s16, v8s32, v4s64})
396 setAction({BinOp, Ty}, Legal);
398 for (auto Ty : {v16s16, v8s32})
399 setAction({G_MUL, Ty}, Legal);
401 // Merge/Unmerge
402 for (const auto &Ty : {v64s8, v32s16, v16s32, v8s64}) {
403 setAction({G_MERGE_VALUES, Ty}, Legal);
404 setAction({G_UNMERGE_VALUES, 1, Ty}, Legal);
406 for (const auto &Ty : {v32s8, v16s16, v8s32, v4s64}) {
407 setAction({G_MERGE_VALUES, 1, Ty}, Legal);
408 setAction({G_UNMERGE_VALUES, Ty}, Legal);
412 void X86LegalizerInfo::setLegalizerInfoAVX512() {
413 if (!Subtarget.hasAVX512())
414 return;
416 const LLT v16s8 = LLT::vector(16, 8);
417 const LLT v8s16 = LLT::vector(8, 16);
418 const LLT v4s32 = LLT::vector(4, 32);
419 const LLT v2s64 = LLT::vector(2, 64);
421 const LLT v32s8 = LLT::vector(32, 8);
422 const LLT v16s16 = LLT::vector(16, 16);
423 const LLT v8s32 = LLT::vector(8, 32);
424 const LLT v4s64 = LLT::vector(4, 64);
426 const LLT v64s8 = LLT::vector(64, 8);
427 const LLT v32s16 = LLT::vector(32, 16);
428 const LLT v16s32 = LLT::vector(16, 32);
429 const LLT v8s64 = LLT::vector(8, 64);
431 for (unsigned BinOp : {G_ADD, G_SUB})
432 for (auto Ty : {v16s32, v8s64})
433 setAction({BinOp, Ty}, Legal);
435 setAction({G_MUL, v16s32}, Legal);
437 for (unsigned MemOp : {G_LOAD, G_STORE})
438 for (auto Ty : {v16s32, v8s64})
439 setAction({MemOp, Ty}, Legal);
441 for (auto Ty : {v64s8, v32s16, v16s32, v8s64}) {
442 setAction({G_INSERT, Ty}, Legal);
443 setAction({G_EXTRACT, 1, Ty}, Legal);
445 for (auto Ty : {v32s8, v16s16, v8s32, v4s64, v16s8, v8s16, v4s32, v2s64}) {
446 setAction({G_INSERT, 1, Ty}, Legal);
447 setAction({G_EXTRACT, Ty}, Legal);
450 /************ VLX *******************/
451 if (!Subtarget.hasVLX())
452 return;
454 for (auto Ty : {v4s32, v8s32})
455 setAction({G_MUL, Ty}, Legal);
458 void X86LegalizerInfo::setLegalizerInfoAVX512DQ() {
459 if (!(Subtarget.hasAVX512() && Subtarget.hasDQI()))
460 return;
462 const LLT v8s64 = LLT::vector(8, 64);
464 setAction({G_MUL, v8s64}, Legal);
466 /************ VLX *******************/
467 if (!Subtarget.hasVLX())
468 return;
470 const LLT v2s64 = LLT::vector(2, 64);
471 const LLT v4s64 = LLT::vector(4, 64);
473 for (auto Ty : {v2s64, v4s64})
474 setAction({G_MUL, Ty}, Legal);
477 void X86LegalizerInfo::setLegalizerInfoAVX512BW() {
478 if (!(Subtarget.hasAVX512() && Subtarget.hasBWI()))
479 return;
481 const LLT v64s8 = LLT::vector(64, 8);
482 const LLT v32s16 = LLT::vector(32, 16);
484 for (unsigned BinOp : {G_ADD, G_SUB})
485 for (auto Ty : {v64s8, v32s16})
486 setAction({BinOp, Ty}, Legal);
488 setAction({G_MUL, v32s16}, Legal);
490 /************ VLX *******************/
491 if (!Subtarget.hasVLX())
492 return;
494 const LLT v8s16 = LLT::vector(8, 16);
495 const LLT v16s16 = LLT::vector(16, 16);
497 for (auto Ty : {v8s16, v16s16})
498 setAction({G_MUL, Ty}, Legal);