//===- X86LegalizerInfo.cpp --------------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "X86LegalizerInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"

using namespace llvm;
using namespace TargetOpcode;
using namespace LegalizeActions;

/// FIXME: The following static functions are SizeChangeStrategy functions
/// that are meant to temporarily mimic the behaviour of the old legalization
/// based on doubling/halving non-legal types as closely as possible. This is
/// not entirely possible, as only legalizing the types that are exactly a
/// power of 2 times the size of the legal types would require specifying all
/// those sizes explicitly.
/// In practice, not specifying those isn't a problem, and the below functions
/// should disappear quickly as we add support for legalizing non-power-of-2
/// sized types further.
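// Helper for the SizeChangeStrategy functions below: copies the entries of v
// into result and, whenever the following entry in v does not start at the
// very next size, inserts an Unsupported entry right after the current one so
// the sizes in between stay unsupported.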
static void addAndInterleaveWithUnsupported(
    LegacyLegalizerInfo::SizeAndActionsVec &result,
    const LegacyLegalizerInfo::SizeAndActionsVec &v) {
  for (unsigned i = 0; i < v.size(); ++i) {
    result.push_back(v[i]);
    if (i + 1 < v[i].first && i + 1 < v.size() &&
        v[i + 1].first != v[i].first + 1)
      result.push_back({v[i].first + 1, LegacyLegalizeActions::Unsupported});
  }
}

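// Builds a SizeAndActionsVec that widens 1-bit scalars and marks every size
// not covered by v as Unsupported. As a sketch (assuming the table is queried
// as a step function keyed on the largest listed size <= the requested one),
// an input such as {{8, Legal}, {16, Legal}, {32, Legal}} yields
//   {1, WidenScalar}, {2, Unsupported}, {8, Legal}, {9, Unsupported},
//   {16, Legal}, {17, Unsupported}, {32, Legal}, {33, Unsupported}
// so s1 is widened, s8/s16/s32 stay legal, and everything else is rejected.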
static LegacyLegalizerInfo::SizeAndActionsVec
widen_1(const LegacyLegalizerInfo::SizeAndActionsVec &v) {
  assert(v.size() >= 1);
  assert(v[0].first > 1);
  LegacyLegalizerInfo::SizeAndActionsVec result = {
      {1, LegacyLegalizeActions::WidenScalar},
      {2, LegacyLegalizeActions::Unsupported}};
  addAndInterleaveWithUnsupported(result, v);
  auto Largest = result.back().first;
  result.push_back({Largest + 1, LegacyLegalizeActions::Unsupported});
  return result;
}

X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
                                   const X86TargetMachine &TM)
    : Subtarget(STI), TM(TM) {

  setLegalizerInfo32bit();
  setLegalizerInfo64bit();
  setLegalizerInfoSSE1();
  setLegalizerInfoSSE2();
  setLegalizerInfoSSE41();
  setLegalizerInfoAVX();
  setLegalizerInfoAVX2();
  setLegalizerInfoAVX512();
  setLegalizerInfoAVX512DQ();
  setLegalizerInfoAVX512BW();

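  // The setLegalizerInfo* helpers above and the size-change strategies below
  // populate the legacy legalizer tables; G_INTRINSIC_ROUNDEVEN and the mem*
  // intrinsics are handled with the newer rule-based builder API.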
  getActionDefinitionsBuilder(G_INTRINSIC_ROUNDEVEN)
      .scalarize(0)
      .minScalar(0, LLT::scalar(32))
      .libcall();

  auto &LegacyInfo = getLegacyLegalizerInfo();
  LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(G_PHI, 0, widen_1);
  for (unsigned BinOp : {G_SUB, G_MUL, G_AND, G_OR, G_XOR})
    LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(BinOp, 0, widen_1);
  for (unsigned MemOp : {G_LOAD, G_STORE})
    LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(
        MemOp, 0, LegacyLegalizerInfo::narrowToSmallerAndWidenToSmallest);
  LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(
      G_PTR_ADD, 1,
      LegacyLegalizerInfo::widenToLargerTypesUnsupportedOtherwise);
  LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(
      G_CONSTANT, 0,
      LegacyLegalizerInfo::widenToLargerTypesAndNarrowToLargest);

  getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();

  LegacyInfo.computeTables();
  verify(*STI.getInstrInfo());
}

bool X86LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                         MachineInstr &MI) const {
  return true;
}

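// Despite its name, this sets up the baseline scalar and pointer rules for
// both 32-bit and 64-bit targets; the rules that only apply without 64-bit
// support are guarded by !Subtarget.is64Bit() below.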
void X86LegalizerInfo::setLegalizerInfo32bit() {

  const LLT p0 = LLT::pointer(0, TM.getPointerSizeInBits(0));
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT s128 = LLT::scalar(128);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (auto Ty : {p0, s1, s8, s16, s32})
    LegacyInfo.setAction({G_IMPLICIT_DEF, Ty}, LegacyLegalizeActions::Legal);

  for (auto Ty : {s8, s16, s32, p0})
    LegacyInfo.setAction({G_PHI, Ty}, LegacyLegalizeActions::Legal);

  for (unsigned BinOp : {G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
    for (auto Ty : {s8, s16, s32})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

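  // G_UADDE has two type indices: index 0 for the s32 result and operands,
  // index 1 for the s1 carry in/out.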
  for (unsigned Op : {G_UADDE}) {
    LegacyInfo.setAction({Op, s32}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({Op, 1, s1}, LegacyLegalizeActions::Legal);
  }

  for (unsigned MemOp : {G_LOAD, G_STORE}) {
    for (auto Ty : {s8, s16, s32, p0})
      LegacyInfo.setAction({MemOp, Ty}, LegacyLegalizeActions::Legal);

    // And everything's fine in addrspace 0.
    LegacyInfo.setAction({MemOp, 1, p0}, LegacyLegalizeActions::Legal);
  }

  // Pointer-handling
  LegacyInfo.setAction({G_FRAME_INDEX, p0}, LegacyLegalizeActions::Legal);
  LegacyInfo.setAction({G_GLOBAL_VALUE, p0}, LegacyLegalizeActions::Legal);

  LegacyInfo.setAction({G_PTR_ADD, p0}, LegacyLegalizeActions::Legal);
  LegacyInfo.setAction({G_PTR_ADD, 1, s32}, LegacyLegalizeActions::Legal);

  if (!Subtarget.is64Bit()) {
    getActionDefinitionsBuilder(G_PTRTOINT)
        .legalForCartesianProduct({s1, s8, s16, s32}, {p0})
        .maxScalar(0, s32)
        .widenScalarToNextPow2(0, /*Min*/ 8);
    getActionDefinitionsBuilder(G_INTTOPTR).legalFor({{p0, s32}});

    // Shifts and SDIV
    getActionDefinitionsBuilder(
        {G_SDIV, G_SREM, G_UDIV, G_UREM})
        .legalFor({s8, s16, s32})
        .clampScalar(0, s8, s32);

    getActionDefinitionsBuilder(
        {G_SHL, G_LSHR, G_ASHR})
        .legalFor({{s8, s8}, {s16, s8}, {s32, s8}})
        .clampScalar(0, s8, s32)
        .clampScalar(1, s8, s8);

    // Comparison
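    // (x86 comparisons materialize their result as an 8-bit SETcc value,
    // hence the s8 destination.)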
    getActionDefinitionsBuilder(G_ICMP)
        .legalForCartesianProduct({s8}, {s8, s16, s32, p0})
        .clampScalar(0, s8, s8);
  }

  // Control-flow
  LegacyInfo.setAction({G_BRCOND, s1}, LegacyLegalizeActions::Legal);

  // Constants
  for (auto Ty : {s8, s16, s32, p0})
    LegacyInfo.setAction({TargetOpcode::G_CONSTANT, Ty},
                         LegacyLegalizeActions::Legal);

  // Extensions
  for (auto Ty : {s8, s16, s32}) {
    LegacyInfo.setAction({G_ZEXT, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_SEXT, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_ANYEXT, Ty}, LegacyLegalizeActions::Legal);
  }
  LegacyInfo.setAction({G_ANYEXT, s128}, LegacyLegalizeActions::Legal);
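  // There is no native sext_inreg instruction; always lower it (typically to
  // a shift-left/shift-right pair).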
  getActionDefinitionsBuilder(G_SEXT_INREG).lower();

  // Merge/Unmerge
  for (const auto &Ty : {s16, s32, s64}) {
    LegacyInfo.setAction({G_MERGE_VALUES, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
                         LegacyLegalizeActions::Legal);
  }
  for (const auto &Ty : {s8, s16, s32}) {
    LegacyInfo.setAction({G_MERGE_VALUES, 1, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, Ty}, LegacyLegalizeActions::Legal);
  }
}

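// These rules only apply to 64-bit targets; on 32-bit x86 this is a no-op.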
void X86LegalizerInfo::setLegalizerInfo64bit() {

  if (!Subtarget.is64Bit())
    return;

  const LLT p0 = LLT::pointer(0, TM.getPointerSizeInBits(0));
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT s128 = LLT::scalar(128);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  LegacyInfo.setAction({G_IMPLICIT_DEF, s64}, LegacyLegalizeActions::Legal);
  // Need to have that, as tryFoldImplicitDef will create this pattern:
  // s128 = EXTEND (G_IMPLICIT_DEF s32/s64) -> s128 = G_IMPLICIT_DEF
  LegacyInfo.setAction({G_IMPLICIT_DEF, s128}, LegacyLegalizeActions::Legal);

  LegacyInfo.setAction({G_PHI, s64}, LegacyLegalizeActions::Legal);

  for (unsigned BinOp : {G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
    LegacyInfo.setAction({BinOp, s64}, LegacyLegalizeActions::Legal);

  for (unsigned MemOp : {G_LOAD, G_STORE})
    LegacyInfo.setAction({MemOp, s64}, LegacyLegalizeActions::Legal);

  // Pointer-handling
  LegacyInfo.setAction({G_PTR_ADD, 1, s64}, LegacyLegalizeActions::Legal);
  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalForCartesianProduct({s1, s8, s16, s32, s64}, {p0})
      .maxScalar(0, s64)
      .widenScalarToNextPow2(0, /*Min*/ 8);
  getActionDefinitionsBuilder(G_INTTOPTR).legalFor({{p0, s64}});

  // Constants
  LegacyInfo.setAction({TargetOpcode::G_CONSTANT, s64},
                       LegacyLegalizeActions::Legal);

  // Extensions
  for (unsigned extOp : {G_ZEXT, G_SEXT, G_ANYEXT}) {
    LegacyInfo.setAction({extOp, s64}, LegacyLegalizeActions::Legal);
  }

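  // Scalar int <-> fp conversions: both the integer and the fp side are
  // clamped to 32/64 bits, with odd sizes widened to the next power of two.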
  getActionDefinitionsBuilder(G_SITOFP)
      .legalForCartesianProduct({s32, s64})
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1)
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder(G_FPTOSI)
      .legalForCartesianProduct({s32, s64})
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(1);

  // Comparison
  getActionDefinitionsBuilder(G_ICMP)
      .legalForCartesianProduct({s8}, {s8, s16, s32, s64, p0})
      .clampScalar(0, s8, s8);

  getActionDefinitionsBuilder(G_FCMP)
      .legalForCartesianProduct({s8}, {s32, s64})
      .clampScalar(0, s8, s8)
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1);

  // Divisions
  getActionDefinitionsBuilder(
      {G_SDIV, G_SREM, G_UDIV, G_UREM})
      .legalFor({s8, s16, s32, s64})
      .clampScalar(0, s8, s64);

  // Shifts
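  // (Variable shift counts live in CL, so the shift-amount type, index 1, is
  // clamped to s8.)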
  getActionDefinitionsBuilder(
      {G_SHL, G_LSHR, G_ASHR})
      .legalFor({{s8, s8}, {s16, s8}, {s32, s8}, {s64, s8}})
      .clampScalar(0, s8, s64)
      .clampScalar(1, s8, s8);

  // Merge/Unmerge
  LegacyInfo.setAction({G_MERGE_VALUES, s128}, LegacyLegalizeActions::Legal);
  LegacyInfo.setAction({G_UNMERGE_VALUES, 1, s128},
                       LegacyLegalizeActions::Legal);
  LegacyInfo.setAction({G_MERGE_VALUES, 1, s128}, LegacyLegalizeActions::Legal);
  LegacyInfo.setAction({G_UNMERGE_VALUES, s128}, LegacyLegalizeActions::Legal);
}

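// The helpers below add vector rules per ISA extension; each one returns
// early when the required subtarget feature is missing.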
void X86LegalizerInfo::setLegalizerInfoSSE1() {
  if (!Subtarget.hasSSE1())
    return;

  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT v4s32 = LLT::fixed_vector(4, 32);
  const LLT v2s64 = LLT::fixed_vector(2, 64);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (unsigned BinOp : {G_FADD, G_FSUB, G_FMUL, G_FDIV})
    for (auto Ty : {s32, v4s32})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

  for (unsigned MemOp : {G_LOAD, G_STORE})
    for (auto Ty : {v4s32, v2s64})
      LegacyInfo.setAction({MemOp, Ty}, LegacyLegalizeActions::Legal);

  // Constants
  LegacyInfo.setAction({TargetOpcode::G_FCONSTANT, s32},
                       LegacyLegalizeActions::Legal);

  // Merge/Unmerge
  for (const auto &Ty : {v4s32, v2s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
                         LegacyLegalizeActions::Legal);
  }
  LegacyInfo.setAction({G_MERGE_VALUES, 1, s64}, LegacyLegalizeActions::Legal);
  LegacyInfo.setAction({G_UNMERGE_VALUES, s64}, LegacyLegalizeActions::Legal);
}

void X86LegalizerInfo::setLegalizerInfoSSE2() {
  if (!Subtarget.hasSSE2())
    return;

  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT v16s8 = LLT::fixed_vector(16, 8);
  const LLT v8s16 = LLT::fixed_vector(8, 16);
  const LLT v4s32 = LLT::fixed_vector(4, 32);
  const LLT v2s64 = LLT::fixed_vector(2, 64);

  const LLT v32s8 = LLT::fixed_vector(32, 8);
  const LLT v16s16 = LLT::fixed_vector(16, 16);
  const LLT v8s32 = LLT::fixed_vector(8, 32);
  const LLT v4s64 = LLT::fixed_vector(4, 64);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (unsigned BinOp : {G_FADD, G_FSUB, G_FMUL, G_FDIV})
    for (auto Ty : {s64, v2s64})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

  for (unsigned BinOp : {G_ADD, G_SUB})
    for (auto Ty : {v16s8, v8s16, v4s32, v2s64})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

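  // PMULLW (16-bit elements) is the only full element-wise vector multiply in
  // plain SSE2; wider-element multiplies become legal with the later ISA
  // extensions below.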
  LegacyInfo.setAction({G_MUL, v8s16}, LegacyLegalizeActions::Legal);

  LegacyInfo.setAction({G_FPEXT, s64}, LegacyLegalizeActions::Legal);
  LegacyInfo.setAction({G_FPEXT, 1, s32}, LegacyLegalizeActions::Legal);

  LegacyInfo.setAction({G_FPTRUNC, s32}, LegacyLegalizeActions::Legal);
  LegacyInfo.setAction({G_FPTRUNC, 1, s64}, LegacyLegalizeActions::Legal);

  // Constants
  LegacyInfo.setAction({TargetOpcode::G_FCONSTANT, s64},
                       LegacyLegalizeActions::Legal);

  // Merge/Unmerge
  for (const auto &Ty :
       {v16s8, v32s8, v8s16, v16s16, v4s32, v8s32, v2s64, v4s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
                         LegacyLegalizeActions::Legal);
  }
  for (const auto &Ty : {v16s8, v8s16, v4s32, v2s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, 1, Ty},
                         LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, Ty}, LegacyLegalizeActions::Legal);
  }
}

void X86LegalizerInfo::setLegalizerInfoSSE41() {
  if (!Subtarget.hasSSE41())
    return;

  const LLT v4s32 = LLT::fixed_vector(4, 32);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  LegacyInfo.setAction({G_MUL, v4s32}, LegacyLegalizeActions::Legal);
}

void X86LegalizerInfo::setLegalizerInfoAVX() {
  if (!Subtarget.hasAVX())
    return;

  const LLT v16s8 = LLT::fixed_vector(16, 8);
  const LLT v8s16 = LLT::fixed_vector(8, 16);
  const LLT v4s32 = LLT::fixed_vector(4, 32);
  const LLT v2s64 = LLT::fixed_vector(2, 64);

  const LLT v32s8 = LLT::fixed_vector(32, 8);
  const LLT v64s8 = LLT::fixed_vector(64, 8);
  const LLT v16s16 = LLT::fixed_vector(16, 16);
  const LLT v32s16 = LLT::fixed_vector(32, 16);
  const LLT v8s32 = LLT::fixed_vector(8, 32);
  const LLT v16s32 = LLT::fixed_vector(16, 32);
  const LLT v4s64 = LLT::fixed_vector(4, 64);
  const LLT v8s64 = LLT::fixed_vector(8, 64);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (unsigned MemOp : {G_LOAD, G_STORE})
    for (auto Ty : {v8s32, v4s64})
      LegacyInfo.setAction({MemOp, Ty}, LegacyLegalizeActions::Legal);

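  // G_INSERT/G_EXTRACT of 128-bit subvectors into/from 256-bit vectors
  // (VINSERTF128 / VEXTRACTF128 and friends).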
  for (auto Ty : {v32s8, v16s16, v8s32, v4s64}) {
    LegacyInfo.setAction({G_INSERT, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_EXTRACT, 1, Ty}, LegacyLegalizeActions::Legal);
  }
  for (auto Ty : {v16s8, v8s16, v4s32, v2s64}) {
    LegacyInfo.setAction({G_INSERT, 1, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_EXTRACT, Ty}, LegacyLegalizeActions::Legal);
  }
  // Merge/Unmerge
  for (const auto &Ty :
       {v32s8, v64s8, v16s16, v32s16, v8s32, v16s32, v4s64, v8s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
                         LegacyLegalizeActions::Legal);
  }
  for (const auto &Ty :
       {v16s8, v32s8, v8s16, v16s16, v4s32, v8s32, v2s64, v4s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, 1, Ty},
                         LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, Ty}, LegacyLegalizeActions::Legal);
  }
}

void X86LegalizerInfo::setLegalizerInfoAVX2() {
  if (!Subtarget.hasAVX2())
    return;

  const LLT v32s8 = LLT::fixed_vector(32, 8);
  const LLT v16s16 = LLT::fixed_vector(16, 16);
  const LLT v8s32 = LLT::fixed_vector(8, 32);
  const LLT v4s64 = LLT::fixed_vector(4, 64);

  const LLT v64s8 = LLT::fixed_vector(64, 8);
  const LLT v32s16 = LLT::fixed_vector(32, 16);
  const LLT v16s32 = LLT::fixed_vector(16, 32);
  const LLT v8s64 = LLT::fixed_vector(8, 64);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (unsigned BinOp : {G_ADD, G_SUB})
    for (auto Ty : {v32s8, v16s16, v8s32, v4s64})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

  for (auto Ty : {v16s16, v8s32})
    LegacyInfo.setAction({G_MUL, Ty}, LegacyLegalizeActions::Legal);

  // Merge/Unmerge
  for (const auto &Ty : {v64s8, v32s16, v16s32, v8s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
                         LegacyLegalizeActions::Legal);
  }
  for (const auto &Ty : {v32s8, v16s16, v8s32, v4s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, 1, Ty},
                         LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, Ty}, LegacyLegalizeActions::Legal);
  }
}

void X86LegalizerInfo::setLegalizerInfoAVX512() {
  if (!Subtarget.hasAVX512())
    return;

  const LLT v16s8 = LLT::fixed_vector(16, 8);
  const LLT v8s16 = LLT::fixed_vector(8, 16);
  const LLT v4s32 = LLT::fixed_vector(4, 32);
  const LLT v2s64 = LLT::fixed_vector(2, 64);

  const LLT v32s8 = LLT::fixed_vector(32, 8);
  const LLT v16s16 = LLT::fixed_vector(16, 16);
  const LLT v8s32 = LLT::fixed_vector(8, 32);
  const LLT v4s64 = LLT::fixed_vector(4, 64);

  const LLT v64s8 = LLT::fixed_vector(64, 8);
  const LLT v32s16 = LLT::fixed_vector(32, 16);
  const LLT v16s32 = LLT::fixed_vector(16, 32);
  const LLT v8s64 = LLT::fixed_vector(8, 64);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (unsigned BinOp : {G_ADD, G_SUB})
    for (auto Ty : {v16s32, v8s64})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

  LegacyInfo.setAction({G_MUL, v16s32}, LegacyLegalizeActions::Legal);

  for (unsigned MemOp : {G_LOAD, G_STORE})
    for (auto Ty : {v16s32, v8s64})
      LegacyInfo.setAction({MemOp, Ty}, LegacyLegalizeActions::Legal);

  for (auto Ty : {v64s8, v32s16, v16s32, v8s64}) {
    LegacyInfo.setAction({G_INSERT, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_EXTRACT, 1, Ty}, LegacyLegalizeActions::Legal);
  }
  for (auto Ty : {v32s8, v16s16, v8s32, v4s64, v16s8, v8s16, v4s32, v2s64}) {
    LegacyInfo.setAction({G_INSERT, 1, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_EXTRACT, Ty}, LegacyLegalizeActions::Legal);
  }

  /************ VLX *******************/
  if (!Subtarget.hasVLX())
    return;

  for (auto Ty : {v4s32, v8s32})
    LegacyInfo.setAction({G_MUL, Ty}, LegacyLegalizeActions::Legal);
}

void X86LegalizerInfo::setLegalizerInfoAVX512DQ() {
  if (!(Subtarget.hasAVX512() && Subtarget.hasDQI()))
    return;

  const LLT v8s64 = LLT::fixed_vector(8, 64);

  auto &LegacyInfo = getLegacyLegalizerInfo();

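  // AVX512DQ provides VPMULLQ, making 64-bit element multiplies legal.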
  LegacyInfo.setAction({G_MUL, v8s64}, LegacyLegalizeActions::Legal);

  /************ VLX *******************/
  if (!Subtarget.hasVLX())
    return;

  const LLT v2s64 = LLT::fixed_vector(2, 64);
  const LLT v4s64 = LLT::fixed_vector(4, 64);

  for (auto Ty : {v2s64, v4s64})
    LegacyInfo.setAction({G_MUL, Ty}, LegacyLegalizeActions::Legal);
}

void X86LegalizerInfo::setLegalizerInfoAVX512BW() {
  if (!(Subtarget.hasAVX512() && Subtarget.hasBWI()))
    return;

  const LLT v64s8 = LLT::fixed_vector(64, 8);
  const LLT v32s16 = LLT::fixed_vector(32, 16);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (unsigned BinOp : {G_ADD, G_SUB})
    for (auto Ty : {v64s8, v32s16})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

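  // AVX512BW provides the 512-bit VPMULLW form for 16-bit elements.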
  LegacyInfo.setAction({G_MUL, v32s16}, LegacyLegalizeActions::Legal);

  /************ VLX *******************/
  if (!Subtarget.hasVLX())
    return;

  const LLT v8s16 = LLT::fixed_vector(8, 16);
  const LLT v16s16 = LLT::fixed_vector(16, 16);

  for (auto Ty : {v8s16, v16s16})
    LegacyInfo.setAction({G_MUL, Ty}, LegacyLegalizeActions::Legal);
}