llvm/lib/Target/X86/X86ISelLowering.cpp
1 //===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that X86 uses to lower LLVM code into a
10 // selection DAG.
12 //===----------------------------------------------------------------------===//
14 #include "X86ISelLowering.h"
15 #include "MCTargetDesc/X86ShuffleDecode.h"
16 #include "X86.h"
17 #include "X86CallingConv.h"
18 #include "X86FrameLowering.h"
19 #include "X86InstrBuilder.h"
20 #include "X86IntrinsicsInfo.h"
21 #include "X86MachineFunctionInfo.h"
22 #include "X86TargetMachine.h"
23 #include "X86TargetObjectFile.h"
24 #include "llvm/ADT/SmallBitVector.h"
25 #include "llvm/ADT/SmallSet.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/ADT/StringExtras.h"
28 #include "llvm/ADT/StringSwitch.h"
29 #include "llvm/Analysis/BlockFrequencyInfo.h"
30 #include "llvm/Analysis/ObjCARCUtil.h"
31 #include "llvm/Analysis/ProfileSummaryInfo.h"
32 #include "llvm/Analysis/VectorUtils.h"
33 #include "llvm/CodeGen/IntrinsicLowering.h"
34 #include "llvm/CodeGen/MachineFrameInfo.h"
35 #include "llvm/CodeGen/MachineFunction.h"
36 #include "llvm/CodeGen/MachineInstrBuilder.h"
37 #include "llvm/CodeGen/MachineJumpTableInfo.h"
38 #include "llvm/CodeGen/MachineLoopInfo.h"
39 #include "llvm/CodeGen/MachineModuleInfo.h"
40 #include "llvm/CodeGen/MachineRegisterInfo.h"
41 #include "llvm/CodeGen/TargetLowering.h"
42 #include "llvm/CodeGen/WinEHFuncInfo.h"
43 #include "llvm/IR/CallingConv.h"
44 #include "llvm/IR/Constants.h"
45 #include "llvm/IR/DerivedTypes.h"
46 #include "llvm/IR/EHPersonalities.h"
47 #include "llvm/IR/Function.h"
48 #include "llvm/IR/GlobalAlias.h"
49 #include "llvm/IR/GlobalVariable.h"
50 #include "llvm/IR/IRBuilder.h"
51 #include "llvm/IR/Instructions.h"
52 #include "llvm/IR/Intrinsics.h"
53 #include "llvm/IR/PatternMatch.h"
54 #include "llvm/MC/MCAsmInfo.h"
55 #include "llvm/MC/MCContext.h"
56 #include "llvm/MC/MCExpr.h"
57 #include "llvm/MC/MCSymbol.h"
58 #include "llvm/Support/CommandLine.h"
59 #include "llvm/Support/Debug.h"
60 #include "llvm/Support/ErrorHandling.h"
61 #include "llvm/Support/KnownBits.h"
62 #include "llvm/Support/MathExtras.h"
63 #include "llvm/Target/TargetOptions.h"
64 #include <algorithm>
65 #include <bitset>
66 #include <cctype>
67 #include <numeric>
68 using namespace llvm;
70 #define DEBUG_TYPE "x86-isel"
72 static cl::opt<int> ExperimentalPrefInnermostLoopAlignment(
73 "x86-experimental-pref-innermost-loop-alignment", cl::init(4),
74 cl::desc(
75 "Sets the preferable loop alignment for experiments (as log2 bytes) "
76 "for innermost loops only. If specified, this option overrides "
77 "alignment set by x86-experimental-pref-loop-alignment."),
78 cl::Hidden);
80 static cl::opt<bool> MulConstantOptimization(
81 "mul-constant-optimization", cl::init(true),
82 cl::desc("Replace 'mul x, Const' with more effective instructions like "
83 "SHIFT, LEA, etc."),
84 cl::Hidden);
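// X86TargetLowering constructor: registers the legal register classes and
// configures the operation actions, load/store extensions and libcalls that
// instruction selection relies on for the selected subtarget.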
86 X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
87 const X86Subtarget &STI)
88 : TargetLowering(TM), Subtarget(STI) {
89 bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
90 MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
92 // Set up the TargetLowering object.
94 // X86 is weird. It always uses i8 for shift amounts and setcc results.
95 setBooleanContents(ZeroOrOneBooleanContent);
96 // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
97 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
99 // For 64-bit, since we have so many registers, use the ILP scheduler.
100 // For 32-bit, use the register pressure specific scheduling.
101 // For Atom, always use ILP scheduling.
102 if (Subtarget.isAtom())
103 setSchedulingPreference(Sched::ILP);
104 else if (Subtarget.is64Bit())
105 setSchedulingPreference(Sched::ILP);
106 else
107 setSchedulingPreference(Sched::RegPressure);
108 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
109 setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
111 // Bypass expensive divides and use cheaper ones.
112 if (TM.getOptLevel() >= CodeGenOptLevel::Default) {
113 if (Subtarget.hasSlowDivide32())
114 addBypassSlowDiv(32, 8);
115 if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
116 addBypassSlowDiv(64, 32);
119 // Setup Windows compiler runtime calls.
120 if (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()) {
121 static const struct {
122 const RTLIB::Libcall Op;
123 const char * const Name;
124 const CallingConv::ID CC;
125 } LibraryCalls[] = {
126 { RTLIB::SDIV_I64, "_alldiv", CallingConv::X86_StdCall },
127 { RTLIB::UDIV_I64, "_aulldiv", CallingConv::X86_StdCall },
128 { RTLIB::SREM_I64, "_allrem", CallingConv::X86_StdCall },
129 { RTLIB::UREM_I64, "_aullrem", CallingConv::X86_StdCall },
130 { RTLIB::MUL_I64, "_allmul", CallingConv::X86_StdCall },
133 for (const auto &LC : LibraryCalls) {
134 setLibcallName(LC.Op, LC.Name);
135 setLibcallCallingConv(LC.Op, LC.CC);
139 if (Subtarget.getTargetTriple().isOSMSVCRT()) {
140 // MSVCRT doesn't have powi; fall back to pow
141 setLibcallName(RTLIB::POWI_F32, nullptr);
142 setLibcallName(RTLIB::POWI_F64, nullptr);
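// The widest lock-free atomic we support follows the available
// compare-and-exchange instruction: CMPXCHG16B gives 128 bits, CMPXCHG8B
// gives 64 bits, and plain CMPXCHG otherwise caps it at 32 bits.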
145 if (Subtarget.canUseCMPXCHG16B())
146 setMaxAtomicSizeInBitsSupported(128);
147 else if (Subtarget.canUseCMPXCHG8B())
148 setMaxAtomicSizeInBitsSupported(64);
149 else
150 setMaxAtomicSizeInBitsSupported(32);
152 setMaxDivRemBitWidthSupported(Subtarget.is64Bit() ? 128 : 64);
154 setMaxLargeFPConvertBitWidthSupported(128);
156 // Set up the register classes.
157 addRegisterClass(MVT::i8, &X86::GR8RegClass);
158 addRegisterClass(MVT::i16, &X86::GR16RegClass);
159 addRegisterClass(MVT::i32, &X86::GR32RegClass);
160 if (Subtarget.is64Bit())
161 addRegisterClass(MVT::i64, &X86::GR64RegClass);
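// Sign-extending loads from i1 are not legal for any integer type; promote
// them to a wider load.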
163 for (MVT VT : MVT::integer_valuetypes())
164 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
166 // We don't accept any truncstore of integer registers.
167 setTruncStoreAction(MVT::i64, MVT::i32, Expand);
168 setTruncStoreAction(MVT::i64, MVT::i16, Expand);
169 setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
170 setTruncStoreAction(MVT::i32, MVT::i16, Expand);
171 setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
172 setTruncStoreAction(MVT::i16, MVT::i8, Expand);
174 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
176 // SETOEQ and SETUNE require checking two conditions.
177 for (auto VT : {MVT::f32, MVT::f64, MVT::f80}) {
178 setCondCodeAction(ISD::SETOEQ, VT, Expand);
179 setCondCodeAction(ISD::SETUNE, VT, Expand);
182 // Integer absolute.
183 if (Subtarget.canUseCMOV()) {
184 setOperationAction(ISD::ABS , MVT::i16 , Custom);
185 setOperationAction(ISD::ABS , MVT::i32 , Custom);
186 if (Subtarget.is64Bit())
187 setOperationAction(ISD::ABS , MVT::i64 , Custom);
190 // Absolute difference.
191 for (auto Op : {ISD::ABDS, ISD::ABDU}) {
192 setOperationAction(Op , MVT::i8 , Custom);
193 setOperationAction(Op , MVT::i16 , Custom);
194 setOperationAction(Op , MVT::i32 , Custom);
195 if (Subtarget.is64Bit())
196 setOperationAction(Op , MVT::i64 , Custom);
199 // Signed saturation subtraction.
200 setOperationAction(ISD::SSUBSAT , MVT::i8 , Custom);
201 setOperationAction(ISD::SSUBSAT , MVT::i16 , Custom);
202 setOperationAction(ISD::SSUBSAT , MVT::i32 , Custom);
203 if (Subtarget.is64Bit())
204 setOperationAction(ISD::SSUBSAT , MVT::i64 , Custom);
206 // Funnel shifts.
207 for (auto ShiftOp : {ISD::FSHL, ISD::FSHR}) {
208 // For slow shld targets we only lower for code size.
209 LegalizeAction ShiftDoubleAction = Subtarget.isSHLDSlow() ? Custom : Legal;
211 setOperationAction(ShiftOp , MVT::i8 , Custom);
212 setOperationAction(ShiftOp , MVT::i16 , Custom);
213 setOperationAction(ShiftOp , MVT::i32 , ShiftDoubleAction);
214 if (Subtarget.is64Bit())
215 setOperationAction(ShiftOp , MVT::i64 , ShiftDoubleAction);
218 if (!Subtarget.useSoftFloat()) {
219 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
220 // operation.
221 setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
222 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i8, Promote);
223 setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
224 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i16, Promote);
225 // We have an algorithm for SSE2, and we turn this into a 64-bit
226 // FILD or VCVTUSI2SS/SD for other targets.
227 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
228 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
229 // We have an algorithm for SSE2->double, and we turn this into a
230 // 64-bit FILD followed by conditional FADD for other targets.
231 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
232 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
234 // Promote i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
235 // this operation.
236 setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
237 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i8, Promote);
238 // SSE has no i16 to fp conversion, only i32. We promote in the handler
239 // to allow f80 to use i16 and f64 to use i16 with sse1 only
240 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
241 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i16, Custom);
242 // f32 and f64 cases are Legal with SSE1/SSE2, f80 case is not
243 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
244 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
245 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
246 // are Legal, f80 is custom lowered.
247 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
248 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
250 // Promote i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have
251 // this operation.
252 setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
253 // FIXME: This doesn't generate invalid exception when it should. PR44019.
254 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i8, Promote);
255 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
256 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i16, Custom);
257 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
258 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
259 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
260 // are Legal, f80 is custom lowered.
261 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
262 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
264 // Handle FP_TO_UINT by promoting the destination to a larger signed
265 // conversion.
266 setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
267 // FIXME: This doesn't generate invalid exception when it should. PR44019.
268 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i8, Promote);
269 setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
270 // FIXME: This doesn't generate invalid exception when it should. PR44019.
271 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i16, Promote);
272 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
273 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
274 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
275 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
277 setOperationAction(ISD::LRINT, MVT::f32, Custom);
278 setOperationAction(ISD::LRINT, MVT::f64, Custom);
279 setOperationAction(ISD::LLRINT, MVT::f32, Custom);
280 setOperationAction(ISD::LLRINT, MVT::f64, Custom);
282 if (!Subtarget.is64Bit()) {
283 setOperationAction(ISD::LRINT, MVT::i64, Custom);
284 setOperationAction(ISD::LLRINT, MVT::i64, Custom);
288 if (Subtarget.hasSSE2()) {
289 // Custom lowering for saturating float to int conversions.
290 // We handle promotion to larger result types manually.
291 for (MVT VT : { MVT::i8, MVT::i16, MVT::i32 }) {
292 setOperationAction(ISD::FP_TO_UINT_SAT, VT, Custom);
293 setOperationAction(ISD::FP_TO_SINT_SAT, VT, Custom);
295 if (Subtarget.is64Bit()) {
296 setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);
297 setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);
301 // Handle address space casts between mixed sized pointers.
302 setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
303 setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
305 // TODO: when we have SSE, these could be more efficient, by using movd/movq.
306 if (!Subtarget.hasSSE2()) {
307 setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
308 setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
309 if (Subtarget.is64Bit()) {
310 setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
311 // Without SSE, i64->f64 goes through memory.
312 setOperationAction(ISD::BITCAST , MVT::i64 , Expand);
314 } else if (!Subtarget.is64Bit())
315 setOperationAction(ISD::BITCAST , MVT::i64 , Custom);
317 // Scalar integer divide and remainder are lowered to use operations that
318 // produce two results, to match the available instructions. This exposes
319 // the two-result form to trivial CSE, which is able to combine x/y and x%y
320 // into a single instruction.
322 // Scalar integer multiply-high is also lowered to use two-result
323 // operations, to match the available instructions. However, plain multiply
324 // (low) operations are left as Legal, as there are single-result
325 // instructions for this in x86. Using the two-result multiply instructions
326 // when both high and low results are needed must be arranged by dagcombine.
327 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
328 setOperationAction(ISD::MULHS, VT, Expand);
329 setOperationAction(ISD::MULHU, VT, Expand);
330 setOperationAction(ISD::SDIV, VT, Expand);
331 setOperationAction(ISD::UDIV, VT, Expand);
332 setOperationAction(ISD::SREM, VT, Expand);
333 setOperationAction(ISD::UREM, VT, Expand);
336 setOperationAction(ISD::BR_JT , MVT::Other, Expand);
337 setOperationAction(ISD::BRCOND , MVT::Other, Custom);
338 for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
339 MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
340 setOperationAction(ISD::BR_CC, VT, Expand);
341 setOperationAction(ISD::SELECT_CC, VT, Expand);
343 if (Subtarget.is64Bit())
344 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
345 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
346 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
347 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
349 setOperationAction(ISD::FREM , MVT::f32 , Expand);
350 setOperationAction(ISD::FREM , MVT::f64 , Expand);
351 setOperationAction(ISD::FREM , MVT::f80 , Expand);
352 setOperationAction(ISD::FREM , MVT::f128 , Expand);
354 if (!Subtarget.useSoftFloat() && Subtarget.hasX87()) {
355 setOperationAction(ISD::GET_ROUNDING , MVT::i32 , Custom);
356 setOperationAction(ISD::SET_ROUNDING , MVT::Other, Custom);
357 setOperationAction(ISD::GET_FPENV_MEM , MVT::Other, Custom);
358 setOperationAction(ISD::SET_FPENV_MEM , MVT::Other, Custom);
359 setOperationAction(ISD::RESET_FPENV , MVT::Other, Custom);
362 // Promote the i8 variants and force them on up to i32 which has a shorter
363 // encoding.
364 setOperationPromotedToType(ISD::CTTZ , MVT::i8 , MVT::i32);
365 setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
366 // Promote the i16 variants as well: tzcntw has a false dependency on Intel
367 // CPUs, and for BSF we emit a REP prefix to encode it as TZCNT on modern
368 // CPUs, so it makes sense to promote that too.
369 setOperationPromotedToType(ISD::CTTZ , MVT::i16 , MVT::i32);
370 setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , MVT::i32);
372 if (!Subtarget.hasBMI()) {
373 setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
374 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Legal);
375 if (Subtarget.is64Bit()) {
376 setOperationAction(ISD::CTTZ , MVT::i64 , Custom);
377 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
381 if (Subtarget.hasLZCNT()) {
382 // When promoting the i8 variants, force them to i32 for a shorter
383 // encoding.
384 setOperationPromotedToType(ISD::CTLZ , MVT::i8 , MVT::i32);
385 setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
386 } else {
387 for (auto VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
388 if (VT == MVT::i64 && !Subtarget.is64Bit())
389 continue;
390 setOperationAction(ISD::CTLZ , VT, Custom);
391 setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
395 for (auto Op : {ISD::FP16_TO_FP, ISD::STRICT_FP16_TO_FP, ISD::FP_TO_FP16,
396 ISD::STRICT_FP_TO_FP16}) {
397 // Special handling for half-precision floating point conversions.
398 // If we don't have F16C support, then lower half float conversions
399 // into library calls.
400 setOperationAction(
401 Op, MVT::f32,
402 (!Subtarget.useSoftFloat() && Subtarget.hasF16C()) ? Custom : Expand);
403 // There's never any support for operations beyond MVT::f32.
404 setOperationAction(Op, MVT::f64, Expand);
405 setOperationAction(Op, MVT::f80, Expand);
406 setOperationAction(Op, MVT::f128, Expand);
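// No scalar FP type has legal extending loads from, or truncating stores
// to, f16/bf16; bf16 conversions are likewise expanded or custom lowered.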
409 for (MVT VT : {MVT::f32, MVT::f64, MVT::f80, MVT::f128}) {
410 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
411 setLoadExtAction(ISD::EXTLOAD, VT, MVT::bf16, Expand);
412 setTruncStoreAction(VT, MVT::f16, Expand);
413 setTruncStoreAction(VT, MVT::bf16, Expand);
415 setOperationAction(ISD::BF16_TO_FP, VT, Expand);
416 setOperationAction(ISD::FP_TO_BF16, VT, Custom);
419 setOperationAction(ISD::PARITY, MVT::i8, Custom);
420 setOperationAction(ISD::PARITY, MVT::i16, Custom);
421 setOperationAction(ISD::PARITY, MVT::i32, Custom);
422 if (Subtarget.is64Bit())
423 setOperationAction(ISD::PARITY, MVT::i64, Custom);
424 if (Subtarget.hasPOPCNT()) {
425 setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
426 // popcntw is longer to encode than popcntl and also has a false dependency
427 // on the dest that popcntl hasn't had since Cannon Lake.
428 setOperationPromotedToType(ISD::CTPOP, MVT::i16, MVT::i32);
429 } else {
430 setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
431 setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
432 setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
433 if (Subtarget.is64Bit())
434 setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
435 else
436 setOperationAction(ISD::CTPOP , MVT::i64 , Custom);
439 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
441 if (!Subtarget.hasMOVBE())
442 setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
444 // X86 wants to expand cmov itself.
445 for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
446 setOperationAction(ISD::SELECT, VT, Custom);
447 setOperationAction(ISD::SETCC, VT, Custom);
448 setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
449 setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
451 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
452 if (VT == MVT::i64 && !Subtarget.is64Bit())
453 continue;
454 setOperationAction(ISD::SELECT, VT, Custom);
455 setOperationAction(ISD::SETCC, VT, Custom);
458 // Custom action for SELECT MMX and expand action for SELECT_CC MMX
459 setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
460 setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);
462 setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
463 // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
464 // LLVM/Clang supports zero-cost DWARF and SEH exception handling.
465 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
466 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
467 setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
468 if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
469 setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
471 // Darwin ABI issue.
472 for (auto VT : { MVT::i32, MVT::i64 }) {
473 if (VT == MVT::i64 && !Subtarget.is64Bit())
474 continue;
475 setOperationAction(ISD::ConstantPool , VT, Custom);
476 setOperationAction(ISD::JumpTable , VT, Custom);
477 setOperationAction(ISD::GlobalAddress , VT, Custom);
478 setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
479 setOperationAction(ISD::ExternalSymbol , VT, Custom);
480 setOperationAction(ISD::BlockAddress , VT, Custom);
483 // 64-bit shl, sra, srl (iff 32-bit x86)
484 for (auto VT : { MVT::i32, MVT::i64 }) {
485 if (VT == MVT::i64 && !Subtarget.is64Bit())
486 continue;
487 setOperationAction(ISD::SHL_PARTS, VT, Custom);
488 setOperationAction(ISD::SRA_PARTS, VT, Custom);
489 setOperationAction(ISD::SRL_PARTS, VT, Custom);
492 if (Subtarget.hasSSEPrefetch() || Subtarget.hasThreeDNow())
493 setOperationAction(ISD::PREFETCH , MVT::Other, Custom);
495 setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
497 // Expand certain atomics
498 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
499 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
500 setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
501 setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
502 setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
503 setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
504 setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
505 setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
508 if (!Subtarget.is64Bit())
509 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
511 if (Subtarget.canUseCMPXCHG16B())
512 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
514 // FIXME - use subtarget debug flags
515 if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
516 !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
517 TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
518 setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
521 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
522 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);
524 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
525 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
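// TRAP and DEBUGTRAP map directly onto single x86 instructions (ud2 and
// int3), so they are simply Legal.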
527 setOperationAction(ISD::TRAP, MVT::Other, Legal);
528 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
529 if (Subtarget.isTargetPS())
530 setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);
531 else
532 setOperationAction(ISD::UBSANTRAP, MVT::Other, Legal);
534 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
535 setOperationAction(ISD::VASTART , MVT::Other, Custom);
536 setOperationAction(ISD::VAEND , MVT::Other, Expand);
537 bool Is64Bit = Subtarget.is64Bit();
538 setOperationAction(ISD::VAARG, MVT::Other, Is64Bit ? Custom : Expand);
539 setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);
541 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
542 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
544 setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
546 // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
547 setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
548 setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);
550 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
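// Helper used below: apply a single legalize action to the common scalar FP
// operations for an f16/bf16-style type. FCOPYSIGN is always expanded and
// SELECT always custom lowered, regardless of the action chosen.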
552 auto setF16Action = [&] (MVT VT, LegalizeAction Action) {
553 setOperationAction(ISD::FABS, VT, Action);
554 setOperationAction(ISD::FNEG, VT, Action);
555 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
556 setOperationAction(ISD::FREM, VT, Action);
557 setOperationAction(ISD::FMA, VT, Action);
558 setOperationAction(ISD::FMINNUM, VT, Action);
559 setOperationAction(ISD::FMAXNUM, VT, Action);
560 setOperationAction(ISD::FMINIMUM, VT, Action);
561 setOperationAction(ISD::FMAXIMUM, VT, Action);
562 setOperationAction(ISD::FSIN, VT, Action);
563 setOperationAction(ISD::FCOS, VT, Action);
564 setOperationAction(ISD::FSINCOS, VT, Action);
565 setOperationAction(ISD::FSQRT, VT, Action);
566 setOperationAction(ISD::FPOW, VT, Action);
567 setOperationAction(ISD::FLOG, VT, Action);
568 setOperationAction(ISD::FLOG2, VT, Action);
569 setOperationAction(ISD::FLOG10, VT, Action);
570 setOperationAction(ISD::FEXP, VT, Action);
571 setOperationAction(ISD::FEXP2, VT, Action);
572 setOperationAction(ISD::FEXP10, VT, Action);
573 setOperationAction(ISD::FCEIL, VT, Action);
574 setOperationAction(ISD::FFLOOR, VT, Action);
575 setOperationAction(ISD::FNEARBYINT, VT, Action);
576 setOperationAction(ISD::FRINT, VT, Action);
577 setOperationAction(ISD::BR_CC, VT, Action);
578 setOperationAction(ISD::SETCC, VT, Action);
579 setOperationAction(ISD::SELECT, VT, Custom);
580 setOperationAction(ISD::SELECT_CC, VT, Action);
581 setOperationAction(ISD::FROUND, VT, Action);
582 setOperationAction(ISD::FROUNDEVEN, VT, Action);
583 setOperationAction(ISD::FTRUNC, VT, Action);
584 setOperationAction(ISD::FLDEXP, VT, Action);
587 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
588 // f16, f32 and f64 use SSE.
589 // Set up the FP register classes.
590 addRegisterClass(MVT::f16, Subtarget.hasAVX512() ? &X86::FR16XRegClass
591 : &X86::FR16RegClass);
592 addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
593 : &X86::FR32RegClass);
594 addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
595 : &X86::FR64RegClass);
597 // Disable f32->f64 extload as we can only generate this in one instruction
598 // under optsize. So it's easier to pattern match (fpext (load)) for that
599 // case instead of needing to emit 2 instructions for extload in the
600 // non-optsize case.
601 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
603 for (auto VT : { MVT::f32, MVT::f64 }) {
604 // Use ANDPD to simulate FABS.
605 setOperationAction(ISD::FABS, VT, Custom);
607 // Use XORP to simulate FNEG.
608 setOperationAction(ISD::FNEG, VT, Custom);
610 // Use ANDPD and ORPD to simulate FCOPYSIGN.
611 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
613 // These might be better off as horizontal vector ops.
614 setOperationAction(ISD::FADD, VT, Custom);
615 setOperationAction(ISD::FSUB, VT, Custom);
617 // We don't support sin/cos/fmod
618 setOperationAction(ISD::FSIN , VT, Expand);
619 setOperationAction(ISD::FCOS , VT, Expand);
620 setOperationAction(ISD::FSINCOS, VT, Expand);
623 // Half type will be promoted by default.
624 setF16Action(MVT::f16, Promote);
625 setOperationAction(ISD::FADD, MVT::f16, Promote);
626 setOperationAction(ISD::FSUB, MVT::f16, Promote);
627 setOperationAction(ISD::FMUL, MVT::f16, Promote);
628 setOperationAction(ISD::FDIV, MVT::f16, Promote);
629 setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
630 setOperationAction(ISD::FP_EXTEND, MVT::f32, Custom);
631 setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);
633 setOperationAction(ISD::STRICT_FADD, MVT::f16, Promote);
634 setOperationAction(ISD::STRICT_FSUB, MVT::f16, Promote);
635 setOperationAction(ISD::STRICT_FMUL, MVT::f16, Promote);
636 setOperationAction(ISD::STRICT_FDIV, MVT::f16, Promote);
637 setOperationAction(ISD::STRICT_FMA, MVT::f16, Promote);
638 setOperationAction(ISD::STRICT_FMINNUM, MVT::f16, Promote);
639 setOperationAction(ISD::STRICT_FMAXNUM, MVT::f16, Promote);
640 setOperationAction(ISD::STRICT_FMINIMUM, MVT::f16, Promote);
641 setOperationAction(ISD::STRICT_FMAXIMUM, MVT::f16, Promote);
642 setOperationAction(ISD::STRICT_FSQRT, MVT::f16, Promote);
643 setOperationAction(ISD::STRICT_FPOW, MVT::f16, Promote);
644 setOperationAction(ISD::STRICT_FLDEXP, MVT::f16, Promote);
645 setOperationAction(ISD::STRICT_FLOG, MVT::f16, Promote);
646 setOperationAction(ISD::STRICT_FLOG2, MVT::f16, Promote);
647 setOperationAction(ISD::STRICT_FLOG10, MVT::f16, Promote);
648 setOperationAction(ISD::STRICT_FEXP, MVT::f16, Promote);
649 setOperationAction(ISD::STRICT_FEXP2, MVT::f16, Promote);
650 setOperationAction(ISD::STRICT_FCEIL, MVT::f16, Promote);
651 setOperationAction(ISD::STRICT_FFLOOR, MVT::f16, Promote);
652 setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f16, Promote);
653 setOperationAction(ISD::STRICT_FRINT, MVT::f16, Promote);
654 setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Promote);
655 setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Promote);
656 setOperationAction(ISD::STRICT_FROUND, MVT::f16, Promote);
657 setOperationAction(ISD::STRICT_FROUNDEVEN, MVT::f16, Promote);
658 setOperationAction(ISD::STRICT_FTRUNC, MVT::f16, Promote);
659 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
660 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom);
661 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);
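// Half-precision conversions that are not custom lowered go through the
// standard compiler-rt/libgcc helpers.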
663 setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
664 setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
666 // Lower this to MOVMSK plus an AND.
667 setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
668 setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
670 } else if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1() &&
671 (UseX87 || Is64Bit)) {
672 // Use SSE for f32, x87 for f64.
673 // Set up the FP register classes.
674 addRegisterClass(MVT::f32, &X86::FR32RegClass);
675 if (UseX87)
676 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
678 // Use ANDPS to simulate FABS.
679 setOperationAction(ISD::FABS , MVT::f32, Custom);
681 // Use XORP to simulate FNEG.
682 setOperationAction(ISD::FNEG , MVT::f32, Custom);
684 if (UseX87)
685 setOperationAction(ISD::UNDEF, MVT::f64, Expand);
687 // Use ANDPS and ORPS to simulate FCOPYSIGN.
688 if (UseX87)
689 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
690 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
692 // We don't support sin/cos/fmod
693 setOperationAction(ISD::FSIN , MVT::f32, Expand);
694 setOperationAction(ISD::FCOS , MVT::f32, Expand);
695 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
697 if (UseX87) {
698 // Always expand sin/cos functions even though x87 has an instruction.
699 setOperationAction(ISD::FSIN, MVT::f64, Expand);
700 setOperationAction(ISD::FCOS, MVT::f64, Expand);
701 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
703 } else if (UseX87) {
704 // f32 and f64 in x87.
705 // Set up the FP register classes.
706 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
707 addRegisterClass(MVT::f32, &X86::RFP32RegClass);
709 for (auto VT : { MVT::f32, MVT::f64 }) {
710 setOperationAction(ISD::UNDEF, VT, Expand);
711 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
713 // Always expand sin/cos functions even though x87 has an instruction.
714 setOperationAction(ISD::FSIN , VT, Expand);
715 setOperationAction(ISD::FCOS , VT, Expand);
716 setOperationAction(ISD::FSINCOS, VT, Expand);
720 // Expand FP32 immediates into loads from the stack, save special cases.
721 if (isTypeLegal(MVT::f32)) {
722 if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
723 addLegalFPImmediate(APFloat(+0.0f)); // FLD0
724 addLegalFPImmediate(APFloat(+1.0f)); // FLD1
725 addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
726 addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
727 } else // SSE immediates.
728 addLegalFPImmediate(APFloat(+0.0f)); // xorps
730 // Expand FP64 immediates into loads from the stack, save special cases.
731 if (isTypeLegal(MVT::f64)) {
732 if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
733 addLegalFPImmediate(APFloat(+0.0)); // FLD0
734 addLegalFPImmediate(APFloat(+1.0)); // FLD1
735 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
736 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
737 } else // SSE immediates.
738 addLegalFPImmediate(APFloat(+0.0)); // xorpd
740 // Support fp16 0 immediate.
741 if (isTypeLegal(MVT::f16))
742 addLegalFPImmediate(APFloat::getZero(APFloat::IEEEhalf()));
744 // Handle constrained floating-point operations of scalar.
745 setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
746 setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
747 setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
748 setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
749 setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
750 setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
751 setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
752 setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
753 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
754 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
755 setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
756 setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
758 // We don't support FMA.
759 setOperationAction(ISD::FMA, MVT::f64, Expand);
760 setOperationAction(ISD::FMA, MVT::f32, Expand);
762 // f80 always uses X87.
763 if (UseX87) {
764 addRegisterClass(MVT::f80, &X86::RFP80RegClass);
765 setOperationAction(ISD::UNDEF, MVT::f80, Expand);
766 setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
768 APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
769 addLegalFPImmediate(TmpFlt); // FLD0
770 TmpFlt.changeSign();
771 addLegalFPImmediate(TmpFlt); // FLD0/FCHS
773 bool ignored;
774 APFloat TmpFlt2(+1.0);
775 TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
776 &ignored);
777 addLegalFPImmediate(TmpFlt2); // FLD1
778 TmpFlt2.changeSign();
779 addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
782 // Always expand sin/cos functions even though x87 has an instruction.
783 setOperationAction(ISD::FSIN , MVT::f80, Expand);
784 setOperationAction(ISD::FCOS , MVT::f80, Expand);
785 setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
787 setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
788 setOperationAction(ISD::FCEIL, MVT::f80, Expand);
789 setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
790 setOperationAction(ISD::FRINT, MVT::f80, Expand);
791 setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
792 setOperationAction(ISD::FROUNDEVEN, MVT::f80, Expand);
793 setOperationAction(ISD::FMA, MVT::f80, Expand);
794 setOperationAction(ISD::LROUND, MVT::f80, Expand);
795 setOperationAction(ISD::LLROUND, MVT::f80, Expand);
796 setOperationAction(ISD::LRINT, MVT::f80, Custom);
797 setOperationAction(ISD::LLRINT, MVT::f80, Custom);
799 // Handle constrained floating-point operations of scalar.
800 setOperationAction(ISD::STRICT_FADD , MVT::f80, Legal);
801 setOperationAction(ISD::STRICT_FSUB , MVT::f80, Legal);
802 setOperationAction(ISD::STRICT_FMUL , MVT::f80, Legal);
803 setOperationAction(ISD::STRICT_FDIV , MVT::f80, Legal);
804 setOperationAction(ISD::STRICT_FSQRT , MVT::f80, Legal);
805 if (isTypeLegal(MVT::f16)) {
806 setOperationAction(ISD::FP_EXTEND, MVT::f80, Custom);
807 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Custom);
808 } else {
809 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Legal);
811 // FIXME: When the target is 64-bit, STRICT_FP_ROUND will be overwritten
812 // as Custom.
813 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Legal);
816 // f128 uses xmm registers, but most operations require libcalls.
817 if (!Subtarget.useSoftFloat() && Subtarget.is64Bit() && Subtarget.hasSSE1()) {
818 addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
819 : &X86::VR128RegClass);
821 addLegalFPImmediate(APFloat::getZero(APFloat::IEEEquad())); // xorps
823 setOperationAction(ISD::FADD, MVT::f128, LibCall);
824 setOperationAction(ISD::STRICT_FADD, MVT::f128, LibCall);
825 setOperationAction(ISD::FSUB, MVT::f128, LibCall);
826 setOperationAction(ISD::STRICT_FSUB, MVT::f128, LibCall);
827 setOperationAction(ISD::FDIV, MVT::f128, LibCall);
828 setOperationAction(ISD::STRICT_FDIV, MVT::f128, LibCall);
829 setOperationAction(ISD::FMUL, MVT::f128, LibCall);
830 setOperationAction(ISD::STRICT_FMUL, MVT::f128, LibCall);
831 setOperationAction(ISD::FMA, MVT::f128, LibCall);
832 setOperationAction(ISD::STRICT_FMA, MVT::f128, LibCall);
834 setOperationAction(ISD::FABS, MVT::f128, Custom);
835 setOperationAction(ISD::FNEG, MVT::f128, Custom);
836 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);
838 setOperationAction(ISD::FSIN, MVT::f128, LibCall);
839 setOperationAction(ISD::STRICT_FSIN, MVT::f128, LibCall);
840 setOperationAction(ISD::FCOS, MVT::f128, LibCall);
841 setOperationAction(ISD::STRICT_FCOS, MVT::f128, LibCall);
842 setOperationAction(ISD::FSINCOS, MVT::f128, LibCall);
843 // No STRICT_FSINCOS
844 setOperationAction(ISD::FSQRT, MVT::f128, LibCall);
845 setOperationAction(ISD::STRICT_FSQRT, MVT::f128, LibCall);
847 setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
848 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Custom);
849 // We need to custom handle any FP_ROUND with an f128 input, but
850 // LegalizeDAG uses the result type to know when to run a custom handler.
851 // So we have to list all legal floating point result types here.
852 if (isTypeLegal(MVT::f32)) {
853 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
854 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
856 if (isTypeLegal(MVT::f64)) {
857 setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
858 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);
860 if (isTypeLegal(MVT::f80)) {
861 setOperationAction(ISD::FP_ROUND, MVT::f80, Custom);
862 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Custom);
865 setOperationAction(ISD::SETCC, MVT::f128, Custom);
867 setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
868 setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
869 setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f80, Expand);
870 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
871 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
872 setTruncStoreAction(MVT::f128, MVT::f80, Expand);
875 // Always use a library call for pow.
876 setOperationAction(ISD::FPOW , MVT::f32 , Expand);
877 setOperationAction(ISD::FPOW , MVT::f64 , Expand);
878 setOperationAction(ISD::FPOW , MVT::f80 , Expand);
879 setOperationAction(ISD::FPOW , MVT::f128 , Expand);
881 setOperationAction(ISD::FLOG, MVT::f80, Expand);
882 setOperationAction(ISD::FLOG2, MVT::f80, Expand);
883 setOperationAction(ISD::FLOG10, MVT::f80, Expand);
884 setOperationAction(ISD::FEXP, MVT::f80, Expand);
885 setOperationAction(ISD::FEXP2, MVT::f80, Expand);
886 setOperationAction(ISD::FEXP10, MVT::f80, Expand);
887 setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
888 setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
890 // Some FP actions are always expanded for vector types.
891 for (auto VT : { MVT::v8f16, MVT::v16f16, MVT::v32f16,
892 MVT::v4f32, MVT::v8f32, MVT::v16f32,
893 MVT::v2f64, MVT::v4f64, MVT::v8f64 }) {
894 setOperationAction(ISD::FSIN, VT, Expand);
895 setOperationAction(ISD::FSINCOS, VT, Expand);
896 setOperationAction(ISD::FCOS, VT, Expand);
897 setOperationAction(ISD::FREM, VT, Expand);
898 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
899 setOperationAction(ISD::FPOW, VT, Expand);
900 setOperationAction(ISD::FLOG, VT, Expand);
901 setOperationAction(ISD::FLOG2, VT, Expand);
902 setOperationAction(ISD::FLOG10, VT, Expand);
903 setOperationAction(ISD::FEXP, VT, Expand);
904 setOperationAction(ISD::FEXP2, VT, Expand);
905 setOperationAction(ISD::FEXP10, VT, Expand);
908 // First set operation action for all vector types to either promote
909 // (for widening) or expand (for scalarization). Then we will selectively
910 // turn on ones that can be effectively codegen'd.
911 for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
912 setOperationAction(ISD::SDIV, VT, Expand);
913 setOperationAction(ISD::UDIV, VT, Expand);
914 setOperationAction(ISD::SREM, VT, Expand);
915 setOperationAction(ISD::UREM, VT, Expand);
916 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
917 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
918 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
919 setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
920 setOperationAction(ISD::FMA, VT, Expand);
921 setOperationAction(ISD::FFLOOR, VT, Expand);
922 setOperationAction(ISD::FCEIL, VT, Expand);
923 setOperationAction(ISD::FTRUNC, VT, Expand);
924 setOperationAction(ISD::FRINT, VT, Expand);
925 setOperationAction(ISD::FNEARBYINT, VT, Expand);
926 setOperationAction(ISD::FROUNDEVEN, VT, Expand);
927 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
928 setOperationAction(ISD::MULHS, VT, Expand);
929 setOperationAction(ISD::UMUL_LOHI, VT, Expand);
930 setOperationAction(ISD::MULHU, VT, Expand);
931 setOperationAction(ISD::SDIVREM, VT, Expand);
932 setOperationAction(ISD::UDIVREM, VT, Expand);
933 setOperationAction(ISD::CTPOP, VT, Expand);
934 setOperationAction(ISD::CTTZ, VT, Expand);
935 setOperationAction(ISD::CTLZ, VT, Expand);
936 setOperationAction(ISD::ROTL, VT, Expand);
937 setOperationAction(ISD::ROTR, VT, Expand);
938 setOperationAction(ISD::BSWAP, VT, Expand);
939 setOperationAction(ISD::SETCC, VT, Expand);
940 setOperationAction(ISD::FP_TO_UINT, VT, Expand);
941 setOperationAction(ISD::FP_TO_SINT, VT, Expand);
942 setOperationAction(ISD::UINT_TO_FP, VT, Expand);
943 setOperationAction(ISD::SINT_TO_FP, VT, Expand);
944 setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand);
945 setOperationAction(ISD::TRUNCATE, VT, Expand);
946 setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
947 setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
948 setOperationAction(ISD::ANY_EXTEND, VT, Expand);
949 setOperationAction(ISD::SELECT_CC, VT, Expand);
950 for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
951 setTruncStoreAction(InnerVT, VT, Expand);
953 setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
954 setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);
956 // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
957 // types; we have to deal with them whether we ask for Expansion or not.
958 // Setting Expand causes its own optimisation problems though, so leave
959 // them legal.
960 if (VT.getVectorElementType() == MVT::i1)
961 setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
963 // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
964 // split/scalarized right now.
965 if (VT.getVectorElementType() == MVT::f16 ||
966 VT.getVectorElementType() == MVT::bf16)
967 setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
971 // FIXME: In order to prevent SSE instructions being expanded to MMX ones
972 // with -msoft-float, disable use of MMX as well.
973 if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
974 addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
975 // No operations on x86mmx supported, everything uses intrinsics.
978 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
979 addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
980 : &X86::VR128RegClass);
982 setOperationAction(ISD::FMAXIMUM, MVT::f32, Custom);
983 setOperationAction(ISD::FMINIMUM, MVT::f32, Custom);
985 setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
986 setOperationAction(ISD::FABS, MVT::v4f32, Custom);
987 setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Custom);
988 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
989 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
990 setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
991 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
992 setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
994 setOperationAction(ISD::LOAD, MVT::v2f32, Custom);
995 setOperationAction(ISD::STORE, MVT::v2f32, Custom);
997 setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
998 setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
999 setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
1000 setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
1001 setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
1004 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
1005 addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
1006 : &X86::VR128RegClass);
1008 // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
1009 // registers cannot be used even for integer operations.
1010 addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
1011 : &X86::VR128RegClass);
1012 addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
1013 : &X86::VR128RegClass);
1014 addRegisterClass(MVT::v8f16, Subtarget.hasVLX() ? &X86::VR128XRegClass
1015 : &X86::VR128RegClass);
1016 addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
1017 : &X86::VR128RegClass);
1018 addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
1019 : &X86::VR128RegClass);
1021 for (auto VT : { MVT::f64, MVT::v4f32, MVT::v2f64 }) {
1022 setOperationAction(ISD::FMAXIMUM, VT, Custom);
1023 setOperationAction(ISD::FMINIMUM, VT, Custom);
1026 for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
1027 MVT::v2i16, MVT::v4i16, MVT::v2i32 }) {
1028 setOperationAction(ISD::SDIV, VT, Custom);
1029 setOperationAction(ISD::SREM, VT, Custom);
1030 setOperationAction(ISD::UDIV, VT, Custom);
1031 setOperationAction(ISD::UREM, VT, Custom);
1034 setOperationAction(ISD::MUL, MVT::v2i8, Custom);
1035 setOperationAction(ISD::MUL, MVT::v4i8, Custom);
1036 setOperationAction(ISD::MUL, MVT::v8i8, Custom);
1038 setOperationAction(ISD::MUL, MVT::v16i8, Custom);
1039 setOperationAction(ISD::MUL, MVT::v4i32, Custom);
1040 setOperationAction(ISD::MUL, MVT::v2i64, Custom);
1041 setOperationAction(ISD::MULHU, MVT::v4i32, Custom);
1042 setOperationAction(ISD::MULHS, MVT::v4i32, Custom);
1043 setOperationAction(ISD::MULHU, MVT::v16i8, Custom);
1044 setOperationAction(ISD::MULHS, MVT::v16i8, Custom);
1045 setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
1046 setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
1047 setOperationAction(ISD::MUL, MVT::v8i16, Legal);
1048 setOperationAction(ISD::AVGCEILU, MVT::v16i8, Legal);
1049 setOperationAction(ISD::AVGCEILU, MVT::v8i16, Legal);
1051 setOperationAction(ISD::SMULO, MVT::v16i8, Custom);
1052 setOperationAction(ISD::UMULO, MVT::v16i8, Custom);
1053 setOperationAction(ISD::UMULO, MVT::v2i32, Custom);
1055 setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
1056 setOperationAction(ISD::FABS, MVT::v2f64, Custom);
1057 setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Custom);
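// SSE2 only has native vector min/max for signed words (PMINSW/PMAXSW) and
// unsigned bytes (PMINUB/PMAXUB); every other type/signedness combination
// is custom lowered.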
1059 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1060 setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
1061 setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
1062 setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
1063 setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
1066 setOperationAction(ISD::ABDU, MVT::v16i8, Custom);
1067 setOperationAction(ISD::ABDS, MVT::v16i8, Custom);
1068 setOperationAction(ISD::ABDU, MVT::v8i16, Custom);
1069 setOperationAction(ISD::ABDS, MVT::v8i16, Custom);
1070 setOperationAction(ISD::ABDU, MVT::v4i32, Custom);
1071 setOperationAction(ISD::ABDS, MVT::v4i32, Custom);
1073 setOperationAction(ISD::UADDSAT, MVT::v16i8, Legal);
1074 setOperationAction(ISD::SADDSAT, MVT::v16i8, Legal);
1075 setOperationAction(ISD::USUBSAT, MVT::v16i8, Legal);
1076 setOperationAction(ISD::SSUBSAT, MVT::v16i8, Legal);
1077 setOperationAction(ISD::UADDSAT, MVT::v8i16, Legal);
1078 setOperationAction(ISD::SADDSAT, MVT::v8i16, Legal);
1079 setOperationAction(ISD::USUBSAT, MVT::v8i16, Legal);
1080 setOperationAction(ISD::SSUBSAT, MVT::v8i16, Legal);
1081 setOperationAction(ISD::USUBSAT, MVT::v4i32, Custom);
1082 setOperationAction(ISD::USUBSAT, MVT::v2i64, Custom);
1084 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1085 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
1086 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
1087 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
1089 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1090 setOperationAction(ISD::SETCC, VT, Custom);
1091 setOperationAction(ISD::CTPOP, VT, Custom);
1092 setOperationAction(ISD::ABS, VT, Custom);
1094 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1095 // setcc all the way to isel and prefer SETGT in some isel patterns.
1096 setCondCodeAction(ISD::SETLT, VT, Custom);
1097 setCondCodeAction(ISD::SETLE, VT, Custom);
1100 setOperationAction(ISD::SETCC, MVT::v2f64, Custom);
1101 setOperationAction(ISD::SETCC, MVT::v4f32, Custom);
1102 setOperationAction(ISD::STRICT_FSETCC, MVT::v2f64, Custom);
1103 setOperationAction(ISD::STRICT_FSETCC, MVT::v4f32, Custom);
1104 setOperationAction(ISD::STRICT_FSETCCS, MVT::v2f64, Custom);
1105 setOperationAction(ISD::STRICT_FSETCCS, MVT::v4f32, Custom);
1107 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
1108 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1109 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1110 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1111 setOperationAction(ISD::VSELECT, VT, Custom);
1112 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1115 for (auto VT : { MVT::v8f16, MVT::v2f64, MVT::v2i64 }) {
1116 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1117 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1118 setOperationAction(ISD::VSELECT, VT, Custom);
1120 if (VT == MVT::v2i64 && !Subtarget.is64Bit())
1121 continue;
1123 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1124 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1126 setF16Action(MVT::v8f16, Expand);
1127 setOperationAction(ISD::FADD, MVT::v8f16, Expand);
1128 setOperationAction(ISD::FSUB, MVT::v8f16, Expand);
1129 setOperationAction(ISD::FMUL, MVT::v8f16, Expand);
1130 setOperationAction(ISD::FDIV, MVT::v8f16, Expand);
1131 setOperationAction(ISD::FNEG, MVT::v8f16, Custom);
1132 setOperationAction(ISD::FABS, MVT::v8f16, Custom);
1133 setOperationAction(ISD::FCOPYSIGN, MVT::v8f16, Custom);
1135 // Custom lower v2i64 and v2f64 selects.
1136 setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
1137 setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
1138 setOperationAction(ISD::SELECT, MVT::v4i32, Custom);
1139 setOperationAction(ISD::SELECT, MVT::v8i16, Custom);
1140 setOperationAction(ISD::SELECT, MVT::v8f16, Custom);
1141 setOperationAction(ISD::SELECT, MVT::v16i8, Custom);
1143 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Custom);
1144 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Custom);
1145 setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);
1146 setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom);
1147 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Custom);
1148 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i32, Custom);
1150 // Custom legalize these to avoid over promotion or custom promotion.
1151 for (auto VT : {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16}) {
1152 setOperationAction(ISD::FP_TO_SINT, VT, Custom);
1153 setOperationAction(ISD::FP_TO_UINT, VT, Custom);
1154 setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Custom);
1155 setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Custom);
1158 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Custom);
1159 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Custom);
1160 setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
1161 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i32, Custom);
1163 setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);
1164 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i32, Custom);
1166 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
1167 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Custom);
1169 // Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion.
1170 setOperationAction(ISD::SINT_TO_FP, MVT::v2f32, Custom);
1171 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f32, Custom);
1172 setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
1173 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f32, Custom);
1175 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
1176 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v2f32, Custom);
1177 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
1178 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v2f32, Custom);
1180 // We want to legalize this to an f64 load rather than an i64 load on
1181 // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for
1182 // store.
1183 setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
1184 setOperationAction(ISD::LOAD, MVT::v4i16, Custom);
1185 setOperationAction(ISD::LOAD, MVT::v8i8, Custom);
1186 setOperationAction(ISD::STORE, MVT::v2i32, Custom);
1187 setOperationAction(ISD::STORE, MVT::v4i16, Custom);
1188 setOperationAction(ISD::STORE, MVT::v8i8, Custom);
1190 // Add 32-bit vector stores to help vectorization opportunities.
1191 setOperationAction(ISD::STORE, MVT::v2i16, Custom);
1192 setOperationAction(ISD::STORE, MVT::v4i8, Custom);
1194 setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
1195 setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
1196 setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
1197 if (!Subtarget.hasAVX512())
1198 setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);
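// In-register vector sign extension has no SSE2 instruction (PMOVSX is
// SSE4.1), so it is custom lowered.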
1200 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
1201 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
1202 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
1204 setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
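// Vector truncation has no direct SSE2 instruction; these are all custom
// lowered with pack/shuffle sequences.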
1206 setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
1207 setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
1208 setOperationAction(ISD::TRUNCATE, MVT::v2i32, Custom);
1209 setOperationAction(ISD::TRUNCATE, MVT::v2i64, Custom);
1210 setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
1211 setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
1212 setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
1213 setOperationAction(ISD::TRUNCATE, MVT::v4i64, Custom);
1214 setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
1215 setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
1216 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1217 setOperationAction(ISD::TRUNCATE, MVT::v8i64, Custom);
1218 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1219 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1220 setOperationAction(ISD::TRUNCATE, MVT::v16i32, Custom);
1221 setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom);
1223 // In the customized shift lowering, the legal v4i32/v2i64 cases
1224 // in AVX2 will be recognized.
1225 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1226 setOperationAction(ISD::SRL, VT, Custom);
1227 setOperationAction(ISD::SHL, VT, Custom);
1228 setOperationAction(ISD::SRA, VT, Custom);
1229 if (VT == MVT::v2i64) continue;
1230 setOperationAction(ISD::ROTL, VT, Custom);
1231 setOperationAction(ISD::ROTR, VT, Custom);
1232 setOperationAction(ISD::FSHL, VT, Custom);
1233 setOperationAction(ISD::FSHR, VT, Custom);
1236 setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
1237 setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
1238 setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
1239 setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
1240 setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
1243 if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
1244 setOperationAction(ISD::ABS, MVT::v16i8, Legal);
1245 setOperationAction(ISD::ABS, MVT::v8i16, Legal);
1246 setOperationAction(ISD::ABS, MVT::v4i32, Legal);
1247 setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom);
1248 setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
1249 setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
1250 setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
1251 setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
1253 // These might be better off as horizontal vector ops.
1254 setOperationAction(ISD::ADD, MVT::i16, Custom);
1255 setOperationAction(ISD::ADD, MVT::i32, Custom);
1256 setOperationAction(ISD::SUB, MVT::i16, Custom);
1257 setOperationAction(ISD::SUB, MVT::i32, Custom);
1260 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
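// SSE4.1 ROUNDSS/ROUNDSD/ROUNDPS/ROUNDPD cover these rounding operations
// directly; FROUND (halfway cases away from zero) is not an encodable
// rounding mode and stays custom lowered.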
1261 for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
1262 setOperationAction(ISD::FFLOOR, RoundedTy, Legal);
1263 setOperationAction(ISD::STRICT_FFLOOR, RoundedTy, Legal);
1264 setOperationAction(ISD::FCEIL, RoundedTy, Legal);
1265 setOperationAction(ISD::STRICT_FCEIL, RoundedTy, Legal);
1266 setOperationAction(ISD::FTRUNC, RoundedTy, Legal);
1267 setOperationAction(ISD::STRICT_FTRUNC, RoundedTy, Legal);
1268 setOperationAction(ISD::FRINT, RoundedTy, Legal);
1269 setOperationAction(ISD::STRICT_FRINT, RoundedTy, Legal);
1270 setOperationAction(ISD::FNEARBYINT, RoundedTy, Legal);
1271 setOperationAction(ISD::STRICT_FNEARBYINT, RoundedTy, Legal);
1272 setOperationAction(ISD::FROUNDEVEN, RoundedTy, Legal);
1273 setOperationAction(ISD::STRICT_FROUNDEVEN, RoundedTy, Legal);
1275 setOperationAction(ISD::FROUND, RoundedTy, Custom);
1278 setOperationAction(ISD::SMAX, MVT::v16i8, Legal);
1279 setOperationAction(ISD::SMAX, MVT::v4i32, Legal);
1280 setOperationAction(ISD::UMAX, MVT::v8i16, Legal);
1281 setOperationAction(ISD::UMAX, MVT::v4i32, Legal);
1282 setOperationAction(ISD::SMIN, MVT::v16i8, Legal);
1283 setOperationAction(ISD::SMIN, MVT::v4i32, Legal);
1284 setOperationAction(ISD::UMIN, MVT::v8i16, Legal);
1285 setOperationAction(ISD::UMIN, MVT::v4i32, Legal);
1287 for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) {
1288 setOperationAction(ISD::ABDS, VT, Custom);
1289 setOperationAction(ISD::ABDU, VT, Custom);
1292 setOperationAction(ISD::UADDSAT, MVT::v4i32, Custom);
1293 setOperationAction(ISD::SADDSAT, MVT::v2i64, Custom);
1294 setOperationAction(ISD::SSUBSAT, MVT::v2i64, Custom);
1296 // FIXME: Do we need to handle scalar-to-vector here?
1297 setOperationAction(ISD::MUL, MVT::v4i32, Legal);
1298 setOperationAction(ISD::SMULO, MVT::v2i32, Custom);
1300 // We directly match byte blends in the backend as they match the VSELECT
1301 // condition form.
1302 setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
1304 // SSE41 brings specific instructions for doing vector sign extend even in
1305 // cases where we don't have SRA.
1306 for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1307 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
1308 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
1311 // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
1312 for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1313 setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8, Legal);
1314 setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8, Legal);
1315 setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8, Legal);
1316 setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
1317 setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
1318 setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
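// A Legal load-ext action means the (result type, memory type) pair can be
// selected as a single extending load, so these fold straight into the
// PMOVSX/PMOVZX memory forms rather than a plain load plus a separate extend.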
1321 if (Subtarget.is64Bit() && !Subtarget.hasAVX512()) {
1322 // We need to scalarize v4i64->v4f32 uint_to_fp using cvtsi2ss, but we can
1323 // do the pre and post work in the vector domain.
1324 setOperationAction(ISD::UINT_TO_FP, MVT::v4i64, Custom);
1325 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i64, Custom);
1326 // We need to mark SINT_TO_FP as Custom even though we want to expand it
1327 // so that DAG combine doesn't try to turn it into uint_to_fp.
1328 setOperationAction(ISD::SINT_TO_FP, MVT::v4i64, Custom);
1329 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i64, Custom);
1333 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE42()) {
1334 setOperationAction(ISD::UADDSAT, MVT::v2i64, Custom);
1337 if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
1338 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1339 MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1340 setOperationAction(ISD::ROTL, VT, Custom);
1341 setOperationAction(ISD::ROTR, VT, Custom);
1344 // XOP can efficiently perform BITREVERSE with VPPERM.
1345 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
1346 setOperationAction(ISD::BITREVERSE, VT, Custom);
1348 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1349 MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1350 setOperationAction(ISD::BITREVERSE, VT, Custom);
1353 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
1354 bool HasInt256 = Subtarget.hasInt256();
1356 addRegisterClass(MVT::v32i8, Subtarget.hasVLX() ? &X86::VR256XRegClass
1357 : &X86::VR256RegClass);
1358 addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1359 : &X86::VR256RegClass);
1360 addRegisterClass(MVT::v16f16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1361 : &X86::VR256RegClass);
1362 addRegisterClass(MVT::v8i32, Subtarget.hasVLX() ? &X86::VR256XRegClass
1363 : &X86::VR256RegClass);
1364 addRegisterClass(MVT::v8f32, Subtarget.hasVLX() ? &X86::VR256XRegClass
1365 : &X86::VR256RegClass);
1366 addRegisterClass(MVT::v4i64, Subtarget.hasVLX() ? &X86::VR256XRegClass
1367 : &X86::VR256RegClass);
1368 addRegisterClass(MVT::v4f64, Subtarget.hasVLX() ? &X86::VR256XRegClass
1369 : &X86::VR256RegClass);
1371 for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
1372 setOperationAction(ISD::FFLOOR, VT, Legal);
1373 setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
1374 setOperationAction(ISD::FCEIL, VT, Legal);
1375 setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
1376 setOperationAction(ISD::FTRUNC, VT, Legal);
1377 setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
1378 setOperationAction(ISD::FRINT, VT, Legal);
1379 setOperationAction(ISD::STRICT_FRINT, VT, Legal);
1380 setOperationAction(ISD::FNEARBYINT, VT, Legal);
1381 setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1382 setOperationAction(ISD::FROUNDEVEN, VT, Legal);
1383 setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal);
1385 setOperationAction(ISD::FROUND, VT, Custom);
1387 setOperationAction(ISD::FNEG, VT, Custom);
1388 setOperationAction(ISD::FABS, VT, Custom);
1389 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1391 setOperationAction(ISD::FMAXIMUM, VT, Custom);
1392 setOperationAction(ISD::FMINIMUM, VT, Custom);
1395 // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
1396 // even though v8i16 is a legal type.
1397 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i16, MVT::v8i32);
1398 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i16, MVT::v8i32);
1399 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i16, MVT::v8i32);
1400 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i16, MVT::v8i32);
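// Promoting the result type here legalizes the v8i16 FP_TO_[SU]INT by doing
// the conversion with a v8i32 result and truncating it back to v8i16.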
1401 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Custom);
1402 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Custom);
1403 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i32, Custom);
1405 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Custom);
1406 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i32, Custom);
1407 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Expand);
1408 setOperationAction(ISD::FP_ROUND, MVT::v8f16, Expand);
1409 setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Custom);
1410 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f64, Custom);
1412 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v4f32, Legal);
1413 setOperationAction(ISD::STRICT_FADD, MVT::v8f32, Legal);
1414 setOperationAction(ISD::STRICT_FADD, MVT::v4f64, Legal);
1415 setOperationAction(ISD::STRICT_FSUB, MVT::v8f32, Legal);
1416 setOperationAction(ISD::STRICT_FSUB, MVT::v4f64, Legal);
1417 setOperationAction(ISD::STRICT_FMUL, MVT::v8f32, Legal);
1418 setOperationAction(ISD::STRICT_FMUL, MVT::v4f64, Legal);
1419 setOperationAction(ISD::STRICT_FDIV, MVT::v8f32, Legal);
1420 setOperationAction(ISD::STRICT_FDIV, MVT::v4f64, Legal);
1421 setOperationAction(ISD::STRICT_FSQRT, MVT::v8f32, Legal);
1422 setOperationAction(ISD::STRICT_FSQRT, MVT::v4f64, Legal);
1424 if (!Subtarget.hasAVX512())
1425 setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);
1427 // In the custom shift lowering, the v8i32/v4i64 cases that are legal
1428 // with AVX2 will be recognized.
1429 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1430 setOperationAction(ISD::SRL, VT, Custom);
1431 setOperationAction(ISD::SHL, VT, Custom);
1432 setOperationAction(ISD::SRA, VT, Custom);
1433 setOperationAction(ISD::ABDS, VT, Custom);
1434 setOperationAction(ISD::ABDU, VT, Custom);
1435 if (VT == MVT::v4i64) continue;
1436 setOperationAction(ISD::ROTL, VT, Custom);
1437 setOperationAction(ISD::ROTR, VT, Custom);
1438 setOperationAction(ISD::FSHL, VT, Custom);
1439 setOperationAction(ISD::FSHR, VT, Custom);
1442 // These types need custom splitting if their input is a 128-bit vector.
1443 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1444 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1445 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1446 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1448 setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
1449 setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
1450 setOperationAction(ISD::SELECT, MVT::v8i32, Custom);
1451 setOperationAction(ISD::SELECT, MVT::v16i16, Custom);
1452 setOperationAction(ISD::SELECT, MVT::v16f16, Custom);
1453 setOperationAction(ISD::SELECT, MVT::v32i8, Custom);
1454 setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
1456 for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1457 setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1458 setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1459 setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1462 setOperationAction(ISD::TRUNCATE, MVT::v32i8, Custom);
1463 setOperationAction(ISD::TRUNCATE, MVT::v32i16, Custom);
1464 setOperationAction(ISD::TRUNCATE, MVT::v32i32, Custom);
1465 setOperationAction(ISD::TRUNCATE, MVT::v32i64, Custom);
1467 setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom);
1469 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1470 setOperationAction(ISD::SETCC, VT, Custom);
1471 setOperationAction(ISD::CTPOP, VT, Custom);
1472 setOperationAction(ISD::CTLZ, VT, Custom);
1474 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1475 // setcc all the way to isel and prefer SETGT in some isel patterns.
1476 setCondCodeAction(ISD::SETLT, VT, Custom);
1477 setCondCodeAction(ISD::SETLE, VT, Custom);
1480 setOperationAction(ISD::SETCC, MVT::v4f64, Custom);
1481 setOperationAction(ISD::SETCC, MVT::v8f32, Custom);
1482 setOperationAction(ISD::STRICT_FSETCC, MVT::v4f64, Custom);
1483 setOperationAction(ISD::STRICT_FSETCC, MVT::v8f32, Custom);
1484 setOperationAction(ISD::STRICT_FSETCCS, MVT::v4f64, Custom);
1485 setOperationAction(ISD::STRICT_FSETCCS, MVT::v8f32, Custom);
1487 if (Subtarget.hasAnyFMA()) {
1488 for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
1489 MVT::v2f64, MVT::v4f64 }) {
1490 setOperationAction(ISD::FMA, VT, Legal);
1491 setOperationAction(ISD::STRICT_FMA, VT, Legal);
1495 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1496 setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
1497 setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
1500 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1501 setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom);
1502 setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom);
1503 setOperationAction(ISD::MUL, MVT::v32i8, Custom);
1505 setOperationAction(ISD::MULHU, MVT::v8i32, Custom);
1506 setOperationAction(ISD::MULHS, MVT::v8i32, Custom);
1507 setOperationAction(ISD::MULHU, MVT::v16i16, HasInt256 ? Legal : Custom);
1508 setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom);
1509 setOperationAction(ISD::MULHU, MVT::v32i8, Custom);
1510 setOperationAction(ISD::MULHS, MVT::v32i8, Custom);
1511 setOperationAction(ISD::AVGCEILU, MVT::v16i16, HasInt256 ? Legal : Custom);
1512 setOperationAction(ISD::AVGCEILU, MVT::v32i8, HasInt256 ? Legal : Custom);
1514 setOperationAction(ISD::SMULO, MVT::v32i8, Custom);
1515 setOperationAction(ISD::UMULO, MVT::v32i8, Custom);
1517 setOperationAction(ISD::ABS, MVT::v4i64, Custom);
1518 setOperationAction(ISD::SMAX, MVT::v4i64, Custom);
1519 setOperationAction(ISD::UMAX, MVT::v4i64, Custom);
1520 setOperationAction(ISD::SMIN, MVT::v4i64, Custom);
1521 setOperationAction(ISD::UMIN, MVT::v4i64, Custom);
1523 setOperationAction(ISD::UADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1524 setOperationAction(ISD::SADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1525 setOperationAction(ISD::USUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1526 setOperationAction(ISD::SSUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1527 setOperationAction(ISD::UADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1528 setOperationAction(ISD::SADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1529 setOperationAction(ISD::USUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1530 setOperationAction(ISD::SSUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1531 setOperationAction(ISD::UADDSAT, MVT::v8i32, Custom);
1532 setOperationAction(ISD::USUBSAT, MVT::v8i32, Custom);
1533 setOperationAction(ISD::UADDSAT, MVT::v4i64, Custom);
1534 setOperationAction(ISD::USUBSAT, MVT::v4i64, Custom);
1536 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
1537 setOperationAction(ISD::ABS, VT, HasInt256 ? Legal : Custom);
1538 setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
1539 setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
1540 setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
1541 setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
1544 for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) {
1545 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1546 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1549 if (HasInt256) {
1550 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1551 // when we have a 256bit-wide blend with immediate.
1552 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1553 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32, Custom);
1555 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1556 for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1557 setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
1558 setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i8, Legal);
1559 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i8, Legal);
1560 setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i16, Legal);
1561 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i16, Legal);
1562 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i32, Legal);
1566 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1567 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
1568 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
1569 setOperationAction(ISD::MSTORE, VT, Legal);
1572 // Extract subvector is special because the value type
1573 // (result) is 128-bit but the source is 256-bit wide.
1574 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1575 MVT::v8f16, MVT::v4f32, MVT::v2f64 }) {
1576 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1579 // Custom lower several nodes for 256-bit types.
1580 for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1581 MVT::v16f16, MVT::v8f32, MVT::v4f64 }) {
1582 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1583 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1584 setOperationAction(ISD::VSELECT, VT, Custom);
1585 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1586 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1587 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1588 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1589 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1590 setOperationAction(ISD::STORE, VT, Custom);
1592 setF16Action(MVT::v16f16, Expand);
1593 setOperationAction(ISD::FNEG, MVT::v16f16, Custom);
1594 setOperationAction(ISD::FABS, MVT::v16f16, Custom);
1595 setOperationAction(ISD::FCOPYSIGN, MVT::v16f16, Custom);
1596 setOperationAction(ISD::FADD, MVT::v16f16, Expand);
1597 setOperationAction(ISD::FSUB, MVT::v16f16, Expand);
1598 setOperationAction(ISD::FMUL, MVT::v16f16, Expand);
1599 setOperationAction(ISD::FDIV, MVT::v16f16, Expand);
1601 if (HasInt256) {
1602 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1604 // Custom legalize 2x32 to get a little better code.
1605 setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
1606 setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);
1608 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1609 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1610 setOperationAction(ISD::MGATHER, VT, Custom);
1614 if (!Subtarget.useSoftFloat() && !Subtarget.hasFP16() &&
1615 Subtarget.hasF16C()) {
1616 for (MVT VT : { MVT::f16, MVT::v2f16, MVT::v4f16, MVT::v8f16 }) {
1617 setOperationAction(ISD::FP_ROUND, VT, Custom);
1618 setOperationAction(ISD::STRICT_FP_ROUND, VT, Custom);
1620 for (MVT VT : { MVT::f32, MVT::v2f32, MVT::v4f32, MVT::v8f32 }) {
1621 setOperationAction(ISD::FP_EXTEND, VT, Custom);
1622 setOperationAction(ISD::STRICT_FP_EXTEND, VT, Custom);
1624 for (unsigned Opc : {ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV}) {
1625 setOperationPromotedToType(Opc, MVT::v8f16, MVT::v8f32);
1626 setOperationPromotedToType(Opc, MVT::v16f16, MVT::v16f32);
1630 // This block controls legalization of the mask vector sizes that are
1631 // available with AVX512. 512-bit vectors are in a separate block controlled
1632 // by useAVX512Regs.
1633 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1634 addRegisterClass(MVT::v1i1, &X86::VK1RegClass);
1635 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1636 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1637 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1638 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1640 setOperationAction(ISD::SELECT, MVT::v1i1, Custom);
1641 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
1642 setOperationAction(ISD::BUILD_VECTOR, MVT::v1i1, Custom);
1644 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i1, MVT::v8i32);
1645 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i1, MVT::v8i32);
1646 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v4i1, MVT::v4i32);
1647 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v4i1, MVT::v4i32);
1648 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i1, MVT::v8i32);
1649 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i1, MVT::v8i32);
1650 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v4i1, MVT::v4i32);
1651 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v4i1, MVT::v4i32);
1652 setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Custom);
1653 setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Custom);
1654 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i1, Custom);
1655 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i1, Custom);
1657 // There is no byte sized k-register load or store without AVX512DQ.
1658 if (!Subtarget.hasDQI()) {
1659 setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
1660 setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
1661 setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
1662 setOperationAction(ISD::LOAD, MVT::v8i1, Custom);
1664 setOperationAction(ISD::STORE, MVT::v1i1, Custom);
1665 setOperationAction(ISD::STORE, MVT::v2i1, Custom);
1666 setOperationAction(ISD::STORE, MVT::v4i1, Custom);
1667 setOperationAction(ISD::STORE, MVT::v8i1, Custom);
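// Base AVX512F only provides KMOVW (16-bit mask moves); KMOVB requires
// AVX512DQ, so loads/stores of the narrower mask types are custom lowered
// here, typically by round-tripping through a wider mask or a GPR.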
1670 // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
1671 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1672 setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1673 setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1674 setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1677 for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 })
1678 setOperationAction(ISD::VSELECT, VT, Expand);
1680 for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
1681 setOperationAction(ISD::SETCC, VT, Custom);
1682 setOperationAction(ISD::SELECT, VT, Custom);
1683 setOperationAction(ISD::TRUNCATE, VT, Custom);
1685 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1686 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1687 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1688 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1689 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1690 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1693 for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
1694 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1697 // This block controls legalization for 512-bit operations with 8/16/32/64 bit
1698 // elements. 512-bit vectors can be disabled based on prefer-vector-width and
1699 // required-vector-width function attributes.
1700 if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
1701 bool HasBWI = Subtarget.hasBWI();
1703 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1704 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1705 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1706 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1707 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1708 addRegisterClass(MVT::v32f16, &X86::VR512RegClass);
1709 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
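// addRegisterClass both makes the type a legal value type and associates it
// with the register class that holds it; VR512 is the 512-bit ZMM register
// file.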
1711 for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1712 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8, Legal);
1713 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
1714 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i8, Legal);
1715 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i16, Legal);
1716 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i32, Legal);
1717 if (HasBWI)
1718 setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
1721 for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
1722 setOperationAction(ISD::FMAXIMUM, VT, Custom);
1723 setOperationAction(ISD::FMINIMUM, VT, Custom);
1724 setOperationAction(ISD::FNEG, VT, Custom);
1725 setOperationAction(ISD::FABS, VT, Custom);
1726 setOperationAction(ISD::FMA, VT, Legal);
1727 setOperationAction(ISD::STRICT_FMA, VT, Legal);
1728 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1731 for (MVT VT : { MVT::v16i1, MVT::v16i8 }) {
1732 setOperationPromotedToType(ISD::FP_TO_SINT , VT, MVT::v16i32);
1733 setOperationPromotedToType(ISD::FP_TO_UINT , VT, MVT::v16i32);
1734 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, VT, MVT::v16i32);
1735 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, VT, MVT::v16i32);
1738 for (MVT VT : { MVT::v16i16, MVT::v16i32 }) {
1739 setOperationAction(ISD::FP_TO_SINT, VT, Custom);
1740 setOperationAction(ISD::FP_TO_UINT, VT, Custom);
1741 setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Custom);
1742 setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Custom);
1745 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Custom);
1746 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Custom);
1747 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i32, Custom);
1748 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i32, Custom);
1749 setOperationAction(ISD::FP_EXTEND, MVT::v8f64, Custom);
1750 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f64, Custom);
1752 setOperationAction(ISD::STRICT_FADD, MVT::v16f32, Legal);
1753 setOperationAction(ISD::STRICT_FADD, MVT::v8f64, Legal);
1754 setOperationAction(ISD::STRICT_FSUB, MVT::v16f32, Legal);
1755 setOperationAction(ISD::STRICT_FSUB, MVT::v8f64, Legal);
1756 setOperationAction(ISD::STRICT_FMUL, MVT::v16f32, Legal);
1757 setOperationAction(ISD::STRICT_FMUL, MVT::v8f64, Legal);
1758 setOperationAction(ISD::STRICT_FDIV, MVT::v16f32, Legal);
1759 setOperationAction(ISD::STRICT_FDIV, MVT::v8f64, Legal);
1760 setOperationAction(ISD::STRICT_FSQRT, MVT::v16f32, Legal);
1761 setOperationAction(ISD::STRICT_FSQRT, MVT::v8f64, Legal);
1762 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v8f32, Legal);
1764 setTruncStoreAction(MVT::v8i64, MVT::v8i8, Legal);
1765 setTruncStoreAction(MVT::v8i64, MVT::v8i16, Legal);
1766 setTruncStoreAction(MVT::v8i64, MVT::v8i32, Legal);
1767 setTruncStoreAction(MVT::v16i32, MVT::v16i8, Legal);
1768 setTruncStoreAction(MVT::v16i32, MVT::v16i16, Legal);
1769 if (HasBWI)
1770 setTruncStoreAction(MVT::v32i16, MVT::v32i8, Legal);
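// A Legal truncating store is matched as one instruction; under AVX512 these
// map onto the VPMOV* down-converting store forms instead of a separate
// truncate followed by a store.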
1772 // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE
1773 // to 512-bit rather than use the AVX2 instructions so that we can use
1774 // k-masks.
1775 if (!Subtarget.hasVLX()) {
1776 for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1777 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
1778 setOperationAction(ISD::MLOAD, VT, Custom);
1779 setOperationAction(ISD::MSTORE, VT, Custom);
1783 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Legal);
1784 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Legal);
1785 setOperationAction(ISD::TRUNCATE, MVT::v32i8, HasBWI ? Legal : Custom);
1786 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
1787 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1788 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1789 setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom);
1790 setOperationAction(ISD::ANY_EXTEND, MVT::v16i32, Custom);
1791 setOperationAction(ISD::ANY_EXTEND, MVT::v8i64, Custom);
1792 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
1793 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1794 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1796 if (HasBWI) {
1797 // Extends from v64i1 masks to 512-bit vectors.
1798 setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom);
1799 setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom);
1800 setOperationAction(ISD::ANY_EXTEND, MVT::v64i8, Custom);
1803 for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
1804 setOperationAction(ISD::FFLOOR, VT, Legal);
1805 setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
1806 setOperationAction(ISD::FCEIL, VT, Legal);
1807 setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
1808 setOperationAction(ISD::FTRUNC, VT, Legal);
1809 setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
1810 setOperationAction(ISD::FRINT, VT, Legal);
1811 setOperationAction(ISD::STRICT_FRINT, VT, Legal);
1812 setOperationAction(ISD::FNEARBYINT, VT, Legal);
1813 setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1814 setOperationAction(ISD::FROUNDEVEN, VT, Legal);
1815 setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal);
1817 setOperationAction(ISD::FROUND, VT, Custom);
1820 for (auto VT : {MVT::v32i16, MVT::v16i32, MVT::v8i64}) {
1821 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1822 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1825 setOperationAction(ISD::ADD, MVT::v32i16, HasBWI ? Legal : Custom);
1826 setOperationAction(ISD::SUB, MVT::v32i16, HasBWI ? Legal : Custom);
1827 setOperationAction(ISD::ADD, MVT::v64i8, HasBWI ? Legal : Custom);
1828 setOperationAction(ISD::SUB, MVT::v64i8, HasBWI ? Legal : Custom);
1830 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1831 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1832 setOperationAction(ISD::MUL, MVT::v32i16, HasBWI ? Legal : Custom);
1833 setOperationAction(ISD::MUL, MVT::v64i8, Custom);
1835 setOperationAction(ISD::MULHU, MVT::v16i32, Custom);
1836 setOperationAction(ISD::MULHS, MVT::v16i32, Custom);
1837 setOperationAction(ISD::MULHS, MVT::v32i16, HasBWI ? Legal : Custom);
1838 setOperationAction(ISD::MULHU, MVT::v32i16, HasBWI ? Legal : Custom);
1839 setOperationAction(ISD::MULHS, MVT::v64i8, Custom);
1840 setOperationAction(ISD::MULHU, MVT::v64i8, Custom);
1841 setOperationAction(ISD::AVGCEILU, MVT::v32i16, HasBWI ? Legal : Custom);
1842 setOperationAction(ISD::AVGCEILU, MVT::v64i8, HasBWI ? Legal : Custom);
1844 setOperationAction(ISD::SMULO, MVT::v64i8, Custom);
1845 setOperationAction(ISD::UMULO, MVT::v64i8, Custom);
1847 setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom);
1849 for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64 }) {
1850 setOperationAction(ISD::SRL, VT, Custom);
1851 setOperationAction(ISD::SHL, VT, Custom);
1852 setOperationAction(ISD::SRA, VT, Custom);
1853 setOperationAction(ISD::ROTL, VT, Custom);
1854 setOperationAction(ISD::ROTR, VT, Custom);
1855 setOperationAction(ISD::SETCC, VT, Custom);
1856 setOperationAction(ISD::ABDS, VT, Custom);
1857 setOperationAction(ISD::ABDU, VT, Custom);
1859 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1860 // setcc all the way to isel and prefer SETGT in some isel patterns.
1861 setCondCodeAction(ISD::SETLT, VT, Custom);
1862 setCondCodeAction(ISD::SETLE, VT, Custom);
1865 setOperationAction(ISD::SETCC, MVT::v8f64, Custom);
1866 setOperationAction(ISD::SETCC, MVT::v16f32, Custom);
1867 setOperationAction(ISD::STRICT_FSETCC, MVT::v8f64, Custom);
1868 setOperationAction(ISD::STRICT_FSETCC, MVT::v16f32, Custom);
1869 setOperationAction(ISD::STRICT_FSETCCS, MVT::v8f64, Custom);
1870 setOperationAction(ISD::STRICT_FSETCCS, MVT::v16f32, Custom);
1872 for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1873 setOperationAction(ISD::SMAX, VT, Legal);
1874 setOperationAction(ISD::UMAX, VT, Legal);
1875 setOperationAction(ISD::SMIN, VT, Legal);
1876 setOperationAction(ISD::UMIN, VT, Legal);
1877 setOperationAction(ISD::ABS, VT, Legal);
1878 setOperationAction(ISD::CTPOP, VT, Custom);
1881 for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1882 setOperationAction(ISD::ABS, VT, HasBWI ? Legal : Custom);
1883 setOperationAction(ISD::CTPOP, VT, Subtarget.hasBITALG() ? Legal : Custom);
1884 setOperationAction(ISD::CTLZ, VT, Custom);
1885 setOperationAction(ISD::SMAX, VT, HasBWI ? Legal : Custom);
1886 setOperationAction(ISD::UMAX, VT, HasBWI ? Legal : Custom);
1887 setOperationAction(ISD::SMIN, VT, HasBWI ? Legal : Custom);
1888 setOperationAction(ISD::UMIN, VT, HasBWI ? Legal : Custom);
1889 setOperationAction(ISD::UADDSAT, VT, HasBWI ? Legal : Custom);
1890 setOperationAction(ISD::SADDSAT, VT, HasBWI ? Legal : Custom);
1891 setOperationAction(ISD::USUBSAT, VT, HasBWI ? Legal : Custom);
1892 setOperationAction(ISD::SSUBSAT, VT, HasBWI ? Legal : Custom);
1895 setOperationAction(ISD::FSHL, MVT::v64i8, Custom);
1896 setOperationAction(ISD::FSHR, MVT::v64i8, Custom);
1897 setOperationAction(ISD::FSHL, MVT::v32i16, Custom);
1898 setOperationAction(ISD::FSHR, MVT::v32i16, Custom);
1899 setOperationAction(ISD::FSHL, MVT::v16i32, Custom);
1900 setOperationAction(ISD::FSHR, MVT::v16i32, Custom);
1902 if (Subtarget.hasDQI()) {
1903 for (auto Opc : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP,
1904 ISD::STRICT_UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
1905 ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT})
1906 setOperationAction(Opc, MVT::v8i64, Custom);
1907 setOperationAction(ISD::MUL, MVT::v8i64, Legal);
1910 if (Subtarget.hasCDI()) {
1911 // NonVLX sub-targets extend 128/256 vectors to use the 512 version.
1912 for (auto VT : { MVT::v16i32, MVT::v8i64} ) {
1913 setOperationAction(ISD::CTLZ, VT, Legal);
1915 } // Subtarget.hasCDI()
1917 if (Subtarget.hasVPOPCNTDQ()) {
1918 for (auto VT : { MVT::v16i32, MVT::v8i64 })
1919 setOperationAction(ISD::CTPOP, VT, Legal);
1922 // Extract subvector is special because the value type
1923 // (result) is 256-bit but the source is 512-bit wide.
1924 // 128-bit was made Legal under AVX1.
1925 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1926 MVT::v16f16, MVT::v8f32, MVT::v4f64 })
1927 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1929 for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64,
1930 MVT::v32f16, MVT::v16f32, MVT::v8f64 }) {
1931 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1932 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1933 setOperationAction(ISD::SELECT, VT, Custom);
1934 setOperationAction(ISD::VSELECT, VT, Custom);
1935 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1936 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1937 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1938 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1939 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1941 setF16Action(MVT::v32f16, Expand);
1942 setOperationAction(ISD::FP_ROUND, MVT::v16f16, Custom);
1943 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v16f16, Custom);
1944 setOperationAction(ISD::FP_EXTEND, MVT::v16f32, Custom);
1945 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v16f32, Custom);
1946 for (unsigned Opc : {ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV}) {
1947 setOperationPromotedToType(Opc, MVT::v16f16, MVT::v16f32);
1948 setOperationPromotedToType(Opc, MVT::v32f16, MVT::v32f32);
1951 for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
1952 setOperationAction(ISD::MLOAD, VT, Legal);
1953 setOperationAction(ISD::MSTORE, VT, Legal);
1954 setOperationAction(ISD::MGATHER, VT, Custom);
1955 setOperationAction(ISD::MSCATTER, VT, Custom);
1957 if (HasBWI) {
1958 for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1959 setOperationAction(ISD::MLOAD, VT, Legal);
1960 setOperationAction(ISD::MSTORE, VT, Legal);
1962 } else {
1963 setOperationAction(ISD::STORE, MVT::v32i16, Custom);
1964 setOperationAction(ISD::STORE, MVT::v64i8, Custom);
1967 if (Subtarget.hasVBMI2()) {
1968 for (auto VT : {MVT::v32i16, MVT::v16i32, MVT::v8i64}) {
1969 setOperationAction(ISD::FSHL, VT, Custom);
1970 setOperationAction(ISD::FSHR, VT, Custom);
1973 setOperationAction(ISD::ROTL, MVT::v32i16, Custom);
1974 setOperationAction(ISD::ROTR, MVT::v32i16, Custom);
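// VBMI2 provides the VPSHLD/VPSHRD(V) double-shift instructions, which the
// custom lowering can use to implement funnel shifts (and the v32i16 rotates)
// directly.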
1976 }// useAVX512Regs
1978 if (!Subtarget.useSoftFloat() && Subtarget.hasVBMI2()) {
1979 for (auto VT : {MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v16i16, MVT::v8i32,
1980 MVT::v4i64}) {
1981 setOperationAction(ISD::FSHL, VT, Custom);
1982 setOperationAction(ISD::FSHR, VT, Custom);
1986 // This block controls legalization for operations that don't have
1987 // pre-AVX512 equivalents. Without VLX we use 512-bit operations for
1988 // narrower widths.
1989 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1990 // These operations are handled on non-VLX by artificially widening in
1991 // isel patterns.
1993 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i32, Custom);
1994 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Custom);
1995 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i32, Custom);
1997 if (Subtarget.hasDQI()) {
1998 // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
1999 // v2f32 UINT_TO_FP is already custom under SSE2.
2000 assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) &&
2001 isOperationCustom(ISD::STRICT_UINT_TO_FP, MVT::v2f32) &&
2002 "Unexpected operation action!");
2003 // v2i64 FP_TO_S/UINT(v2f32) custom conversion.
2004 setOperationAction(ISD::FP_TO_SINT, MVT::v2f32, Custom);
2005 setOperationAction(ISD::FP_TO_UINT, MVT::v2f32, Custom);
2006 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f32, Custom);
2007 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f32, Custom);
2010 for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
2011 setOperationAction(ISD::SMAX, VT, Legal);
2012 setOperationAction(ISD::UMAX, VT, Legal);
2013 setOperationAction(ISD::SMIN, VT, Legal);
2014 setOperationAction(ISD::UMIN, VT, Legal);
2015 setOperationAction(ISD::ABS, VT, Legal);
2018 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
2019 setOperationAction(ISD::ROTL, VT, Custom);
2020 setOperationAction(ISD::ROTR, VT, Custom);
2023 // Custom legalize 2x32 to get a little better code.
2024 setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
2025 setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);
2027 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
2028 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
2029 setOperationAction(ISD::MSCATTER, VT, Custom);
2031 if (Subtarget.hasDQI()) {
2032 for (auto Opc : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP,
2033 ISD::STRICT_UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
2034 ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT}) {
2035 setOperationAction(Opc, MVT::v2i64, Custom);
2036 setOperationAction(Opc, MVT::v4i64, Custom);
2038 setOperationAction(ISD::MUL, MVT::v2i64, Legal);
2039 setOperationAction(ISD::MUL, MVT::v4i64, Legal);
2042 if (Subtarget.hasCDI()) {
2043 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
2044 setOperationAction(ISD::CTLZ, VT, Legal);
2046 } // Subtarget.hasCDI()
2048 if (Subtarget.hasVPOPCNTDQ()) {
2049 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
2050 setOperationAction(ISD::CTPOP, VT, Legal);
2052 setOperationAction(ISD::FNEG, MVT::v32f16, Custom);
2053 setOperationAction(ISD::FABS, MVT::v32f16, Custom);
2054 setOperationAction(ISD::FCOPYSIGN, MVT::v32f16, Custom);
2057 // This block controls legalization of v32i1/v64i1, which are available with
2058 // AVX512BW.
2059 if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
2060 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
2061 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
2063 for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
2064 setOperationAction(ISD::VSELECT, VT, Expand);
2065 setOperationAction(ISD::TRUNCATE, VT, Custom);
2066 setOperationAction(ISD::SETCC, VT, Custom);
2067 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
2068 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
2069 setOperationAction(ISD::SELECT, VT, Custom);
2070 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
2071 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
2072 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
2073 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
2076 for (auto VT : { MVT::v16i1, MVT::v32i1 })
2077 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
2079 // Extends from v32i1 masks to 256-bit vectors.
2080 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom);
2081 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom);
2082 setOperationAction(ISD::ANY_EXTEND, MVT::v32i8, Custom);
2084 for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
2085 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
2086 setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
2089 // These operations are handled on non-VLX by artificially widening in
2090 // isel patterns.
2091 // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
2093 if (Subtarget.hasBITALG()) {
2094 for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 })
2095 setOperationAction(ISD::CTPOP, VT, Legal);
2099 if (!Subtarget.useSoftFloat() && Subtarget.hasFP16()) {
2100 auto setGroup = [&] (MVT VT) {
2101 setOperationAction(ISD::FADD, VT, Legal);
2102 setOperationAction(ISD::STRICT_FADD, VT, Legal);
2103 setOperationAction(ISD::FSUB, VT, Legal);
2104 setOperationAction(ISD::STRICT_FSUB, VT, Legal);
2105 setOperationAction(ISD::FMUL, VT, Legal);
2106 setOperationAction(ISD::STRICT_FMUL, VT, Legal);
2107 setOperationAction(ISD::FDIV, VT, Legal);
2108 setOperationAction(ISD::STRICT_FDIV, VT, Legal);
2109 setOperationAction(ISD::FSQRT, VT, Legal);
2110 setOperationAction(ISD::STRICT_FSQRT, VT, Legal);
2112 setOperationAction(ISD::FFLOOR, VT, Legal);
2113 setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
2114 setOperationAction(ISD::FCEIL, VT, Legal);
2115 setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
2116 setOperationAction(ISD::FTRUNC, VT, Legal);
2117 setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
2118 setOperationAction(ISD::FRINT, VT, Legal);
2119 setOperationAction(ISD::STRICT_FRINT, VT, Legal);
2120 setOperationAction(ISD::FNEARBYINT, VT, Legal);
2121 setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
2122 setOperationAction(ISD::FROUNDEVEN, VT, Legal);
2123 setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal);
2125 setOperationAction(ISD::FROUND, VT, Custom);
2127 setOperationAction(ISD::LOAD, VT, Legal);
2128 setOperationAction(ISD::STORE, VT, Legal);
2130 setOperationAction(ISD::FMA, VT, Legal);
2131 setOperationAction(ISD::STRICT_FMA, VT, Legal);
2132 setOperationAction(ISD::VSELECT, VT, Legal);
2133 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
2134 setOperationAction(ISD::SELECT, VT, Custom);
2136 setOperationAction(ISD::FNEG, VT, Custom);
2137 setOperationAction(ISD::FABS, VT, Custom);
2138 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
2139 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
2140 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
2142 setOperationAction(ISD::SETCC, VT, Custom);
2143 setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
2144 setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
2147 // AVX512_FP16 scalar operations
2148 setGroup(MVT::f16);
2149 setOperationAction(ISD::FREM, MVT::f16, Promote);
2150 setOperationAction(ISD::STRICT_FREM, MVT::f16, Promote);
2151 setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
2152 setOperationAction(ISD::BR_CC, MVT::f16, Expand);
2153 setOperationAction(ISD::STRICT_FROUND, MVT::f16, Promote);
2154 setOperationAction(ISD::FROUNDEVEN, MVT::f16, Legal);
2155 setOperationAction(ISD::STRICT_FROUNDEVEN, MVT::f16, Legal);
2156 setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
2157 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
2158 setOperationAction(ISD::FMAXIMUM, MVT::f16, Custom);
2159 setOperationAction(ISD::FMINIMUM, MVT::f16, Custom);
2160 setOperationAction(ISD::FP_EXTEND, MVT::f32, Legal);
2161 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
2163 setCondCodeAction(ISD::SETOEQ, MVT::f16, Expand);
2164 setCondCodeAction(ISD::SETUNE, MVT::f16, Expand);
2166 if (Subtarget.useAVX512Regs()) {
2167 setGroup(MVT::v32f16);
2168 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32f16, Custom);
2169 setOperationAction(ISD::SINT_TO_FP, MVT::v32i16, Legal);
2170 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v32i16, Legal);
2171 setOperationAction(ISD::UINT_TO_FP, MVT::v32i16, Legal);
2172 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v32i16, Legal);
2173 setOperationAction(ISD::FP_ROUND, MVT::v16f16, Legal);
2174 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v16f16, Legal);
2175 setOperationAction(ISD::FP_EXTEND, MVT::v16f32, Custom);
2176 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v16f32, Legal);
2177 setOperationAction(ISD::FP_EXTEND, MVT::v8f64, Custom);
2178 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f64, Legal);
2179 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32f16, Custom);
2181 setOperationAction(ISD::FP_TO_SINT, MVT::v32i16, Custom);
2182 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v32i16, Custom);
2183 setOperationAction(ISD::FP_TO_UINT, MVT::v32i16, Custom);
2184 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v32i16, Custom);
2185 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v32i8, MVT::v32i16);
2186 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v32i8,
2187 MVT::v32i16);
2188 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v32i8, MVT::v32i16);
2189 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v32i8,
2190 MVT::v32i16);
2191 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v32i1, MVT::v32i16);
2192 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v32i1,
2193 MVT::v32i16);
2194 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v32i1, MVT::v32i16);
2195 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v32i1,
2196 MVT::v32i16);
2198 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16f16, Legal);
2199 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32f16, Legal);
2200 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32f16, Custom);
2202 setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Legal);
2203 setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Legal);
2206 if (Subtarget.hasVLX()) {
2207 setGroup(MVT::v8f16);
2208 setGroup(MVT::v16f16);
2210 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8f16, Legal);
2211 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16f16, Custom);
2212 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Legal);
2213 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i16, Legal);
2214 setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Legal);
2215 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i16, Legal);
2216 setOperationAction(ISD::UINT_TO_FP, MVT::v16i16, Legal);
2217 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i16, Legal);
2218 setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Legal);
2219 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i16, Legal);
2221 setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);
2222 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i16, Custom);
2223 setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
2224 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i16, Custom);
2225 setOperationAction(ISD::FP_ROUND, MVT::v8f16, Legal);
2226 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v8f16, Legal);
2227 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Custom);
2228 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f32, Legal);
2229 setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Custom);
2230 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f64, Legal);
2232 // INSERT_VECTOR_ELT v8f16 extended to VECTOR_SHUFFLE
2233 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8f16, Custom);
2234 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16f16, Custom);
2236 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f16, Legal);
2237 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v16f16, Legal);
2238 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f16, Custom);
2240 setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Legal);
2241 setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Legal);
2242 setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Legal);
2243 setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Legal);
2245 // Need to custom widen these to prevent scalarization.
2246 setOperationAction(ISD::LOAD, MVT::v4f16, Custom);
2247 setOperationAction(ISD::STORE, MVT::v4f16, Custom);
2251 if (!Subtarget.useSoftFloat() &&
2252 (Subtarget.hasAVXNECONVERT() || Subtarget.hasBF16())) {
2253 addRegisterClass(MVT::v8bf16, Subtarget.hasAVX512() ? &X86::VR128XRegClass
2254 : &X86::VR128RegClass);
2255 addRegisterClass(MVT::v16bf16, Subtarget.hasAVX512() ? &X86::VR256XRegClass
2256 : &X86::VR256RegClass);
2257 // We set the type action of bf16 to TypeSoftPromoteHalf, but that does not
2258 // cover BUILD_VECTOR and INSERT_VECTOR_ELT for us.
2259 // Mark the operation action as Custom so we can customize them later.
2260 setOperationAction(ISD::BUILD_VECTOR, MVT::bf16, Custom);
2261 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::bf16, Custom);
2262 for (auto VT : {MVT::v8bf16, MVT::v16bf16}) {
2263 setF16Action(VT, Expand);
2264 setOperationAction(ISD::FADD, VT, Expand);
2265 setOperationAction(ISD::FSUB, VT, Expand);
2266 setOperationAction(ISD::FMUL, VT, Expand);
2267 setOperationAction(ISD::FDIV, VT, Expand);
2268 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
2269 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
2270 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
2271 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
2273 setOperationAction(ISD::FP_ROUND, MVT::v8bf16, Custom);
2274 addLegalFPImmediate(APFloat::getZero(APFloat::BFloat()));
2277 if (!Subtarget.useSoftFloat() && Subtarget.hasBF16()) {
2278 addRegisterClass(MVT::v32bf16, &X86::VR512RegClass);
2279 setF16Action(MVT::v32bf16, Expand);
2280 setOperationAction(ISD::FADD, MVT::v32bf16, Expand);
2281 setOperationAction(ISD::FSUB, MVT::v32bf16, Expand);
2282 setOperationAction(ISD::FMUL, MVT::v32bf16, Expand);
2283 setOperationAction(ISD::FDIV, MVT::v32bf16, Expand);
2284 setOperationAction(ISD::BUILD_VECTOR, MVT::v32bf16, Custom);
2285 setOperationAction(ISD::FP_ROUND, MVT::v16bf16, Custom);
2286 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32bf16, Custom);
2287 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32bf16, Legal);
2288 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32bf16, Custom);
2291 if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
2292 setTruncStoreAction(MVT::v4i64, MVT::v4i8, Legal);
2293 setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
2294 setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
2295 setTruncStoreAction(MVT::v8i32, MVT::v8i8, Legal);
2296 setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
2298 setTruncStoreAction(MVT::v2i64, MVT::v2i8, Legal);
2299 setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
2300 setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
2301 setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
2302 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
2304 if (Subtarget.hasBWI()) {
2305 setTruncStoreAction(MVT::v16i16, MVT::v16i8, Legal);
2306 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
2309 if (Subtarget.hasFP16()) {
2310 // vcvttph2[u]dq v4f16 -> v4i32/64, v2f16 -> v2i32/64
2311 setOperationAction(ISD::FP_TO_SINT, MVT::v2f16, Custom);
2312 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f16, Custom);
2313 setOperationAction(ISD::FP_TO_UINT, MVT::v2f16, Custom);
2314 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f16, Custom);
2315 setOperationAction(ISD::FP_TO_SINT, MVT::v4f16, Custom);
2316 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f16, Custom);
2317 setOperationAction(ISD::FP_TO_UINT, MVT::v4f16, Custom);
2318 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f16, Custom);
2319 // vcvt[u]dq2ph v4i32/64 -> v4f16, v2i32/64 -> v2f16
2320 setOperationAction(ISD::SINT_TO_FP, MVT::v2f16, Custom);
2321 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f16, Custom);
2322 setOperationAction(ISD::UINT_TO_FP, MVT::v2f16, Custom);
2323 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f16, Custom);
2324 setOperationAction(ISD::SINT_TO_FP, MVT::v4f16, Custom);
2325 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4f16, Custom);
2326 setOperationAction(ISD::UINT_TO_FP, MVT::v4f16, Custom);
2327 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4f16, Custom);
2328 // vcvtps2phx v4f32 -> v4f16, v2f32 -> v2f16
2329 setOperationAction(ISD::FP_ROUND, MVT::v2f16, Custom);
2330 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v2f16, Custom);
2331 setOperationAction(ISD::FP_ROUND, MVT::v4f16, Custom);
2332 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v4f16, Custom);
2333 // vcvtph2psx v4f16 -> v4f32, v2f16 -> v2f32
2334 setOperationAction(ISD::FP_EXTEND, MVT::v2f16, Custom);
2335 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v2f16, Custom);
2336 setOperationAction(ISD::FP_EXTEND, MVT::v4f16, Custom);
2337 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f16, Custom);
2341 if (!Subtarget.useSoftFloat() && Subtarget.hasAMXTILE()) {
2342 addRegisterClass(MVT::x86amx, &X86::TILERegClass);
2345 // We want to custom lower some of our intrinsics.
2346 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
2347 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
2348 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
2349 if (!Subtarget.is64Bit()) {
2350 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
2353 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
2354 // handle type legalization for these operations here.
2356 // FIXME: We really should do custom legalization for addition and
2357 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
2358 // than generic legalization for 64-bit multiplication-with-overflow, though.
2359 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
2360 if (VT == MVT::i64 && !Subtarget.is64Bit())
2361 continue;
2362 // Add/Sub/Mul with overflow operations are custom lowered.
2363 setOperationAction(ISD::SADDO, VT, Custom);
2364 setOperationAction(ISD::UADDO, VT, Custom);
2365 setOperationAction(ISD::SSUBO, VT, Custom);
2366 setOperationAction(ISD::USUBO, VT, Custom);
2367 setOperationAction(ISD::SMULO, VT, Custom);
2368 setOperationAction(ISD::UMULO, VT, Custom);
2370 // Support carry in as value rather than glue.
2371 setOperationAction(ISD::UADDO_CARRY, VT, Custom);
2372 setOperationAction(ISD::USUBO_CARRY, VT, Custom);
2373 setOperationAction(ISD::SETCCCARRY, VT, Custom);
2374 setOperationAction(ISD::SADDO_CARRY, VT, Custom);
2375 setOperationAction(ISD::SSUBO_CARRY, VT, Custom);
2378 if (!Subtarget.is64Bit()) {
2379 // These libcalls are not available in 32-bit.
2380 setLibcallName(RTLIB::SHL_I128, nullptr);
2381 setLibcallName(RTLIB::SRL_I128, nullptr);
2382 setLibcallName(RTLIB::SRA_I128, nullptr);
2383 setLibcallName(RTLIB::MUL_I128, nullptr);
2384 // The MULO libcall is not part of libgcc, only compiler-rt.
2385 setLibcallName(RTLIB::MULO_I64, nullptr);
2387 // The MULO libcall is not part of libgcc, only compiler-rt.
2388 setLibcallName(RTLIB::MULO_I128, nullptr);
2390 // Combine sin / cos into _sincos_stret if it is available.
2391 if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
2392 getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
2393 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
2394 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
2397 if (Subtarget.isTargetWin64()) {
2398 setOperationAction(ISD::SDIV, MVT::i128, Custom);
2399 setOperationAction(ISD::UDIV, MVT::i128, Custom);
2400 setOperationAction(ISD::SREM, MVT::i128, Custom);
2401 setOperationAction(ISD::UREM, MVT::i128, Custom);
2402 setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
2403 setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
2404 setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
2405 setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
2406 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i128, Custom);
2407 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i128, Custom);
2408 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i128, Custom);
2409 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, Custom);
2412 // On 32 bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
2413 // is. We should promote the value to 64-bits to solve this.
2414 // This is what the CRT headers do - `fmodf` is an inline header
2415 // function casting to f64 and calling `fmod`.
2416 if (Subtarget.is32Bit() &&
2417 (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()))
2418 for (ISD::NodeType Op :
2419 {ISD::FCEIL, ISD::STRICT_FCEIL,
2420 ISD::FCOS, ISD::STRICT_FCOS,
2421 ISD::FEXP, ISD::STRICT_FEXP,
2422 ISD::FFLOOR, ISD::STRICT_FFLOOR,
2423 ISD::FREM, ISD::STRICT_FREM,
2424 ISD::FLOG, ISD::STRICT_FLOG,
2425 ISD::FLOG10, ISD::STRICT_FLOG10,
2426 ISD::FPOW, ISD::STRICT_FPOW,
2427 ISD::FSIN, ISD::STRICT_FSIN})
2428 if (isOperationExpand(Op, MVT::f32))
2429 setOperationAction(Op, MVT::f32, Promote);
2431 // We have target-specific dag combine patterns for the following nodes:
2432 setTargetDAGCombine({ISD::VECTOR_SHUFFLE,
2433 ISD::SCALAR_TO_VECTOR,
2434 ISD::INSERT_VECTOR_ELT,
2435 ISD::EXTRACT_VECTOR_ELT,
2436 ISD::CONCAT_VECTORS,
2437 ISD::INSERT_SUBVECTOR,
2438 ISD::EXTRACT_SUBVECTOR,
2439 ISD::BITCAST,
2440 ISD::VSELECT,
2441 ISD::SELECT,
2442 ISD::SHL,
2443 ISD::SRA,
2444 ISD::SRL,
2445 ISD::OR,
2446 ISD::AND,
2447 ISD::BITREVERSE,
2448 ISD::ADD,
2449 ISD::FADD,
2450 ISD::FSUB,
2451 ISD::FNEG,
2452 ISD::FMA,
2453 ISD::STRICT_FMA,
2454 ISD::FMINNUM,
2455 ISD::FMAXNUM,
2456 ISD::SUB,
2457 ISD::LOAD,
2458 ISD::MLOAD,
2459 ISD::STORE,
2460 ISD::MSTORE,
2461 ISD::TRUNCATE,
2462 ISD::ZERO_EXTEND,
2463 ISD::ANY_EXTEND,
2464 ISD::SIGN_EXTEND,
2465 ISD::SIGN_EXTEND_INREG,
2466 ISD::ANY_EXTEND_VECTOR_INREG,
2467 ISD::SIGN_EXTEND_VECTOR_INREG,
2468 ISD::ZERO_EXTEND_VECTOR_INREG,
2469 ISD::SINT_TO_FP,
2470 ISD::UINT_TO_FP,
2471 ISD::STRICT_SINT_TO_FP,
2472 ISD::STRICT_UINT_TO_FP,
2473 ISD::SETCC,
2474 ISD::MUL,
2475 ISD::XOR,
2476 ISD::MSCATTER,
2477 ISD::MGATHER,
2478 ISD::FP16_TO_FP,
2479 ISD::FP_EXTEND,
2480 ISD::STRICT_FP_EXTEND,
2481 ISD::FP_ROUND,
2482 ISD::STRICT_FP_ROUND});
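// Registering these opcodes with setTargetDAGCombine causes
// X86TargetLowering::PerformDAGCombine to be called on matching nodes during
// the DAG combine phases.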
2484 computeRegisterProperties(Subtarget.getRegisterInfo());
2486 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
2487 MaxStoresPerMemsetOptSize = 8;
2488 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
2489 MaxStoresPerMemcpyOptSize = 4;
2490 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
2491 MaxStoresPerMemmoveOptSize = 4;
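// These thresholds bound how many stores the backend emits when inlining the
// @llvm.memset/memcpy/memmove intrinsics; above the limit a library call is
// emitted instead. The OptSize variants apply when optimizing for size.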
2493 // TODO: These control memcmp expansion in CGP and could be raised higher, but
2494 // that needs to be benchmarked and balanced with the potential use of vector
2495 // load/store types (PR33329, PR33914).
2496 MaxLoadsPerMemcmp = 2;
2497 MaxLoadsPerMemcmpOptSize = 2;
2499 // Default loop alignment, which can be overridden by -align-loops.
2500 setPrefLoopAlignment(Align(16));
2502 // An out-of-order CPU can speculatively execute past a predictable branch,
2503 // but a conditional move could be stalled by an expensive earlier operation.
2504 PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
2505 EnableExtLdPromotion = true;
2506 setPrefFunctionAlignment(Align(16));
2508 verifyIntrinsicTables();
2510 // Default to having -disable-strictnode-mutation on
2511 IsStrictFPEnabled = true;
2514 // This has so far only been implemented for 64-bit MachO.
2515 bool X86TargetLowering::useLoadStackGuardNode() const {
2516 return Subtarget.isTargetMachO() && Subtarget.is64Bit();
2519 bool X86TargetLowering::useStackGuardXorFP() const {
2520 // Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
2521 return Subtarget.getTargetTriple().isOSMSVCRT() && !Subtarget.isTargetMachO();
2524 SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
2525 const SDLoc &DL) const {
2526 EVT PtrTy = getPointerTy(DAG.getDataLayout());
2527 unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
2528 MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
2529 return SDValue(Node, 0);
2532 TargetLoweringBase::LegalizeTypeAction
2533 X86TargetLowering::getPreferredVectorAction(MVT VT) const {
2534 if ((VT == MVT::v32i1 || VT == MVT::v64i1) && Subtarget.hasAVX512() &&
2535 !Subtarget.hasBWI())
2536 return TypeSplitVector;
2538 if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
2539 !Subtarget.hasF16C() && VT.getVectorElementType() == MVT::f16)
2540 return TypeSplitVector;
2542 if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
2543 VT.getVectorElementType() != MVT::i1)
2544 return TypeWidenVector;
2546 return TargetLoweringBase::getPreferredVectorAction(VT);
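// For example, on AVX512F without BWI a v32i1 value is split into two v16i1
// halves, while an odd-sized type such as v3i32 is widened to v4i32 rather
// than split or scalarized.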
2549 FastISel *
2550 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
2551 const TargetLibraryInfo *libInfo) const {
2552 return X86::createFastISel(funcInfo, libInfo);
2555 //===----------------------------------------------------------------------===//
2556 // Other Lowering Hooks
2557 //===----------------------------------------------------------------------===//
2559 bool X86::mayFoldLoad(SDValue Op, const X86Subtarget &Subtarget,
2560 bool AssumeSingleUse) {
2561 if (!AssumeSingleUse && !Op.hasOneUse())
2562 return false;
2563 if (!ISD::isNormalLoad(Op.getNode()))
2564 return false;
2566 // If this is an unaligned vector, make sure the target supports folding it.
2567 auto *Ld = cast<LoadSDNode>(Op.getNode());
2568 if (!Subtarget.hasAVX() && !Subtarget.hasSSEUnalignedMem() &&
2569 Ld->getValueSizeInBits(0) == 128 && Ld->getAlign() < Align(16))
2570 return false;
2572 // TODO: If this is a non-temporal load and the target has an instruction
2573 // for it, it should not be folded. See "useNonTemporalLoad()".
2575 return true;
2578 bool X86::mayFoldLoadIntoBroadcastFromMem(SDValue Op, MVT EltVT,
2579 const X86Subtarget &Subtarget,
2580 bool AssumeSingleUse) {
2581 assert(Subtarget.hasAVX() && "Expected AVX for broadcast from memory");
2582 if (!X86::mayFoldLoad(Op, Subtarget, AssumeSingleUse))
2583 return false;
2585 // We cannot replace a wide volatile load with a broadcast-from-memory,
2586 // because that would narrow the load, which isn't legal for volatiles.
2587 auto *Ld = cast<LoadSDNode>(Op.getNode());
2588 return !Ld->isVolatile() ||
2589 Ld->getValueSizeInBits(0) == EltVT.getScalarSizeInBits();
2592 bool X86::mayFoldIntoStore(SDValue Op) {
2593 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
2596 bool X86::mayFoldIntoZeroExtend(SDValue Op) {
2597 if (Op.hasOneUse()) {
2598 unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
2599 return (ISD::ZERO_EXTEND == Opcode);
2601 return false;
2604 static bool isTargetShuffle(unsigned Opcode) {
2605 switch(Opcode) {
2606 default: return false;
2607 case X86ISD::BLENDI:
2608 case X86ISD::PSHUFB:
2609 case X86ISD::PSHUFD:
2610 case X86ISD::PSHUFHW:
2611 case X86ISD::PSHUFLW:
2612 case X86ISD::SHUFP:
2613 case X86ISD::INSERTPS:
2614 case X86ISD::EXTRQI:
2615 case X86ISD::INSERTQI:
2616 case X86ISD::VALIGN:
2617 case X86ISD::PALIGNR:
2618 case X86ISD::VSHLDQ:
2619 case X86ISD::VSRLDQ:
2620 case X86ISD::MOVLHPS:
2621 case X86ISD::MOVHLPS:
2622 case X86ISD::MOVSHDUP:
2623 case X86ISD::MOVSLDUP:
2624 case X86ISD::MOVDDUP:
2625 case X86ISD::MOVSS:
2626 case X86ISD::MOVSD:
2627 case X86ISD::MOVSH:
2628 case X86ISD::UNPCKL:
2629 case X86ISD::UNPCKH:
2630 case X86ISD::VBROADCAST:
2631 case X86ISD::VPERMILPI:
2632 case X86ISD::VPERMILPV:
2633 case X86ISD::VPERM2X128:
2634 case X86ISD::SHUF128:
2635 case X86ISD::VPERMIL2:
2636 case X86ISD::VPERMI:
2637 case X86ISD::VPPERM:
2638 case X86ISD::VPERMV:
2639 case X86ISD::VPERMV3:
2640 case X86ISD::VZEXT_MOVL:
2641 return true;
2645 static bool isTargetShuffleVariableMask(unsigned Opcode) {
2646 switch (Opcode) {
2647 default: return false;
2648 // Target Shuffles.
2649 case X86ISD::PSHUFB:
2650 case X86ISD::VPERMILPV:
2651 case X86ISD::VPERMIL2:
2652 case X86ISD::VPPERM:
2653 case X86ISD::VPERMV:
2654 case X86ISD::VPERMV3:
2655 return true;
2656 // 'Faux' Target Shuffles.
2657 case ISD::OR:
2658 case ISD::AND:
2659 case X86ISD::ANDNP:
2660 return true;
2664 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
2665 MachineFunction &MF = DAG.getMachineFunction();
2666 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
2667 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2668 int ReturnAddrIndex = FuncInfo->getRAIndex();
2670 if (ReturnAddrIndex == 0) {
2671 // Set up a frame object for the return address.
2672 unsigned SlotSize = RegInfo->getSlotSize();
2673 ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
2674 -(int64_t)SlotSize,
2675 false);
2676 FuncInfo->setRAIndex(ReturnAddrIndex);
2679 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
2682 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model CM,
2683 bool HasSymbolicDisplacement) {
2684 // Offset should fit into 32 bit immediate field.
2685 if (!isInt<32>(Offset))
2686 return false;
2688 // If we don't have a symbolic displacement, we don't have any extra
2689 // restrictions.
2690 if (!HasSymbolicDisplacement)
2691 return true;
2693 // We can fold large offsets in the large code model because we always use
2694 // 64-bit offsets.
2695 if (CM == CodeModel::Large)
2696 return true;
2698 // For the kernel code model we know that all objects reside in the negative
2699 // half of the 32-bit address space. We must not accept negative offsets, since
2700 // they may be just off, and we may accept pretty large positive ones.
2701 if (CM == CodeModel::Kernel)
2702 return Offset >= 0;
2704 // For other non-large code models we assume that the last small object is 16MB
2705 // before the end of the 31-bit boundary. We may also accept pretty large negative
2706 // constants knowing that all objects are in the positive half of address
2707 // space.
2708 return Offset < 16 * 1024 * 1024;
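// For example, with the kernel code model GlobalSym+0x1000 is accepted while
// GlobalSym-0x8 is rejected, and with the small/medium code models any 32-bit
// offset below 16MB is accepted.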
2711 /// Return true if the condition is a signed comparison operation.
2712 static bool isX86CCSigned(unsigned X86CC) {
2713 switch (X86CC) {
2714 default:
2715 llvm_unreachable("Invalid integer condition!");
2716 case X86::COND_E:
2717 case X86::COND_NE:
2718 case X86::COND_B:
2719 case X86::COND_A:
2720 case X86::COND_BE:
2721 case X86::COND_AE:
2722 return false;
2723 case X86::COND_G:
2724 case X86::COND_GE:
2725 case X86::COND_L:
2726 case X86::COND_LE:
2727 return true;
2731 static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
2732 switch (SetCCOpcode) {
2733 default: llvm_unreachable("Invalid integer condition!");
2734 case ISD::SETEQ: return X86::COND_E;
2735 case ISD::SETGT: return X86::COND_G;
2736 case ISD::SETGE: return X86::COND_GE;
2737 case ISD::SETLT: return X86::COND_L;
2738 case ISD::SETLE: return X86::COND_LE;
2739 case ISD::SETNE: return X86::COND_NE;
2740 case ISD::SETULT: return X86::COND_B;
2741 case ISD::SETUGT: return X86::COND_A;
2742 case ISD::SETULE: return X86::COND_BE;
2743 case ISD::SETUGE: return X86::COND_AE;
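// For example, the unsigned (setult a, b) becomes COND_B, which reads the
// carry flag produced by the preceding CMP, while the signed (setlt a, b)
// becomes COND_L, which tests SF != OF.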
2747 /// Do a one-to-one translation of a ISD::CondCode to the X86-specific
2748 /// condition code, returning the condition code and the LHS/RHS of the
2749 /// comparison to make.
2750 static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
2751 bool isFP, SDValue &LHS, SDValue &RHS,
2752 SelectionDAG &DAG) {
2753 if (!isFP) {
2754 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
2755 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnes()) {
2756 // X > -1 -> X == 0, jump !sign.
2757 RHS = DAG.getConstant(0, DL, RHS.getValueType());
2758 return X86::COND_NS;
2760 if (SetCCOpcode == ISD::SETLT && RHSC->isZero()) {
2761 // X < 0 -> X == 0, jump on sign.
2762 return X86::COND_S;
2764 if (SetCCOpcode == ISD::SETGE && RHSC->isZero()) {
2765 // X >= 0 -> X == 0, jump on !sign.
2766 return X86::COND_NS;
2768 if (SetCCOpcode == ISD::SETLT && RHSC->isOne()) {
2769 // X < 1 -> X <= 0
2770 RHS = DAG.getConstant(0, DL, RHS.getValueType());
2771 return X86::COND_LE;
2775 return TranslateIntegerX86CC(SetCCOpcode);
2778 // First determine if it is required or is profitable to flip the operands.
2780 // If LHS is a foldable load, but RHS is not, flip the condition.
2781 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
2782 !ISD::isNON_EXTLoad(RHS.getNode())) {
2783 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
2784 std::swap(LHS, RHS);
2787 switch (SetCCOpcode) {
2788 default: break;
2789 case ISD::SETOLT:
2790 case ISD::SETOLE:
2791 case ISD::SETUGT:
2792 case ISD::SETUGE:
2793 std::swap(LHS, RHS);
2794 break;
2797 // On a floating point condition, the flags are set as follows:
2798 // ZF PF CF op
2799 // 0 | 0 | 0 | X > Y
2800 // 0 | 0 | 1 | X < Y
2801 // 1 | 0 | 0 | X == Y
2802 // 1 | 1 | 1 | unordered
2803 switch (SetCCOpcode) {
2804 default: llvm_unreachable("Condcode should be pre-legalized away");
2805 case ISD::SETUEQ:
2806 case ISD::SETEQ: return X86::COND_E;
2807 case ISD::SETOLT: // flipped
2808 case ISD::SETOGT:
2809 case ISD::SETGT: return X86::COND_A;
2810 case ISD::SETOLE: // flipped
2811 case ISD::SETOGE:
2812 case ISD::SETGE: return X86::COND_AE;
2813 case ISD::SETUGT: // flipped
2814 case ISD::SETULT:
2815 case ISD::SETLT: return X86::COND_B;
2816 case ISD::SETUGE: // flipped
2817 case ISD::SETULE:
2818 case ISD::SETLE: return X86::COND_BE;
2819 case ISD::SETONE:
2820 case ISD::SETNE: return X86::COND_NE;
2821 case ISD::SETUO: return X86::COND_P;
2822 case ISD::SETO: return X86::COND_NP;
2823 case ISD::SETOEQ:
2824 case ISD::SETUNE: return X86::COND_INVALID;
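// For example, (setolt x, y) is handled by swapping the operands above so it
// can reuse the unsigned-style COND_A/COND_B encodings, because UCOMISS/UCOMISD
// only set ZF/PF/CF and never the sign or overflow flags.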
2828 /// Is there a floating point cmov for the specific X86 condition code?
2829 /// The current x86 ISA includes the following FP cmov instructions:
2830 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
2831 static bool hasFPCMov(unsigned X86CC) {
2832 switch (X86CC) {
2833 default:
2834 return false;
2835 case X86::COND_B:
2836 case X86::COND_BE:
2837 case X86::COND_E:
2838 case X86::COND_P:
2839 case X86::COND_A:
2840 case X86::COND_AE:
2841 case X86::COND_NE:
2842 case X86::COND_NP:
2843 return true;
2847 static bool useVPTERNLOG(const X86Subtarget &Subtarget, MVT VT) {
2848 return Subtarget.hasVLX() || Subtarget.canExtendTo512DQ() ||
2849 VT.is512BitVector();
2852 bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
2853 const CallInst &I,
2854 MachineFunction &MF,
2855 unsigned Intrinsic) const {
2856 Info.flags = MachineMemOperand::MONone;
2857 Info.offset = 0;
2859 const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
2860 if (!IntrData) {
2861 switch (Intrinsic) {
2862 case Intrinsic::x86_aesenc128kl:
2863 case Intrinsic::x86_aesdec128kl:
2864 Info.opc = ISD::INTRINSIC_W_CHAIN;
2865 Info.ptrVal = I.getArgOperand(1);
2866 Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 48);
2867 Info.align = Align(1);
2868 Info.flags |= MachineMemOperand::MOLoad;
2869 return true;
2870 case Intrinsic::x86_aesenc256kl:
2871 case Intrinsic::x86_aesdec256kl:
2872 Info.opc = ISD::INTRINSIC_W_CHAIN;
2873 Info.ptrVal = I.getArgOperand(1);
2874 Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 64);
2875 Info.align = Align(1);
2876 Info.flags |= MachineMemOperand::MOLoad;
2877 return true;
2878 case Intrinsic::x86_aesencwide128kl:
2879 case Intrinsic::x86_aesdecwide128kl:
2880 Info.opc = ISD::INTRINSIC_W_CHAIN;
2881 Info.ptrVal = I.getArgOperand(0);
2882 Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 48);
2883 Info.align = Align(1);
2884 Info.flags |= MachineMemOperand::MOLoad;
2885 return true;
2886 case Intrinsic::x86_aesencwide256kl:
2887 case Intrinsic::x86_aesdecwide256kl:
2888 Info.opc = ISD::INTRINSIC_W_CHAIN;
2889 Info.ptrVal = I.getArgOperand(0);
2890 Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 64);
2891 Info.align = Align(1);
2892 Info.flags |= MachineMemOperand::MOLoad;
2893 return true;
2894 case Intrinsic::x86_cmpccxadd32:
2895 case Intrinsic::x86_cmpccxadd64:
2896 case Intrinsic::x86_atomic_bts:
2897 case Intrinsic::x86_atomic_btc:
2898 case Intrinsic::x86_atomic_btr: {
2899 Info.opc = ISD::INTRINSIC_W_CHAIN;
2900 Info.ptrVal = I.getArgOperand(0);
2901 unsigned Size = I.getType()->getScalarSizeInBits();
2902 Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), Size);
2903 Info.align = Align(Size);
2904 Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
2905 MachineMemOperand::MOVolatile;
2906 return true;
2908 case Intrinsic::x86_atomic_bts_rm:
2909 case Intrinsic::x86_atomic_btc_rm:
2910 case Intrinsic::x86_atomic_btr_rm: {
2911 Info.opc = ISD::INTRINSIC_W_CHAIN;
2912 Info.ptrVal = I.getArgOperand(0);
2913 unsigned Size = I.getArgOperand(1)->getType()->getScalarSizeInBits();
2914 Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), Size);
2915 Info.align = Align(Size);
2916 Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
2917 MachineMemOperand::MOVolatile;
2918 return true;
2920 case Intrinsic::x86_aadd32:
2921 case Intrinsic::x86_aadd64:
2922 case Intrinsic::x86_aand32:
2923 case Intrinsic::x86_aand64:
2924 case Intrinsic::x86_aor32:
2925 case Intrinsic::x86_aor64:
2926 case Intrinsic::x86_axor32:
2927 case Intrinsic::x86_axor64:
2928 case Intrinsic::x86_atomic_add_cc:
2929 case Intrinsic::x86_atomic_sub_cc:
2930 case Intrinsic::x86_atomic_or_cc:
2931 case Intrinsic::x86_atomic_and_cc:
2932 case Intrinsic::x86_atomic_xor_cc: {
2933 Info.opc = ISD::INTRINSIC_W_CHAIN;
2934 Info.ptrVal = I.getArgOperand(0);
2935 unsigned Size = I.getArgOperand(1)->getType()->getScalarSizeInBits();
2936 Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), Size);
2937 Info.align = Align(Size);
2938 Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
2939 MachineMemOperand::MOVolatile;
2940 return true;
2943 return false;
2946 switch (IntrData->Type) {
2947 case TRUNCATE_TO_MEM_VI8:
2948 case TRUNCATE_TO_MEM_VI16:
2949 case TRUNCATE_TO_MEM_VI32: {
2950 Info.opc = ISD::INTRINSIC_VOID;
2951 Info.ptrVal = I.getArgOperand(0);
2952 MVT VT = MVT::getVT(I.getArgOperand(1)->getType());
2953 MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
2954 if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
2955 ScalarVT = MVT::i8;
2956 else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
2957 ScalarVT = MVT::i16;
2958 else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
2959 ScalarVT = MVT::i32;
2961 Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
2962 Info.align = Align(1);
2963 Info.flags |= MachineMemOperand::MOStore;
2964 break;
2966 case GATHER:
2967 case GATHER_AVX2: {
2968 Info.opc = ISD::INTRINSIC_W_CHAIN;
2969 Info.ptrVal = nullptr;
2970 MVT DataVT = MVT::getVT(I.getType());
2971 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
2972 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
2973 IndexVT.getVectorNumElements());
2974 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
2975 Info.align = Align(1);
2976 Info.flags |= MachineMemOperand::MOLoad;
2977 break;
2979 case SCATTER: {
2980 Info.opc = ISD::INTRINSIC_VOID;
2981 Info.ptrVal = nullptr;
2982 MVT DataVT = MVT::getVT(I.getArgOperand(3)->getType());
2983 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
2984 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
2985 IndexVT.getVectorNumElements());
2986 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
2987 Info.align = Align(1);
2988 Info.flags |= MachineMemOperand::MOStore;
2989 break;
2991 default:
2992 return false;
2995 return true;
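// For example, for a gather intrinsic with v8i32 data and v4i64 indices only
// min(8, 4) = 4 elements can actually be accessed, so the memory VT is
// recorded as v4i32.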
2998 /// Returns true if the target can instruction select the
2999 /// specified FP immediate natively. If false, the legalizer will
3000 /// materialize the FP immediate as a load from a constant pool.
3001 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
3002 bool ForCodeSize) const {
3003 for (const APFloat &FPImm : LegalFPImmediates)
3004 if (Imm.bitwiseIsEqual(FPImm))
3005 return true;
3006 return false;
3009 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3010 ISD::LoadExtType ExtTy,
3011 EVT NewVT) const {
3012 assert(cast<LoadSDNode>(Load)->isSimple() && "illegal to narrow");
3014 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3015 // relocation must target a movq or addq instruction: don't let the load shrink.
3016 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3017 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3018 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3019 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3021 // If this is an (1) AVX vector load with (2) multiple uses and (3) all of
3022 // those uses are extracted directly into a store, then the extract + store
3023 // can be store-folded. Therefore, it's probably not worth splitting the load.
3024 EVT VT = Load->getValueType(0);
3025 if ((VT.is256BitVector() || VT.is512BitVector()) && !Load->hasOneUse()) {
3026 for (auto UI = Load->use_begin(), UE = Load->use_end(); UI != UE; ++UI) {
3027 // Skip uses of the chain value. Result 0 of the node is the load value.
3028 if (UI.getUse().getResNo() != 0)
3029 continue;
3031 // If this use is not an extract + store, it's probably worth splitting.
3032 if (UI->getOpcode() != ISD::EXTRACT_SUBVECTOR || !UI->hasOneUse() ||
3033 UI->use_begin()->getOpcode() != ISD::STORE)
3034 return true;
3036 // All non-chain uses are extract + store.
3037 return false;
3040 return true;
3043 /// Returns true if it is beneficial to convert a load of a constant
3044 /// to just the constant itself.
3045 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3046 Type *Ty) const {
3047 assert(Ty->isIntegerTy());
3049 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3050 if (BitSize == 0 || BitSize > 64)
3051 return false;
3052 return true;
3055 bool X86TargetLowering::reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
3056 // If we are using XMM registers in the ABI and the condition of the select is
3057 // a floating-point compare and we have blendv or conditional move, then it is
3058 // cheaper to select instead of doing a cross-register move and creating a
3059 // load that depends on the compare result.
3060 bool IsFPSetCC = CmpOpVT.isFloatingPoint() && CmpOpVT != MVT::f128;
3061 return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX();
3064 bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
3065 // TODO: It might be a win to ease or lift this restriction, but the generic
3066 // folds in DAGCombiner conflict with vector folds for an AVX512 target.
3067 if (VT.isVector() && Subtarget.hasAVX512())
3068 return false;
3070 return true;
3073 bool X86TargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
3074 SDValue C) const {
3075 // TODO: We handle scalars using custom code, but generic combining could make
3076 // that unnecessary.
3077 APInt MulC;
3078 if (!ISD::isConstantSplatVector(C.getNode(), MulC))
3079 return false;
3081 // Find the type this will be legalized to. Otherwise we might prematurely
3082 // convert this to shl+add/sub and then still have to type legalize those ops.
3083 // Another choice would be to defer the decision for illegal types until
3084 // after type legalization. But constant splat vectors of i64 can't make it
3085 // through type legalization on 32-bit targets so we would need to special
3086 // case vXi64.
3087 while (getTypeAction(Context, VT) != TypeLegal)
3088 VT = getTypeToTransformTo(Context, VT);
3090 // If vector multiply is legal, assume that's faster than shl + add/sub.
3091 // Multiply is a complex op with higher latency and lower throughput in
3092 // most implementations, but sub-vXi32 vector multiplies are always fast,
3093 // vXi32 mustn't have a SlowMULLD implementation, and anything larger (vXi64)
3094 // is always going to be slow.
3095 unsigned EltSizeInBits = VT.getScalarSizeInBits();
3096 if (isOperationLegal(ISD::MUL, VT) && EltSizeInBits <= 32 &&
3097 (EltSizeInBits != 32 || !Subtarget.isPMULLDSlow()))
3098 return false;
3100 // shl+add, shl+sub, shl+add+neg
3101 return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2() ||
3102 (1 - MulC).isPowerOf2() || (-(MulC + 1)).isPowerOf2();
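// For example, mul x, 17 -> (x << 4) + x, mul x, 15 -> (x << 4) - x, and
// mul x, -9 -> -((x << 3) + x).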
3105 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
3106 unsigned Index) const {
3107 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3108 return false;
3110 // Mask vectors support all subregister combinations and operations that
3111 // extract half of the vector.
3112 if (ResVT.getVectorElementType() == MVT::i1)
3113 return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) &&
3114 (Index == ResVT.getVectorNumElements()));
3116 return (Index % ResVT.getVectorNumElements()) == 0;
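// For example, on an AVX target extracting a v4f32 from a v8f32 at Index 4 is
// considered cheap (it is the upper 128-bit half, reachable with a single
// VEXTRACTF128), while an extract at Index 2 straddles that boundary and is not.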
3119 bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
3120 unsigned Opc = VecOp.getOpcode();
3122 // Assume target opcodes can't be scalarized.
3123 // TODO - do we have any exceptions?
3124 if (Opc >= ISD::BUILTIN_OP_END)
3125 return false;
3127 // If the vector op is not supported, try to convert to scalar.
3128 EVT VecVT = VecOp.getValueType();
3129 if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
3130 return true;
3132 // If the vector op is supported, but the scalar op is not, the transform may
3133 // not be worthwhile.
3134 EVT ScalarVT = VecVT.getScalarType();
3135 return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
3138 bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT,
3139 bool) const {
3140 // TODO: Allow vectors?
3141 if (VT.isVector())
3142 return false;
3143 return VT.isSimple() || !isOperationExpand(Opcode, VT);
3146 bool X86TargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
3147 // Speculate cttz only if we can directly use TZCNT or can promote to i32.
3148 return Subtarget.hasBMI() ||
3149 (!Ty->isVectorTy() && Ty->getScalarSizeInBits() < 32);
3152 bool X86TargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
3153 // Speculate ctlz only if we can directly use LZCNT.
3154 return Subtarget.hasLZCNT();
3157 bool X86TargetLowering::ShouldShrinkFPConstant(EVT VT) const {
3158 // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
3159 // expensive than a straight movsd. On the other hand, it's important to
3160 // shrink long double fp constant since fldt is very slow.
3161 return !Subtarget.hasSSE2() || VT == MVT::f80;
3164 bool X86TargetLowering::isScalarFPTypeInSSEReg(EVT VT) const {
3165 return (VT == MVT::f64 && Subtarget.hasSSE2()) ||
3166 (VT == MVT::f32 && Subtarget.hasSSE1()) || VT == MVT::f16;
3169 bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
3170 const SelectionDAG &DAG,
3171 const MachineMemOperand &MMO) const {
3172 if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
3173 BitcastVT.getVectorElementType() == MVT::i1)
3174 return false;
3176 if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
3177 return false;
3179 // If both types are legal vectors, it's always ok to convert them.
3180 if (LoadVT.isVector() && BitcastVT.isVector() &&
3181 isTypeLegal(LoadVT) && isTypeLegal(BitcastVT))
3182 return true;
3184 return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT, DAG, MMO);
3187 bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
3188 const MachineFunction &MF) const {
3189 // Do not merge to float value size (128 bits) if no implicit
3190 // float attribute is set.
3191 bool NoFloat = MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat);
3193 if (NoFloat) {
3194 unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
3195 return (MemVT.getSizeInBits() <= MaxIntSize);
3197 // Make sure we don't merge greater than our preferred vector
3198 // width.
3199 if (MemVT.getSizeInBits() > Subtarget.getPreferVectorWidth())
3200 return false;
3202 return true;
3205 bool X86TargetLowering::isCtlzFast() const {
3206 return Subtarget.hasFastLZCNT();
3209 bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
3210 const Instruction &AndI) const {
3211 return true;
3214 bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
3215 EVT VT = Y.getValueType();
3217 if (VT.isVector())
3218 return false;
3220 if (!Subtarget.hasBMI())
3221 return false;
3223 // There are only 32-bit and 64-bit forms for 'andn'.
3224 if (VT != MVT::i32 && VT != MVT::i64)
3225 return false;
3227 return !isa<ConstantSDNode>(Y);
3230 bool X86TargetLowering::hasAndNot(SDValue Y) const {
3231 EVT VT = Y.getValueType();
3233 if (!VT.isVector())
3234 return hasAndNotCompare(Y);
3236 // Vector.
3238 if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
3239 return false;
3241 if (VT == MVT::v4i32)
3242 return true;
3244 return Subtarget.hasSSE2();
3247 bool X86TargetLowering::hasBitTest(SDValue X, SDValue Y) const {
3248 return X.getValueType().isScalarInteger(); // 'bt'
3251 bool X86TargetLowering::
3252 shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
3253 SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
3254 unsigned OldShiftOpcode, unsigned NewShiftOpcode,
3255 SelectionDAG &DAG) const {
3256 // Does baseline recommend not to perform the fold by default?
3257 if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
3258 X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
3259 return false;
3260 // For scalars this transform is always beneficial.
3261 if (X.getValueType().isScalarInteger())
3262 return true;
3263 // If all the shift amounts are identical, then transform is beneficial even
3264 // with rudimentary SSE2 shifts.
3265 if (DAG.isSplatValue(Y, /*AllowUndefs=*/true))
3266 return true;
3267 // If we have AVX2 with its powerful shift operations, then it's also good.
3268 if (Subtarget.hasAVX2())
3269 return true;
3270 // Pre-AVX2 vector codegen for this pattern is best for the variant with 'shl'.
3271 return NewShiftOpcode == ISD::SHL;
3274 unsigned X86TargetLowering::preferedOpcodeForCmpEqPiecesOfOperand(
3275 EVT VT, unsigned ShiftOpc, bool MayTransformRotate,
3276 const APInt &ShiftOrRotateAmt, const std::optional<APInt> &AndMask) const {
3277 if (!VT.isInteger())
3278 return ShiftOpc;
3280 bool PreferRotate = false;
3281 if (VT.isVector()) {
3282 // For vectors, if we have rotate instruction support, then it's definitely
3283 // best. Otherwise it's not clear what's best, so just don't make changes.
3284 PreferRotate = Subtarget.hasAVX512() && (VT.getScalarType() == MVT::i32 ||
3285 VT.getScalarType() == MVT::i64);
3286 } else {
3287 // For scalars, if we have BMI2, prefer rotate for rorx. Otherwise prefer
3288 // rotate unless we have a zext mask+shr.
3289 PreferRotate = Subtarget.hasBMI2();
3290 if (!PreferRotate) {
3291 unsigned MaskBits =
3292 VT.getScalarSizeInBits() - ShiftOrRotateAmt.getZExtValue();
3293 PreferRotate = (MaskBits != 8) && (MaskBits != 16) && (MaskBits != 32);
3297 if (ShiftOpc == ISD::SHL || ShiftOpc == ISD::SRL) {
3298 assert(AndMask.has_value() && "Null andmask when querying about shift+and");
3300 if (PreferRotate && MayTransformRotate)
3301 return ISD::ROTL;
3303 // For vectors we don't really get much benefit from swapping around constants.
3304 // Maybe we could check if the DAG has the flipped node already in the
3305 // future.
3306 if (VT.isVector())
3307 return ShiftOpc;
3309 // See if it's beneficial to swap the shift type.
3310 if (ShiftOpc == ISD::SHL) {
3311 // If the current setup has an imm64 mask, then the inverse will have
3312 // at least an imm32 mask (or be zext i32 -> i64).
3313 if (VT == MVT::i64)
3314 return AndMask->getSignificantBits() > 32 ? (unsigned)ISD::SRL
3315 : ShiftOpc;
3317 // We only benefit if the mask requires at least 7 bits. We
3318 // don't want to replace shl by 1, 2 or 3 as those can be implemented
3319 // with lea/add.
3320 return ShiftOrRotateAmt.uge(7) ? (unsigned)ISD::SRL : ShiftOpc;
3323 if (VT == MVT::i64)
3324 // Keep an exactly 32-bit imm64; this is zext i32 -> i64, which is
3325 // extremely efficient.
3326 return AndMask->getSignificantBits() > 33 ? (unsigned)ISD::SHL : ShiftOpc;
3328 // Keep small shifts as shl so we can generate add/lea.
3329 return ShiftOrRotateAmt.ult(7) ? (unsigned)ISD::SHL : ShiftOpc;
3332 // We prefer rotate for vectors, or if we won't get a zext mask with SRL
3333 // (PreferRotate will be set in the latter case).
3334 if (PreferRotate || VT.isVector())
3335 return ShiftOpc;
3337 // Non-vector type and we have a zext mask with SRL.
3338 return ISD::SRL;
3341 bool X86TargetLowering::preferScalarizeSplat(SDNode *N) const {
3342 return N->getOpcode() != ISD::FP_EXTEND;
3345 bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
3346 const SDNode *N, CombineLevel Level) const {
3347 assert(((N->getOpcode() == ISD::SHL &&
3348 N->getOperand(0).getOpcode() == ISD::SRL) ||
3349 (N->getOpcode() == ISD::SRL &&
3350 N->getOperand(0).getOpcode() == ISD::SHL)) &&
3351 "Expected shift-shift mask");
3352 // TODO: Should we always create i64 masks? Or only folded immediates?
3353 EVT VT = N->getValueType(0);
3354 if ((Subtarget.hasFastVectorShiftMasks() && VT.isVector()) ||
3355 (Subtarget.hasFastScalarShiftMasks() && !VT.isVector())) {
3356 // Only fold if the shift values are equal - so it folds to AND.
3357 // TODO - we should fold if either is a non-uniform vector but we don't do
3358 // the fold for non-splats yet.
3359 return N->getOperand(1) == N->getOperand(0).getOperand(1);
3361 return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level);
3364 bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
3365 EVT VT = Y.getValueType();
3367 // For vectors, we don't have a preference, but we probably want a mask.
3368 if (VT.isVector())
3369 return false;
3371 // 64-bit shifts on 32-bit targets produce really bad bloated code.
3372 if (VT == MVT::i64 && !Subtarget.is64Bit())
3373 return false;
3375 return true;
3378 TargetLowering::ShiftLegalizationStrategy
3379 X86TargetLowering::preferredShiftLegalizationStrategy(
3380 SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const {
3381 if (DAG.getMachineFunction().getFunction().hasMinSize() &&
3382 !Subtarget.isOSWindows())
3383 return ShiftLegalizationStrategy::LowerToLibcall;
3384 return TargetLowering::preferredShiftLegalizationStrategy(DAG, N,
3385 ExpansionFactor);
3388 bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
3389 // Any legal vector type can be splatted more efficiently than
3390 // loading/spilling from memory.
3391 return isTypeLegal(VT);
3394 MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
3395 MVT VT = MVT::getIntegerVT(NumBits);
3396 if (isTypeLegal(VT))
3397 return VT;
3399 // PMOVMSKB can handle this.
3400 if (NumBits == 128 && isTypeLegal(MVT::v16i8))
3401 return MVT::v16i8;
3403 // VPMOVMSKB can handle this.
3404 if (NumBits == 256 && isTypeLegal(MVT::v32i8))
3405 return MVT::v32i8;
3407 // TODO: Allow 64-bit type for 32-bit target.
3408 // TODO: 512-bit types should be allowed, but make sure that those
3409 // cases are handled in combineVectorSizedSetCCEquality().
3411 return MVT::INVALID_SIMPLE_VALUE_TYPE;
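// For example, a 128-bit equality compare can be lowered as two v16i8 loads,
// a PCMPEQB and a PMOVMSKB whose result is compared against 0xFFFF, instead of
// a chain of scalar compares and branches.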
3414 /// Val is the undef sentinel value or equal to the specified value.
3415 static bool isUndefOrEqual(int Val, int CmpVal) {
3416 return ((Val == SM_SentinelUndef) || (Val == CmpVal));
3419 /// Return true if every element in Mask is the undef sentinel value or equal to
3420 /// the specified value.
3421 static bool isUndefOrEqual(ArrayRef<int> Mask, int CmpVal) {
3422 return llvm::all_of(Mask, [CmpVal](int M) {
3423 return (M == SM_SentinelUndef) || (M == CmpVal);
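// For example, the mask {0, SM_SentinelUndef, 0, 0} satisfies
// isUndefOrEqual(Mask, 0) because undef sentinel elements are ignored.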
3427 /// Return true if every element in Mask, beginning from position Pos and ending
3428 /// in Pos+Size is the undef sentinel value or equal to the specified value.
3429 static bool isUndefOrEqualInRange(ArrayRef<int> Mask, int CmpVal, unsigned Pos,
3430 unsigned Size) {
3431 return llvm::all_of(Mask.slice(Pos, Size),
3432 [CmpVal](int M) { return isUndefOrEqual(M, CmpVal); });
3435 /// Val is either the undef or zero sentinel value.
3436 static bool isUndefOrZero(int Val) {
3437 return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
3440 /// Return true if every element in Mask, beginning from position Pos and ending
3441 /// in Pos+Size is the undef sentinel value.
3442 static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
3443 return llvm::all_of(Mask.slice(Pos, Size),
3444 [](int M) { return M == SM_SentinelUndef; });
3447 /// Return true if the mask creates a vector whose lower half is undefined.
3448 static bool isUndefLowerHalf(ArrayRef<int> Mask) {
3449 unsigned NumElts = Mask.size();
3450 return isUndefInRange(Mask, 0, NumElts / 2);
3453 /// Return true if the mask creates a vector whose upper half is undefined.
3454 static bool isUndefUpperHalf(ArrayRef<int> Mask) {
3455 unsigned NumElts = Mask.size();
3456 return isUndefInRange(Mask, NumElts / 2, NumElts / 2);
3459 /// Return true if Val falls within the specified range [Low, Hi).
3460 static bool isInRange(int Val, int Low, int Hi) {
3461 return (Val >= Low && Val < Hi);
3464 /// Return true if the value of any element in Mask falls within the specified
3465 /// range [Low, Hi).
3466 static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
3467 return llvm::any_of(Mask, [Low, Hi](int M) { return isInRange(M, Low, Hi); });
3470 /// Return true if the value of any element in Mask is the zero sentinel value.
3471 static bool isAnyZero(ArrayRef<int> Mask) {
3472 return llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
3475 /// Return true if the value of any element in Mask is the zero or undef
3476 /// sentinel values.
3477 static bool isAnyZeroOrUndef(ArrayRef<int> Mask) {
3478 return llvm::any_of(Mask, [](int M) {
3479 return M == SM_SentinelZero || M == SM_SentinelUndef;
3483 /// Return true if Val is undef or if its value falls within the
3484 /// specified range [Low, Hi).
3485 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3486 return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi);
3489 /// Return true if every element in Mask is undef or if its value
3490 /// falls within the specified range [Low, Hi).
3491 static bool isUndefOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
3492 return llvm::all_of(
3493 Mask, [Low, Hi](int M) { return isUndefOrInRange(M, Low, Hi); });
3496 /// Return true if Val is undef, zero or if its value falls within the
3497 /// specified range [Low, Hi).
3498 static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
3499 return isUndefOrZero(Val) || isInRange(Val, Low, Hi);
3502 /// Return true if every element in Mask is undef, zero or if its value
3503 /// falls within the specified range [Low, Hi).
3504 static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
3505 return llvm::all_of(
3506 Mask, [Low, Hi](int M) { return isUndefOrZeroOrInRange(M, Low, Hi); });
3509 /// Return true if every element in Mask, beginning
3510 /// from position Pos and ending in Pos + Size, falls within the specified
3511 /// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
3512 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
3513 unsigned Size, int Low, int Step = 1) {
3514 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
3515 if (!isUndefOrEqual(Mask[i], Low))
3516 return false;
3517 return true;
3520 /// Return true if every element in Mask, beginning
3521 /// from position Pos and ending in Pos+Size, falls within the specified
3522 /// sequential range [Low, Low+Size), or is undef or is zero.
3523 static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
3524 unsigned Size, int Low,
3525 int Step = 1) {
3526 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
3527 if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
3528 return false;
3529 return true;
3532 /// Return true if every element in Mask, beginning
3533 /// from position Pos and ending in Pos+Size is undef or is zero.
3534 static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
3535 unsigned Size) {
3536 return llvm::all_of(Mask.slice(Pos, Size), isUndefOrZero);
3539 /// Helper function to test whether a shuffle mask could be
3540 /// simplified by widening the elements being shuffled.
3542 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
3543 /// leaves it in an unspecified state.
3545 /// NOTE: This must handle normal vector shuffle masks and *target* vector
3546 /// shuffle masks. The latter have the special property of a '-2' representing
3547 /// a zero-ed lane of a vector.
3548 static bool canWidenShuffleElements(ArrayRef<int> Mask,
3549 SmallVectorImpl<int> &WidenedMask) {
3550 WidenedMask.assign(Mask.size() / 2, 0);
3551 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
3552 int M0 = Mask[i];
3553 int M1 = Mask[i + 1];
3555 // If both elements are undef, it's trivial.
3556 if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) {
3557 WidenedMask[i / 2] = SM_SentinelUndef;
3558 continue;
3561 // Check for an undef mask and a mask value properly aligned to fit with
3562 // a pair of values. If we find such a case, use the non-undef mask's value.
3563 if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
3564 WidenedMask[i / 2] = M1 / 2;
3565 continue;
3567 if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
3568 WidenedMask[i / 2] = M0 / 2;
3569 continue;
3572 // When zeroing, we need to spread the zeroing across both lanes to widen.
3573 if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
3574 if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
3575 (M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
3576 WidenedMask[i / 2] = SM_SentinelZero;
3577 continue;
3579 return false;
3582 // Finally check if the two mask values are adjacent and aligned with
3583 // a pair.
3584 if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
3585 WidenedMask[i / 2] = M0 / 2;
3586 continue;
3589 // Otherwise we can't safely widen the elements used in this shuffle.
3590 return false;
3592 assert(WidenedMask.size() == Mask.size() / 2 &&
3593 "Incorrect size of mask after widening the elements!");
3595 return true;
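// For example, the v8i16 mask {0,1, 4,5, -1,-1, 6,7} widens to the v4i32 mask
// {0, 2, -1, 3}, whereas a pair such as (0, 2) cannot be widened because the
// two elements are not adjacent halves of one wider element.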
3598 static bool canWidenShuffleElements(ArrayRef<int> Mask,
3599 const APInt &Zeroable,
3600 bool V2IsZero,
3601 SmallVectorImpl<int> &WidenedMask) {
3602 // Create an alternative mask with info about zeroable elements.
3603 // Here we do not set undef elements as zeroable.
3604 SmallVector<int, 64> ZeroableMask(Mask);
3605 if (V2IsZero) {
3606 assert(!Zeroable.isZero() && "V2's non-undef elements are used?!");
3607 for (int i = 0, Size = Mask.size(); i != Size; ++i)
3608 if (Mask[i] != SM_SentinelUndef && Zeroable[i])
3609 ZeroableMask[i] = SM_SentinelZero;
3611 return canWidenShuffleElements(ZeroableMask, WidenedMask);
3614 static bool canWidenShuffleElements(ArrayRef<int> Mask) {
3615 SmallVector<int, 32> WidenedMask;
3616 return canWidenShuffleElements(Mask, WidenedMask);
3619 // Attempt to narrow/widen shuffle mask until it matches the target number of
3620 // elements.
3621 static bool scaleShuffleElements(ArrayRef<int> Mask, unsigned NumDstElts,
3622 SmallVectorImpl<int> &ScaledMask) {
3623 unsigned NumSrcElts = Mask.size();
3624 assert(((NumSrcElts % NumDstElts) == 0 || (NumDstElts % NumSrcElts) == 0) &&
3625 "Illegal shuffle scale factor");
3627 // Narrowing is guaranteed to work.
3628 if (NumDstElts >= NumSrcElts) {
3629 int Scale = NumDstElts / NumSrcElts;
3630 llvm::narrowShuffleMaskElts(Scale, Mask, ScaledMask);
3631 return true;
3634 // We have to repeat the widening until we reach the target size, but we can
3635 // split out the first widening as it sets up ScaledMask for us.
3636 if (canWidenShuffleElements(Mask, ScaledMask)) {
3637 while (ScaledMask.size() > NumDstElts) {
3638 SmallVector<int, 16> WidenedMask;
3639 if (!canWidenShuffleElements(ScaledMask, WidenedMask))
3640 return false;
3641 ScaledMask = std::move(WidenedMask);
3643 return true;
3646 return false;
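// For example, the 4-element mask {0, 1, 2, 3} scaled up to 8 elements becomes
// {0, 1, 2, 3, 4, 5, 6, 7}, and scaled down to 2 elements becomes {0, 1}.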
3649 /// Returns true if Elt is a constant zero or a floating point constant +0.0.
3650 bool X86::isZeroNode(SDValue Elt) {
3651 return isNullConstant(Elt) || isNullFPConstant(Elt);
3654 // Build a vector of constants.
3655 // Use an UNDEF node if MaskElt == -1.
3656 // Split 64-bit constants in the 32-bit mode.
3657 static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
3658 const SDLoc &dl, bool IsMask = false) {
3660 SmallVector<SDValue, 32> Ops;
3661 bool Split = false;
3663 MVT ConstVecVT = VT;
3664 unsigned NumElts = VT.getVectorNumElements();
3665 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
3666 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
3667 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
3668 Split = true;
3671 MVT EltVT = ConstVecVT.getVectorElementType();
3672 for (unsigned i = 0; i < NumElts; ++i) {
3673 bool IsUndef = Values[i] < 0 && IsMask;
3674 SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
3675 DAG.getConstant(Values[i], dl, EltVT);
3676 Ops.push_back(OpNode);
3677 if (Split)
3678 Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
3679 DAG.getConstant(0, dl, EltVT));
3681 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
3682 if (Split)
3683 ConstsNode = DAG.getBitcast(VT, ConstsNode);
3684 return ConstsNode;
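// For example, on a 32-bit target a v2i64 constant index vector {1, 3} is
// built as the v4i32 vector {1, 0, 3, 0} and then bitcast back to v2i64.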
3687 static SDValue getConstVector(ArrayRef<APInt> Bits, const APInt &Undefs,
3688 MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
3689 assert(Bits.size() == Undefs.getBitWidth() &&
3690 "Unequal constant and undef arrays");
3691 SmallVector<SDValue, 32> Ops;
3692 bool Split = false;
3694 MVT ConstVecVT = VT;
3695 unsigned NumElts = VT.getVectorNumElements();
3696 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
3697 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
3698 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
3699 Split = true;
3702 MVT EltVT = ConstVecVT.getVectorElementType();
3703 for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
3704 if (Undefs[i]) {
3705 Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
3706 continue;
3708 const APInt &V = Bits[i];
3709 assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
3710 if (Split) {
3711 Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
3712 Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
3713 } else if (EltVT == MVT::f32) {
3714 APFloat FV(APFloat::IEEEsingle(), V);
3715 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
3716 } else if (EltVT == MVT::f64) {
3717 APFloat FV(APFloat::IEEEdouble(), V);
3718 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
3719 } else {
3720 Ops.push_back(DAG.getConstant(V, dl, EltVT));
3724 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
3725 return DAG.getBitcast(VT, ConstsNode);
3728 static SDValue getConstVector(ArrayRef<APInt> Bits, MVT VT,
3729 SelectionDAG &DAG, const SDLoc &dl) {
3730 APInt Undefs = APInt::getZero(Bits.size());
3731 return getConstVector(Bits, Undefs, VT, DAG, dl);
3734 /// Returns a vector of specified type with all zero elements.
3735 static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
3736 SelectionDAG &DAG, const SDLoc &dl) {
3737 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
3738 VT.getVectorElementType() == MVT::i1) &&
3739 "Unexpected vector type");
3741 // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
3742 // type. This ensures they get CSE'd. But if the integer type is not
3743 // available, use a floating-point +0.0 instead.
3744 SDValue Vec;
3745 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3746 if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
3747 Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
3748 } else if (VT.isFloatingPoint() &&
3749 TLI.isTypeLegal(VT.getVectorElementType())) {
3750 Vec = DAG.getConstantFP(+0.0, dl, VT);
3751 } else if (VT.getVectorElementType() == MVT::i1) {
3752 assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
3753 "Unexpected vector type");
3754 Vec = DAG.getConstant(0, dl, VT);
3755 } else {
3756 unsigned Num32BitElts = VT.getSizeInBits() / 32;
3757 Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
3759 return DAG.getBitcast(VT, Vec);
3762 // Helper to determine if the ops are all subvectors extracted from a
3763 // single source. If we allow commute they don't have to be in order (Lo/Hi).
3764 static SDValue getSplitVectorSrc(SDValue LHS, SDValue RHS, bool AllowCommute) {
3765 if (LHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
3766 RHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
3767 LHS.getValueType() != RHS.getValueType() ||
3768 LHS.getOperand(0) != RHS.getOperand(0))
3769 return SDValue();
3771 SDValue Src = LHS.getOperand(0);
3772 if (Src.getValueSizeInBits() != (LHS.getValueSizeInBits() * 2))
3773 return SDValue();
3775 unsigned NumElts = LHS.getValueType().getVectorNumElements();
3776 if ((LHS.getConstantOperandAPInt(1) == 0 &&
3777 RHS.getConstantOperandAPInt(1) == NumElts) ||
3778 (AllowCommute && RHS.getConstantOperandAPInt(1) == 0 &&
3779 LHS.getConstantOperandAPInt(1) == NumElts))
3780 return Src;
3782 return SDValue();
3785 static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
3786 const SDLoc &dl, unsigned vectorWidth) {
3787 EVT VT = Vec.getValueType();
3788 EVT ElVT = VT.getVectorElementType();
3789 unsigned Factor = VT.getSizeInBits() / vectorWidth;
3790 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
3791 VT.getVectorNumElements() / Factor);
3793 // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
3794 unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
3795 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
3797 // This is the index of the first element of the vectorWidth-bit chunk
3798 // we want. Since ElemsPerChunk is a power of 2, we just need to clear bits.
3799 IdxVal &= ~(ElemsPerChunk - 1);
3801 // If the input is a buildvector just emit a smaller one.
3802 if (Vec.getOpcode() == ISD::BUILD_VECTOR)
3803 return DAG.getBuildVector(ResultVT, dl,
3804 Vec->ops().slice(IdxVal, ElemsPerChunk));
3806 // Check if we're extracting the upper undef of a widening pattern.
3807 if (Vec.getOpcode() == ISD::INSERT_SUBVECTOR && Vec.getOperand(0).isUndef() &&
3808 Vec.getOperand(1).getValueType().getVectorNumElements() <= IdxVal &&
3809 isNullConstant(Vec.getOperand(2)))
3810 return DAG.getUNDEF(ResultVT);
3812 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
3813 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
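// For example, asking for 128 bits of a v8f32 at IdxVal 6 rounds the index
// down to 4 (ElemsPerChunk == 4) and emits extract_subvector(Vec, 4) of type
// v4f32.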
3816 /// Generate a DAG to grab 128-bits from a vector > 128 bits. This
3817 /// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
3818 /// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
3819 /// instructions or a simple subregister reference. Idx is an index in the
3820 /// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
3821 /// lowering EXTRACT_VECTOR_ELT operations easier.
3822 static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
3823 SelectionDAG &DAG, const SDLoc &dl) {
3824 assert((Vec.getValueType().is256BitVector() ||
3825 Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
3826 return extractSubVector(Vec, IdxVal, DAG, dl, 128);
3829 /// Generate a DAG to grab 256-bits from a 512-bit vector.
3830 static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
3831 SelectionDAG &DAG, const SDLoc &dl) {
3832 assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
3833 return extractSubVector(Vec, IdxVal, DAG, dl, 256);
3836 static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
3837 SelectionDAG &DAG, const SDLoc &dl,
3838 unsigned vectorWidth) {
3839 assert((vectorWidth == 128 || vectorWidth == 256) &&
3840 "Unsupported vector width");
3841 // Inserting UNDEF just returns Result.
3842 if (Vec.isUndef())
3843 return Result;
3844 EVT VT = Vec.getValueType();
3845 EVT ElVT = VT.getVectorElementType();
3846 EVT ResultVT = Result.getValueType();
3848 // Insert the relevant vectorWidth bits.
3849 unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
3850 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
3852 // This is the index of the first element of the vectorWidth-bit chunk
3853 // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
3854 IdxVal &= ~(ElemsPerChunk - 1);
3856 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
3857 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
3860 /// Generate a DAG to put 128-bits into a vector > 128 bits. This
3861 /// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
3862 /// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
3863 /// simple superregister reference. Idx is an index in the 128 bits
3864 /// we want. It need not be aligned to a 128-bit boundary. That makes
3865 /// lowering INSERT_VECTOR_ELT operations easier.
3866 static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
3867 SelectionDAG &DAG, const SDLoc &dl) {
3868 assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
3869 return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
3872 /// Widen a vector to a larger size with the same scalar type, with the new
3873 /// elements either zero or undef.
3874 static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
3875 const X86Subtarget &Subtarget, SelectionDAG &DAG,
3876 const SDLoc &dl) {
3877 assert(Vec.getValueSizeInBits().getFixedValue() <= VT.getFixedSizeInBits() &&
3878 Vec.getValueType().getScalarType() == VT.getScalarType() &&
3879 "Unsupported vector widening type");
3880 SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
3881 : DAG.getUNDEF(VT);
3882 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
3883 DAG.getIntPtrConstant(0, dl));
3886 /// Widen a vector to a larger size with the same scalar type, with the new
3887 /// elements either zero or undef.
3888 static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
3889 const X86Subtarget &Subtarget, SelectionDAG &DAG,
3890 const SDLoc &dl, unsigned WideSizeInBits) {
3891 assert(Vec.getValueSizeInBits() <= WideSizeInBits &&
3892 (WideSizeInBits % Vec.getScalarValueSizeInBits()) == 0 &&
3893 "Unsupported vector widening type");
3894 unsigned WideNumElts = WideSizeInBits / Vec.getScalarValueSizeInBits();
3895 MVT SVT = Vec.getSimpleValueType().getScalarType();
3896 MVT VT = MVT::getVectorVT(SVT, WideNumElts);
3897 return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
3900 /// Widen a mask vector type to a minimum of v8i1/v16i1 to allow use of KSHIFT
3901 /// and bitcast with integer types.
3902 static MVT widenMaskVectorType(MVT VT, const X86Subtarget &Subtarget) {
3903 assert(VT.getVectorElementType() == MVT::i1 && "Expected bool vector");
3904 unsigned NumElts = VT.getVectorNumElements();
3905 if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
3906 return Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
3907 return VT;
3910 /// Widen a mask vector to a minimum of v8i1/v16i1 to allow use of KSHIFT and
3911 /// bitcast with integer types.
3912 static SDValue widenMaskVector(SDValue Vec, bool ZeroNewElements,
3913 const X86Subtarget &Subtarget, SelectionDAG &DAG,
3914 const SDLoc &dl) {
3915 MVT VT = widenMaskVectorType(Vec.getSimpleValueType(), Subtarget);
3916 return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
3919 // Helper function to collect subvector ops that are concatenated together,
3920 // either by ISD::CONCAT_VECTORS or an ISD::INSERT_SUBVECTOR series.
3921 // The subvectors in Ops are guaranteed to be the same type.
3922 static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops,
3923 SelectionDAG &DAG) {
3924 assert(Ops.empty() && "Expected an empty ops vector");
3926 if (N->getOpcode() == ISD::CONCAT_VECTORS) {
3927 Ops.append(N->op_begin(), N->op_end());
3928 return true;
3931 if (N->getOpcode() == ISD::INSERT_SUBVECTOR) {
3932 SDValue Src = N->getOperand(0);
3933 SDValue Sub = N->getOperand(1);
3934 const APInt &Idx = N->getConstantOperandAPInt(2);
3935 EVT VT = Src.getValueType();
3936 EVT SubVT = Sub.getValueType();
3938 // TODO - Handle more general insert_subvector chains.
3939 if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2)) {
3940 // insert_subvector(undef, x, lo)
3941 if (Idx == 0 && Src.isUndef()) {
3942 Ops.push_back(Sub);
3943 Ops.push_back(DAG.getUNDEF(SubVT));
3944 return true;
3946 if (Idx == (VT.getVectorNumElements() / 2)) {
3947 // insert_subvector(insert_subvector(undef, x, lo), y, hi)
3948 if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
3949 Src.getOperand(1).getValueType() == SubVT &&
3950 isNullConstant(Src.getOperand(2))) {
3951 Ops.push_back(Src.getOperand(1));
3952 Ops.push_back(Sub);
3953 return true;
3955 // insert_subvector(x, extract_subvector(x, lo), hi)
3956 if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
3957 Sub.getOperand(0) == Src && isNullConstant(Sub.getOperand(1))) {
3958 Ops.append(2, Sub);
3959 return true;
3961 // insert_subvector(undef, x, hi)
3962 if (Src.isUndef()) {
3963 Ops.push_back(DAG.getUNDEF(SubVT));
3964 Ops.push_back(Sub);
3965 return true;
3971 return false;
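// For example, insert_subvector(insert_subvector(undef, X, 0), Y, NumElts/2)
// collects {X, Y}, and insert_subvector(undef, X, 0) collects {X, undef}.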
3974 // Helper to check if \p V can be split into subvectors and the upper subvectors
3975 // are all undef, in which case return the lower half of the vector.
3976 static SDValue isUpperSubvectorUndef(SDValue V, const SDLoc &DL,
3977 SelectionDAG &DAG) {
3978 SmallVector<SDValue> SubOps;
3979 if (!collectConcatOps(V.getNode(), SubOps, DAG))
3980 return SDValue();
3982 unsigned NumSubOps = SubOps.size();
3983 unsigned HalfNumSubOps = NumSubOps / 2;
3984 assert((NumSubOps % 2) == 0 && "Unexpected number of subvectors");
3986 ArrayRef<SDValue> UpperOps(SubOps.begin() + HalfNumSubOps, SubOps.end());
3987 if (any_of(UpperOps, [](SDValue Op) { return !Op.isUndef(); }))
3988 return SDValue();
3990 EVT HalfVT = V.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
3991 ArrayRef<SDValue> LowerOps(SubOps.begin(), SubOps.begin() + HalfNumSubOps);
3992 return DAG.getNode(ISD::CONCAT_VECTORS, DL, HalfVT, LowerOps);
3995 // Helper to check if we can access all the constituent subvectors without any
3996 // extract ops.
3997 static bool isFreeToSplitVector(SDNode *N, SelectionDAG &DAG) {
3998 SmallVector<SDValue> Ops;
3999 return collectConcatOps(N, Ops, DAG);
4002 static std::pair<SDValue, SDValue> splitVector(SDValue Op, SelectionDAG &DAG,
4003 const SDLoc &dl) {
4004 EVT VT = Op.getValueType();
4005 unsigned NumElems = VT.getVectorNumElements();
4006 unsigned SizeInBits = VT.getSizeInBits();
4007 assert((NumElems % 2) == 0 && (SizeInBits % 2) == 0 &&
4008 "Can't split odd sized vector");
4010 // If this is a splat value (with no-undefs) then use the lower subvector,
4011 // which should be a free extraction.
4012 SDValue Lo = extractSubVector(Op, 0, DAG, dl, SizeInBits / 2);
4013 if (DAG.isSplatValue(Op, /*AllowUndefs*/ false))
4014 return std::make_pair(Lo, Lo);
4016 SDValue Hi = extractSubVector(Op, NumElems / 2, DAG, dl, SizeInBits / 2);
4017 return std::make_pair(Lo, Hi);
4020 /// Break an operation into 2 half sized ops and then concatenate the results.
4021 static SDValue splitVectorOp(SDValue Op, SelectionDAG &DAG) {
4022 unsigned NumOps = Op.getNumOperands();
4023 EVT VT = Op.getValueType();
4024 SDLoc dl(Op);
4026 // Extract the LHS Lo/Hi vectors
4027 SmallVector<SDValue> LoOps(NumOps, SDValue());
4028 SmallVector<SDValue> HiOps(NumOps, SDValue());
4029 for (unsigned I = 0; I != NumOps; ++I) {
4030 SDValue SrcOp = Op.getOperand(I);
4031 if (!SrcOp.getValueType().isVector()) {
4032 LoOps[I] = HiOps[I] = SrcOp;
4033 continue;
4035 std::tie(LoOps[I], HiOps[I]) = splitVector(SrcOp, DAG, dl);
4038 EVT LoVT, HiVT;
4039 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
4040 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
4041 DAG.getNode(Op.getOpcode(), dl, LoVT, LoOps),
4042 DAG.getNode(Op.getOpcode(), dl, HiVT, HiOps));
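// For example, a v16i32 operation is split into two v8i32 operations whose
// results are concatenated; any scalar operand (such as a shift amount) is
// passed unchanged to both halves.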
4045 /// Break a unary integer operation into 2 half sized ops and then
4046 /// concatenate the result back.
4047 static SDValue splitVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
4048 // Make sure we only try to split 256/512-bit types to avoid creating
4049 // narrow vectors.
4050 EVT VT = Op.getValueType();
4051 (void)VT;
4052 assert((Op.getOperand(0).getValueType().is256BitVector() ||
4053 Op.getOperand(0).getValueType().is512BitVector()) &&
4054 (VT.is256BitVector() || VT.is512BitVector()) && "Unsupported VT!");
4055 assert(Op.getOperand(0).getValueType().getVectorNumElements() ==
4056 VT.getVectorNumElements() &&
4057 "Unexpected VTs!");
4058 return splitVectorOp(Op, DAG);
4061 /// Break a binary integer operation into 2 half sized ops and then
4062 /// concatenate the result back.
4063 static SDValue splitVectorIntBinary(SDValue Op, SelectionDAG &DAG) {
4064 // Assert that all the types match.
4065 EVT VT = Op.getValueType();
4066 (void)VT;
4067 assert(Op.getOperand(0).getValueType() == VT &&
4068 Op.getOperand(1).getValueType() == VT && "Unexpected VTs!");
4069 assert((VT.is256BitVector() || VT.is512BitVector()) && "Unsupported VT!");
4070 return splitVectorOp(Op, DAG);
4073 // Helper for splitting operands of an operation to legal target size and
4074 // applying a function on each part.
4075 // Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
4076 // 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
4077 // deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
4078 // The argument Builder is a function that will be applied on each split part:
4079 // SDValue Builder(SelectionDAG &G, SDLoc, ArrayRef<SDValue>)
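// For example, a 512-bit op on an AVX2 target without BWI is split into two
// 256-bit pieces: each operand is halved, Builder runs once per piece, and the
// results are concatenated back to the original width.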
4080 template <typename F>
4081 SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
4082 const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
4083 F Builder, bool CheckBWI = true) {
4084 assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
4085 unsigned NumSubs = 1;
4086 if ((CheckBWI && Subtarget.useBWIRegs()) ||
4087 (!CheckBWI && Subtarget.useAVX512Regs())) {
4088 if (VT.getSizeInBits() > 512) {
4089 NumSubs = VT.getSizeInBits() / 512;
4090 assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
4092 } else if (Subtarget.hasAVX2()) {
4093 if (VT.getSizeInBits() > 256) {
4094 NumSubs = VT.getSizeInBits() / 256;
4095 assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
4097 } else {
4098 if (VT.getSizeInBits() > 128) {
4099 NumSubs = VT.getSizeInBits() / 128;
4100 assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
4104 if (NumSubs == 1)
4105 return Builder(DAG, DL, Ops);
4107 SmallVector<SDValue, 4> Subs;
4108 for (unsigned i = 0; i != NumSubs; ++i) {
4109 SmallVector<SDValue, 2> SubOps;
4110 for (SDValue Op : Ops) {
4111 EVT OpVT = Op.getValueType();
4112 unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
4113 unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
4114 SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
4116 Subs.push_back(Builder(DAG, DL, SubOps));
4118 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
4121 // Helper function that extends a non-512-bit vector op to 512-bits on non-VLX
4122 // targets.
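// For example, a v8i32 node on an AVX512F target without VLX is widened to
// v16i32, built as a 512-bit node, and the low 256 bits are extracted again;
// splatted 32/64-bit integer constants are rebuilt at the wide type so they
// can still be folded as broadcasts.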
4123 static SDValue getAVX512Node(unsigned Opcode, const SDLoc &DL, MVT VT,
4124 ArrayRef<SDValue> Ops, SelectionDAG &DAG,
4125 const X86Subtarget &Subtarget) {
4126 assert(Subtarget.hasAVX512() && "AVX512 target expected");
4127 MVT SVT = VT.getScalarType();
4129 // If we have a 32/64-bit splatted constant, splat it to DstTy to
4130 // encourage a foldable broadcast'd operand.
4131 auto MakeBroadcastOp = [&](SDValue Op, MVT OpVT, MVT DstVT) {
4132 unsigned OpEltSizeInBits = OpVT.getScalarSizeInBits();
4133 // AVX512 broadcasts 32/64-bit operands.
4134 // TODO: Support float once getAVX512Node is used by fp-ops.
4135 if (!OpVT.isInteger() || OpEltSizeInBits < 32 ||
4136 !DAG.getTargetLoweringInfo().isTypeLegal(SVT))
4137 return SDValue();
4138 // If we're not widening, don't bother if we're not bitcasting.
4139 if (OpVT == DstVT && Op.getOpcode() != ISD::BITCAST)
4140 return SDValue();
4141 if (auto *BV = dyn_cast<BuildVectorSDNode>(peekThroughBitcasts(Op))) {
4142 APInt SplatValue, SplatUndef;
4143 unsigned SplatBitSize;
4144 bool HasAnyUndefs;
4145 if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
4146 HasAnyUndefs, OpEltSizeInBits) &&
4147 !HasAnyUndefs && SplatValue.getBitWidth() == OpEltSizeInBits)
4148 return DAG.getConstant(SplatValue, DL, DstVT);
4150 return SDValue();
4153 bool Widen = !(Subtarget.hasVLX() || VT.is512BitVector());
4155 MVT DstVT = VT;
4156 if (Widen)
4157 DstVT = MVT::getVectorVT(SVT, 512 / SVT.getSizeInBits());
4159 // Canonicalize src operands.
4160 SmallVector<SDValue> SrcOps(Ops.begin(), Ops.end());
4161 for (SDValue &Op : SrcOps) {
4162 MVT OpVT = Op.getSimpleValueType();
4163 // Just pass through scalar operands.
4164 if (!OpVT.isVector())
4165 continue;
4166 assert(OpVT == VT && "Vector type mismatch");
4168 if (SDValue BroadcastOp = MakeBroadcastOp(Op, OpVT, DstVT)) {
4169 Op = BroadcastOp;
4170 continue;
4173 // Just widen the subvector by inserting into an undef wide vector.
4174 if (Widen)
4175 Op = widenSubVector(Op, false, Subtarget, DAG, DL, 512);
4178 SDValue Res = DAG.getNode(Opcode, DL, DstVT, SrcOps);
4180 // Perform the 512-bit op then extract the bottom subvector.
4181 if (Widen)
4182 Res = extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
4183 return Res;
4186 /// Insert i1-subvector to i1-vector.
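/// For example, inserting a v2i1 subvector into a v8i1 vector: the mask type
/// is widened to a kshift-legal width, the pieces are positioned with
/// KSHIFTL/KSHIFTR and merged with OR, and the result is narrowed back with
/// EXTRACT_SUBVECTOR.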
4187 static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
4188 const X86Subtarget &Subtarget) {
4190 SDLoc dl(Op);
4191 SDValue Vec = Op.getOperand(0);
4192 SDValue SubVec = Op.getOperand(1);
4193 SDValue Idx = Op.getOperand(2);
4194 unsigned IdxVal = Op.getConstantOperandVal(2);
4196 // Inserting undef is a nop. We can just return the original vector.
4197 if (SubVec.isUndef())
4198 return Vec;
4200 if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
4201 return Op;
4203 MVT OpVT = Op.getSimpleValueType();
4204 unsigned NumElems = OpVT.getVectorNumElements();
4205 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
4207 // Extend to natively supported kshift.
4208 MVT WideOpVT = widenMaskVectorType(OpVT, Subtarget);
4210 // Inserting into the lsbs of a zero vector is legal. ISel will insert shifts
4211 // if necessary.
4212 if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
4213 // May need to promote to a legal type.
4214 Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
4215 DAG.getConstant(0, dl, WideOpVT),
4216 SubVec, Idx);
4217 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
4220 MVT SubVecVT = SubVec.getSimpleValueType();
4221 unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
4222 assert(IdxVal + SubVecNumElems <= NumElems &&
4223 IdxVal % SubVecVT.getSizeInBits() == 0 &&
4224 "Unexpected index value in INSERT_SUBVECTOR");
4226 SDValue Undef = DAG.getUNDEF(WideOpVT);
4228 if (IdxVal == 0) {
4229 // Zero lower bits of the Vec
4230 SDValue ShiftBits = DAG.getTargetConstant(SubVecNumElems, dl, MVT::i8);
4231 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
4232 ZeroIdx);
4233 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
4234 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
4235 // Merge them together, SubVec should be zero extended.
4236 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
4237 DAG.getConstant(0, dl, WideOpVT),
4238 SubVec, ZeroIdx);
4239 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
4240 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
4243 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
4244 Undef, SubVec, ZeroIdx);
4246 if (Vec.isUndef()) {
4247 assert(IdxVal != 0 && "Unexpected index");
4248 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
4249 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
4250 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
4253 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
4254 assert(IdxVal != 0 && "Unexpected index");
4255 // If upper elements of Vec are known undef, then just shift into place.
4256 if (llvm::all_of(Vec->ops().slice(IdxVal + SubVecNumElems),
4257 [](SDValue V) { return V.isUndef(); })) {
4258 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
4259 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
4260 } else {
4261 NumElems = WideOpVT.getVectorNumElements();
4262 unsigned ShiftLeft = NumElems - SubVecNumElems;
4263 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
4264 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
4265 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
4266 if (ShiftRight != 0)
4267 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
4268 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
4270 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
4273 // Simple case when we put subvector in the upper part
4274 if (IdxVal + SubVecNumElems == NumElems) {
4275 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
4276 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
4277 if (SubVecNumElems * 2 == NumElems) {
4278 // Special case, use legal zero extending insert_subvector. This allows
4279 // isel to optimize when bits are known zero.
4280 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
4281 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
4282 DAG.getConstant(0, dl, WideOpVT),
4283 Vec, ZeroIdx);
4284 } else {
4285 // Otherwise use explicit shifts to zero the bits.
4286 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
4287 Undef, Vec, ZeroIdx);
4288 NumElems = WideOpVT.getVectorNumElements();
4289 SDValue ShiftBits = DAG.getTargetConstant(NumElems - IdxVal, dl, MVT::i8);
4290 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
4291 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
4293 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
4294 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
4297 // Inserting into the middle is more complicated.
4299 NumElems = WideOpVT.getVectorNumElements();
4301 // Widen the vector if needed.
4302 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
4304 unsigned ShiftLeft = NumElems - SubVecNumElems;
4305 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
4307 // Do an optimization for the most frequently used types.
4308 if (WideOpVT != MVT::v64i1 || Subtarget.is64Bit()) {
4309 APInt Mask0 = APInt::getBitsSet(NumElems, IdxVal, IdxVal + SubVecNumElems);
4310 Mask0.flipAllBits();
4311 SDValue CMask0 = DAG.getConstant(Mask0, dl, MVT::getIntegerVT(NumElems));
4312 SDValue VMask0 = DAG.getNode(ISD::BITCAST, dl, WideOpVT, CMask0);
4313 Vec = DAG.getNode(ISD::AND, dl, WideOpVT, Vec, VMask0);
4314 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
4315 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
4316 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
4317 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
4318 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
4320 // Reduce to original width if needed.
4321 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
4324 // Clear the upper bits of the subvector and move it to its insert position.
4325 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
4326 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
4327 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
4328 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
4330 // Isolate the bits below the insertion point.
4331 unsigned LowShift = NumElems - IdxVal;
4332 SDValue Low = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec,
4333 DAG.getTargetConstant(LowShift, dl, MVT::i8));
4334 Low = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Low,
4335 DAG.getTargetConstant(LowShift, dl, MVT::i8));
4337 // Isolate the bits after the last inserted bit.
4338 unsigned HighShift = IdxVal + SubVecNumElems;
4339 SDValue High = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
4340 DAG.getTargetConstant(HighShift, dl, MVT::i8));
4341 High = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, High,
4342 DAG.getTargetConstant(HighShift, dl, MVT::i8));
4344 // Now OR all 3 pieces together.
4345 Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Low, High);
4346 SubVec = DAG.getNode(ISD::OR, dl, WideOpVT, SubVec, Vec);
4348 // Reduce to original width if needed.
4349 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
4352 static SDValue concatSubVectors(SDValue V1, SDValue V2, SelectionDAG &DAG,
4353 const SDLoc &dl) {
4354 assert(V1.getValueType() == V2.getValueType() && "subvector type mismatch");
4355 EVT SubVT = V1.getValueType();
4356 EVT SubSVT = SubVT.getScalarType();
4357 unsigned SubNumElts = SubVT.getVectorNumElements();
4358 unsigned SubVectorWidth = SubVT.getSizeInBits();
4359 EVT VT = EVT::getVectorVT(*DAG.getContext(), SubSVT, 2 * SubNumElts);
4360 SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, SubVectorWidth);
4361 return insertSubVector(V, V2, SubNumElts, DAG, dl, SubVectorWidth);
4364 /// Returns a vector of specified type with all bits set.
4365 /// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
4366 /// Then bitcast to their original type, ensuring they get CSE'd.
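/// For example, an all-ones v32i16 vector is materialized as a <16 x i32>
/// splat of -1 and then bitcast back to v32i16.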
4367 static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
4368 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
4369 "Expected a 128/256/512-bit vector type");
4371 APInt Ones = APInt::getAllOnes(32);
4372 unsigned NumElts = VT.getSizeInBits() / 32;
4373 SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
4374 return DAG.getBitcast(VT, Vec);
4377 static SDValue getEXTEND_VECTOR_INREG(unsigned Opcode, const SDLoc &DL, EVT VT,
4378 SDValue In, SelectionDAG &DAG) {
4379 EVT InVT = In.getValueType();
4380 assert(VT.isVector() && InVT.isVector() && "Expected vector VTs.");
4381 assert((ISD::ANY_EXTEND == Opcode || ISD::SIGN_EXTEND == Opcode ||
4382 ISD::ZERO_EXTEND == Opcode) &&
4383 "Unknown extension opcode");
4385 // For 256-bit vectors, we only need the lower (128-bit) input half.
4386 // For 512-bit vectors, we only need the lower input half or quarter.
4387 if (InVT.getSizeInBits() > 128) {
4388 assert(VT.getSizeInBits() == InVT.getSizeInBits() &&
4389 "Expected VTs to be the same size!");
4390 unsigned Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
4391 In = extractSubVector(In, 0, DAG, DL,
4392 std::max(128U, (unsigned)VT.getSizeInBits() / Scale));
4393 InVT = In.getValueType();
4396 if (VT.getVectorNumElements() != InVT.getVectorNumElements())
4397 Opcode = DAG.getOpcode_EXTEND_VECTOR_INREG(Opcode);
4399 return DAG.getNode(Opcode, DL, VT, In);
4402 // Create OR(AND(LHS,MASK),AND(RHS,~MASK)) bit select pattern
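// i.e. each result bit takes LHS where the corresponding Mask bit is 1 and
// RHS where it is 0 (X86ISD::ANDNP computes ~Mask & RHS).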
4403 static SDValue getBitSelect(const SDLoc &DL, MVT VT, SDValue LHS, SDValue RHS,
4404 SDValue Mask, SelectionDAG &DAG) {
4405 LHS = DAG.getNode(ISD::AND, DL, VT, LHS, Mask);
4406 RHS = DAG.getNode(X86ISD::ANDNP, DL, VT, Mask, RHS);
4407 return DAG.getNode(ISD::OR, DL, VT, LHS, RHS);
4410 void llvm::createUnpackShuffleMask(EVT VT, SmallVectorImpl<int> &Mask,
4411 bool Lo, bool Unary) {
4412 assert(VT.getScalarType().isSimple() && (VT.getSizeInBits() % 128) == 0 &&
4413 "Illegal vector type to unpack");
4414 assert(Mask.empty() && "Expected an empty shuffle mask vector");
4415 int NumElts = VT.getVectorNumElements();
4416 int NumEltsInLane = 128 / VT.getScalarSizeInBits();
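// e.g. for v8i16 the lo (binary) mask is <0,8,1,9,2,10,3,11> and the hi mask
// is <4,12,5,13,6,14,7,15>, matching PUNPCKLWD/PUNPCKHWD.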
4417 for (int i = 0; i < NumElts; ++i) {
4418 unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
4419 int Pos = (i % NumEltsInLane) / 2 + LaneStart;
4420 Pos += (Unary ? 0 : NumElts * (i % 2));
4421 Pos += (Lo ? 0 : NumEltsInLane / 2);
4422 Mask.push_back(Pos);
4426 /// Similar to unpacklo/unpackhi, but without the 128-bit lane limitation
4427 /// imposed by AVX and specific to the unary pattern. Example:
4428 /// v8iX Lo --> <0, 0, 1, 1, 2, 2, 3, 3>
4429 /// v8iX Hi --> <4, 4, 5, 5, 6, 6, 7, 7>
4430 void llvm::createSplat2ShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
4431 bool Lo) {
4432 assert(Mask.empty() && "Expected an empty shuffle mask vector");
4433 int NumElts = VT.getVectorNumElements();
4434 for (int i = 0; i < NumElts; ++i) {
4435 int Pos = i / 2;
4436 Pos += (Lo ? 0 : NumElts / 2);
4437 Mask.push_back(Pos);
4441 // Attempt to constant fold, else just create a VECTOR_SHUFFLE.
4442 static SDValue getVectorShuffle(SelectionDAG &DAG, EVT VT, const SDLoc &dl,
4443 SDValue V1, SDValue V2, ArrayRef<int> Mask) {
4444 if ((ISD::isBuildVectorOfConstantSDNodes(V1.getNode()) || V1.isUndef()) &&
4445 (ISD::isBuildVectorOfConstantSDNodes(V2.getNode()) || V2.isUndef())) {
4446 SmallVector<SDValue> Ops(Mask.size(), DAG.getUNDEF(VT.getScalarType()));
4447 for (int I = 0, NumElts = Mask.size(); I != NumElts; ++I) {
4448 int M = Mask[I];
4449 if (M < 0)
4450 continue;
4451 SDValue V = (M < NumElts) ? V1 : V2;
4452 if (V.isUndef())
4453 continue;
4454 Ops[I] = V.getOperand(M % NumElts);
4456 return DAG.getBuildVector(VT, dl, Ops);
4459 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
4462 /// Returns a vector_shuffle node for an unpackl operation.
4463 static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
4464 SDValue V1, SDValue V2) {
4465 SmallVector<int, 8> Mask;
4466 createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
4467 return getVectorShuffle(DAG, VT, dl, V1, V2, Mask);
4470 /// Returns a vector_shuffle node for an unpackh operation.
4471 static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
4472 SDValue V1, SDValue V2) {
4473 SmallVector<int, 8> Mask;
4474 createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
4475 return getVectorShuffle(DAG, VT, dl, V1, V2, Mask);
4478 /// Returns a node that packs the LHS + RHS nodes together at half width.
4479 /// May return X86ISD::PACKSS/PACKUS, packing the top/bottom half.
4480 /// TODO: Add subvector splitting if/when we have a need for it.
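/// For example, when packing the low halves of two v8i32 operands into
/// v16i16, PACKUSDW (SSE4.1) is used if the upper 16 bits of every element are
/// known zero, PACKSSDW if the values already fit in 16 signed bits; otherwise
/// the inputs are masked or shifted first and then packed.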
4481 static SDValue getPack(SelectionDAG &DAG, const X86Subtarget &Subtarget,
4482 const SDLoc &dl, MVT VT, SDValue LHS, SDValue RHS,
4483 bool PackHiHalf = false) {
4484 MVT OpVT = LHS.getSimpleValueType();
4485 unsigned EltSizeInBits = VT.getScalarSizeInBits();
4486 bool UsePackUS = Subtarget.hasSSE41() || EltSizeInBits == 8;
4487 assert(OpVT == RHS.getSimpleValueType() &&
4488 VT.getSizeInBits() == OpVT.getSizeInBits() &&
4489 (EltSizeInBits * 2) == OpVT.getScalarSizeInBits() &&
4490 "Unexpected PACK operand types");
4491 assert((EltSizeInBits == 8 || EltSizeInBits == 16 || EltSizeInBits == 32) &&
4492 "Unexpected PACK result type");
4494 // Rely on vector shuffles for vXi64 -> vXi32 packing.
4495 if (EltSizeInBits == 32) {
4496 SmallVector<int> PackMask;
4497 int Offset = PackHiHalf ? 1 : 0;
4498 int NumElts = VT.getVectorNumElements();
4499 for (int I = 0; I != NumElts; I += 4) {
4500 PackMask.push_back(I + Offset);
4501 PackMask.push_back(I + Offset + 2);
4502 PackMask.push_back(I + Offset + NumElts);
4503 PackMask.push_back(I + Offset + NumElts + 2);
4505 return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, LHS),
4506 DAG.getBitcast(VT, RHS), PackMask);
4509 // See if we already have sufficient leading bits for PACKSS/PACKUS.
4510 if (!PackHiHalf) {
4511 if (UsePackUS &&
4512 DAG.computeKnownBits(LHS).countMaxActiveBits() <= EltSizeInBits &&
4513 DAG.computeKnownBits(RHS).countMaxActiveBits() <= EltSizeInBits)
4514 return DAG.getNode(X86ISD::PACKUS, dl, VT, LHS, RHS);
4516 if (DAG.ComputeMaxSignificantBits(LHS) <= EltSizeInBits &&
4517 DAG.ComputeMaxSignificantBits(RHS) <= EltSizeInBits)
4518 return DAG.getNode(X86ISD::PACKSS, dl, VT, LHS, RHS);
4521 // Fall back to sign/zero-extending the requested half, then pack.
4522 SDValue Amt = DAG.getTargetConstant(EltSizeInBits, dl, MVT::i8);
4523 if (UsePackUS) {
4524 if (PackHiHalf) {
4525 LHS = DAG.getNode(X86ISD::VSRLI, dl, OpVT, LHS, Amt);
4526 RHS = DAG.getNode(X86ISD::VSRLI, dl, OpVT, RHS, Amt);
4527 } else {
4528 SDValue Mask = DAG.getConstant((1ULL << EltSizeInBits) - 1, dl, OpVT);
4529 LHS = DAG.getNode(ISD::AND, dl, OpVT, LHS, Mask);
4530 RHS = DAG.getNode(ISD::AND, dl, OpVT, RHS, Mask);
4532 return DAG.getNode(X86ISD::PACKUS, dl, VT, LHS, RHS);
4535 if (!PackHiHalf) {
4536 LHS = DAG.getNode(X86ISD::VSHLI, dl, OpVT, LHS, Amt);
4537 RHS = DAG.getNode(X86ISD::VSHLI, dl, OpVT, RHS, Amt);
4539 LHS = DAG.getNode(X86ISD::VSRAI, dl, OpVT, LHS, Amt);
4540 RHS = DAG.getNode(X86ISD::VSRAI, dl, OpVT, RHS, Amt);
4541 return DAG.getNode(X86ISD::PACKSS, dl, VT, LHS, RHS);
4544 /// Return a vector_shuffle of the specified vector and a zero or undef vector.
4545 /// This produces a shuffle where the low element of V2 is swizzled into the
4546 /// zero/undef vector, landing at element Idx.
4547 /// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
4548 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
4549 bool IsZero,
4550 const X86Subtarget &Subtarget,
4551 SelectionDAG &DAG) {
4552 MVT VT = V2.getSimpleValueType();
4553 SDValue V1 = IsZero
4554 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
4555 int NumElems = VT.getVectorNumElements();
4556 SmallVector<int, 16> MaskVec(NumElems);
4557 for (int i = 0; i != NumElems; ++i)
4558 // If this is the insertion idx, put the low elt of V2 here.
4559 MaskVec[i] = (i == Idx) ? NumElems : i;
4560 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
4563 static ConstantPoolSDNode *getTargetConstantPoolFromBasePtr(SDValue Ptr) {
4564 if (Ptr.getOpcode() == X86ISD::Wrapper ||
4565 Ptr.getOpcode() == X86ISD::WrapperRIP)
4566 Ptr = Ptr.getOperand(0);
4567 return dyn_cast<ConstantPoolSDNode>(Ptr);
4570 static const Constant *getTargetConstantFromBasePtr(SDValue Ptr) {
4571 ConstantPoolSDNode *CNode = getTargetConstantPoolFromBasePtr(Ptr);
4572 if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
4573 return nullptr;
4574 return CNode->getConstVal();
4577 static const Constant *getTargetConstantFromNode(LoadSDNode *Load) {
4578 if (!Load || !ISD::isNormalLoad(Load))
4579 return nullptr;
4580 return getTargetConstantFromBasePtr(Load->getBasePtr());
4583 static const Constant *getTargetConstantFromNode(SDValue Op) {
4584 Op = peekThroughBitcasts(Op);
4585 return getTargetConstantFromNode(dyn_cast<LoadSDNode>(Op));
4588 const Constant *
4589 X86TargetLowering::getTargetConstantFromLoad(LoadSDNode *LD) const {
4590 assert(LD && "Unexpected null LoadSDNode");
4591 return getTargetConstantFromNode(LD);
4594 // Extract raw constant bits from constant pools.
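// For example, querying a v2i64 constant for 8-bit elements yields 16 APInt
// values of width 8 plus a per-element undef mask; sources include build
// vectors, constant-pool loads, broadcasts and insert/extract_subvector.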
4595 static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
4596 APInt &UndefElts,
4597 SmallVectorImpl<APInt> &EltBits,
4598 bool AllowWholeUndefs = true,
4599 bool AllowPartialUndefs = true) {
4600 assert(EltBits.empty() && "Expected an empty EltBits vector");
4602 Op = peekThroughBitcasts(Op);
4604 EVT VT = Op.getValueType();
4605 unsigned SizeInBits = VT.getSizeInBits();
4606 assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
4607 unsigned NumElts = SizeInBits / EltSizeInBits;
4609 // Bitcast a source array of element bits to the target size.
4610 auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
4611 unsigned NumSrcElts = UndefSrcElts.getBitWidth();
4612 unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
4613 assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
4614 "Constant bit sizes don't match");
4616 // Don't split if we don't allow undef bits.
4617 bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
4618 if (UndefSrcElts.getBoolValue() && !AllowUndefs)
4619 return false;
4621 // If we're already the right size, don't bother bitcasting.
4622 if (NumSrcElts == NumElts) {
4623 UndefElts = UndefSrcElts;
4624 EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
4625 return true;
4628 // Extract all the undef/constant element data and pack into single bitsets.
4629 APInt UndefBits(SizeInBits, 0);
4630 APInt MaskBits(SizeInBits, 0);
4632 for (unsigned i = 0; i != NumSrcElts; ++i) {
4633 unsigned BitOffset = i * SrcEltSizeInBits;
4634 if (UndefSrcElts[i])
4635 UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
4636 MaskBits.insertBits(SrcEltBits[i], BitOffset);
4639 // Split the undef/constant single bitset data into the target elements.
4640 UndefElts = APInt(NumElts, 0);
4641 EltBits.resize(NumElts, APInt(EltSizeInBits, 0));
4643 for (unsigned i = 0; i != NumElts; ++i) {
4644 unsigned BitOffset = i * EltSizeInBits;
4645 APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);
4647 // Only treat an element as UNDEF if all bits are UNDEF.
4648 if (UndefEltBits.isAllOnes()) {
4649 if (!AllowWholeUndefs)
4650 return false;
4651 UndefElts.setBit(i);
4652 continue;
4655 // If only some bits are UNDEF then treat them as zero (or bail if not
4656 // supported).
4657 if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
4658 return false;
4660 EltBits[i] = MaskBits.extractBits(EltSizeInBits, BitOffset);
4662 return true;
4665 // Collect constant bits and insert into mask/undef bit masks.
4666 auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
4667 unsigned UndefBitIndex) {
4668 if (!Cst)
4669 return false;
4670 if (isa<UndefValue>(Cst)) {
4671 Undefs.setBit(UndefBitIndex);
4672 return true;
4674 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
4675 Mask = CInt->getValue();
4676 return true;
4678 if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
4679 Mask = CFP->getValueAPF().bitcastToAPInt();
4680 return true;
4682 if (auto *CDS = dyn_cast<ConstantDataSequential>(Cst)) {
4683 Type *Ty = CDS->getType();
4684 Mask = APInt::getZero(Ty->getPrimitiveSizeInBits());
4685 Type *EltTy = CDS->getElementType();
4686 bool IsInteger = EltTy->isIntegerTy();
4687 bool IsFP =
4688 EltTy->isHalfTy() || EltTy->isFloatTy() || EltTy->isDoubleTy();
4689 if (!IsInteger && !IsFP)
4690 return false;
4691 unsigned EltBits = EltTy->getPrimitiveSizeInBits();
4692 for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I)
4693 if (IsInteger)
4694 Mask.insertBits(CDS->getElementAsAPInt(I), I * EltBits);
4695 else
4696 Mask.insertBits(CDS->getElementAsAPFloat(I).bitcastToAPInt(),
4697 I * EltBits);
4698 return true;
4700 return false;
4703 // Handle UNDEFs.
4704 if (Op.isUndef()) {
4705 APInt UndefSrcElts = APInt::getAllOnes(NumElts);
4706 SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
4707 return CastBitData(UndefSrcElts, SrcEltBits);
4710 // Extract scalar constant bits.
4711 if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
4712 APInt UndefSrcElts = APInt::getZero(1);
4713 SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
4714 return CastBitData(UndefSrcElts, SrcEltBits);
4716 if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
4717 APInt UndefSrcElts = APInt::getZero(1);
4718 APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
4719 SmallVector<APInt, 64> SrcEltBits(1, RawBits);
4720 return CastBitData(UndefSrcElts, SrcEltBits);
4723 // Extract constant bits from build vector.
4724 if (auto *BV = dyn_cast<BuildVectorSDNode>(Op)) {
4725 BitVector Undefs;
4726 SmallVector<APInt> SrcEltBits;
4727 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
4728 if (BV->getConstantRawBits(true, SrcEltSizeInBits, SrcEltBits, Undefs)) {
4729 APInt UndefSrcElts = APInt::getZero(SrcEltBits.size());
4730 for (unsigned I = 0, E = SrcEltBits.size(); I != E; ++I)
4731 if (Undefs[I])
4732 UndefSrcElts.setBit(I);
4733 return CastBitData(UndefSrcElts, SrcEltBits);
4737 // Extract constant bits from constant pool vector.
4738 if (auto *Cst = getTargetConstantFromNode(Op)) {
4739 Type *CstTy = Cst->getType();
4740 unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
4741 if (!CstTy->isVectorTy() || (CstSizeInBits % SizeInBits) != 0)
4742 return false;
4744 unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
4745 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
4746 if ((SizeInBits % SrcEltSizeInBits) != 0)
4747 return false;
4749 APInt UndefSrcElts(NumSrcElts, 0);
4750 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
4751 for (unsigned i = 0; i != NumSrcElts; ++i)
4752 if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
4753 UndefSrcElts, i))
4754 return false;
4756 return CastBitData(UndefSrcElts, SrcEltBits);
4759 // Extract constant bits from a broadcasted constant pool scalar.
4760 if (Op.getOpcode() == X86ISD::VBROADCAST_LOAD &&
4761 EltSizeInBits <= VT.getScalarSizeInBits()) {
4762 auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
4763 if (MemIntr->getMemoryVT().getStoreSizeInBits() != VT.getScalarSizeInBits())
4764 return false;
4766 SDValue Ptr = MemIntr->getBasePtr();
4767 if (const Constant *C = getTargetConstantFromBasePtr(Ptr)) {
4768 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
4769 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
4771 APInt UndefSrcElts(NumSrcElts, 0);
4772 SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
4773 if (CollectConstantBits(C, SrcEltBits[0], UndefSrcElts, 0)) {
4774 if (UndefSrcElts[0])
4775 UndefSrcElts.setBits(0, NumSrcElts);
4776 if (SrcEltBits[0].getBitWidth() != SrcEltSizeInBits)
4777 SrcEltBits[0] = SrcEltBits[0].trunc(SrcEltSizeInBits);
4778 SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
4779 return CastBitData(UndefSrcElts, SrcEltBits);
4784 // Extract constant bits from a subvector broadcast.
4785 if (Op.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) {
4786 auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
4787 SDValue Ptr = MemIntr->getBasePtr();
4788 // The source constant may be larger than the subvector broadcast, so make
4789 // sure we extract the correct subvector constants.
4790 if (const Constant *Cst = getTargetConstantFromBasePtr(Ptr)) {
4791 Type *CstTy = Cst->getType();
4792 unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
4793 unsigned SubVecSizeInBits = MemIntr->getMemoryVT().getStoreSizeInBits();
4794 if (!CstTy->isVectorTy() || (CstSizeInBits % SubVecSizeInBits) != 0 ||
4795 (SizeInBits % SubVecSizeInBits) != 0)
4796 return false;
4797 unsigned CstEltSizeInBits = CstTy->getScalarSizeInBits();
4798 unsigned NumSubElts = SubVecSizeInBits / CstEltSizeInBits;
4799 unsigned NumSubVecs = SizeInBits / SubVecSizeInBits;
4800 APInt UndefSubElts(NumSubElts, 0);
4801 SmallVector<APInt, 64> SubEltBits(NumSubElts * NumSubVecs,
4802 APInt(CstEltSizeInBits, 0));
4803 for (unsigned i = 0; i != NumSubElts; ++i) {
4804 if (!CollectConstantBits(Cst->getAggregateElement(i), SubEltBits[i],
4805 UndefSubElts, i))
4806 return false;
4807 for (unsigned j = 1; j != NumSubVecs; ++j)
4808 SubEltBits[i + (j * NumSubElts)] = SubEltBits[i];
4810 UndefSubElts = APInt::getSplat(NumSubVecs * UndefSubElts.getBitWidth(),
4811 UndefSubElts);
4812 return CastBitData(UndefSubElts, SubEltBits);
4816 // Extract a rematerialized scalar constant insertion.
4817 if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
4818 Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
4819 isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
4820 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
4821 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
4823 APInt UndefSrcElts(NumSrcElts, 0);
4824 SmallVector<APInt, 64> SrcEltBits;
4825 const APInt &C = Op.getOperand(0).getConstantOperandAPInt(0);
4826 SrcEltBits.push_back(C.zextOrTrunc(SrcEltSizeInBits));
4827 SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
4828 return CastBitData(UndefSrcElts, SrcEltBits);
4831 // Insert constant bits from base and subvector sources.
4832 if (Op.getOpcode() == ISD::INSERT_SUBVECTOR) {
4833 // If we bitcast to larger elements we might lose track of undefs - to be
4834 // safe, don't allow any.
4835 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
4836 bool AllowUndefs = EltSizeInBits >= SrcEltSizeInBits;
4838 APInt UndefSrcElts, UndefSubElts;
4839 SmallVector<APInt, 32> EltSrcBits, EltSubBits;
4840 if (getTargetConstantBitsFromNode(Op.getOperand(1), SrcEltSizeInBits,
4841 UndefSubElts, EltSubBits,
4842 AllowWholeUndefs && AllowUndefs,
4843 AllowPartialUndefs && AllowUndefs) &&
4844 getTargetConstantBitsFromNode(Op.getOperand(0), SrcEltSizeInBits,
4845 UndefSrcElts, EltSrcBits,
4846 AllowWholeUndefs && AllowUndefs,
4847 AllowPartialUndefs && AllowUndefs)) {
4848 unsigned BaseIdx = Op.getConstantOperandVal(2);
4849 UndefSrcElts.insertBits(UndefSubElts, BaseIdx);
4850 for (unsigned i = 0, e = EltSubBits.size(); i != e; ++i)
4851 EltSrcBits[BaseIdx + i] = EltSubBits[i];
4852 return CastBitData(UndefSrcElts, EltSrcBits);
4856 // Extract constant bits from a subvector's source.
4857 if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
4858 // TODO - support extract_subvector through bitcasts.
4859 if (EltSizeInBits != VT.getScalarSizeInBits())
4860 return false;
4862 if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
4863 UndefElts, EltBits, AllowWholeUndefs,
4864 AllowPartialUndefs)) {
4865 EVT SrcVT = Op.getOperand(0).getValueType();
4866 unsigned NumSrcElts = SrcVT.getVectorNumElements();
4867 unsigned NumSubElts = VT.getVectorNumElements();
4868 unsigned BaseIdx = Op.getConstantOperandVal(1);
4869 UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx);
4870 if ((BaseIdx + NumSubElts) != NumSrcElts)
4871 EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end());
4872 if (BaseIdx != 0)
4873 EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx);
4874 return true;
4878 // Extract constant bits from shuffle node sources.
4879 if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
4880 // TODO - support shuffle through bitcasts.
4881 if (EltSizeInBits != VT.getScalarSizeInBits())
4882 return false;
4884 ArrayRef<int> Mask = SVN->getMask();
4885 if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
4886 llvm::any_of(Mask, [](int M) { return M < 0; }))
4887 return false;
4889 APInt UndefElts0, UndefElts1;
4890 SmallVector<APInt, 32> EltBits0, EltBits1;
4891 if (isAnyInRange(Mask, 0, NumElts) &&
4892 !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
4893 UndefElts0, EltBits0, AllowWholeUndefs,
4894 AllowPartialUndefs))
4895 return false;
4896 if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
4897 !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
4898 UndefElts1, EltBits1, AllowWholeUndefs,
4899 AllowPartialUndefs))
4900 return false;
4902 UndefElts = APInt::getZero(NumElts);
4903 for (int i = 0; i != (int)NumElts; ++i) {
4904 int M = Mask[i];
4905 if (M < 0) {
4906 UndefElts.setBit(i);
4907 EltBits.push_back(APInt::getZero(EltSizeInBits));
4908 } else if (M < (int)NumElts) {
4909 if (UndefElts0[M])
4910 UndefElts.setBit(i);
4911 EltBits.push_back(EltBits0[M]);
4912 } else {
4913 if (UndefElts1[M - NumElts])
4914 UndefElts.setBit(i);
4915 EltBits.push_back(EltBits1[M - NumElts]);
4918 return true;
4921 return false;
4924 namespace llvm {
4925 namespace X86 {
4926 bool isConstantSplat(SDValue Op, APInt &SplatVal, bool AllowPartialUndefs) {
4927 APInt UndefElts;
4928 SmallVector<APInt, 16> EltBits;
4929 if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
4930 UndefElts, EltBits, true,
4931 AllowPartialUndefs)) {
4932 int SplatIndex = -1;
4933 for (int i = 0, e = EltBits.size(); i != e; ++i) {
4934 if (UndefElts[i])
4935 continue;
4936 if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
4937 SplatIndex = -1;
4938 break;
4940 SplatIndex = i;
4942 if (0 <= SplatIndex) {
4943 SplatVal = EltBits[SplatIndex];
4944 return true;
4948 return false;
4950 } // namespace X86
4951 } // namespace llvm
4953 static bool getTargetShuffleMaskIndices(SDValue MaskNode,
4954 unsigned MaskEltSizeInBits,
4955 SmallVectorImpl<uint64_t> &RawMask,
4956 APInt &UndefElts) {
4957 // Extract the raw target constant bits.
4958 SmallVector<APInt, 64> EltBits;
4959 if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
4960 EltBits, /* AllowWholeUndefs */ true,
4961 /* AllowPartialUndefs */ false))
4962 return false;
4964 // Insert the extracted elements into the mask.
4965 for (const APInt &Elt : EltBits)
4966 RawMask.push_back(Elt.getZExtValue());
4968 return true;
4971 // Match not(xor X, -1) -> X.
4972 // Match not(pcmpgt(C, X)) -> pcmpgt(X, C - 1).
4973 // Match not(extract_subvector(xor X, -1)) -> extract_subvector(X).
4974 // Match not(concat_vectors(xor X, -1, xor Y, -1)) -> concat_vectors(X, Y).
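// e.g. not(pcmpgt(5, X)) folds to pcmpgt(X, 4); the fold is skipped if any
// constant element is the minimum signed value, since subtracting 1 would wrap.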
4975 static SDValue IsNOT(SDValue V, SelectionDAG &DAG) {
4976 V = peekThroughBitcasts(V);
4977 if (V.getOpcode() == ISD::XOR &&
4978 (ISD::isBuildVectorAllOnes(V.getOperand(1).getNode()) ||
4979 isAllOnesConstant(V.getOperand(1))))
4980 return V.getOperand(0);
4981 if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
4982 (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
4983 if (SDValue Not = IsNOT(V.getOperand(0), DAG)) {
4984 Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
4985 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Not), V.getValueType(),
4986 Not, V.getOperand(1));
4989 if (V.getOpcode() == X86ISD::PCMPGT &&
4990 !ISD::isBuildVectorAllZeros(V.getOperand(0).getNode()) &&
4991 !ISD::isBuildVectorAllOnes(V.getOperand(0).getNode()) &&
4992 V.getOperand(0).hasOneUse()) {
4993 APInt UndefElts;
4994 SmallVector<APInt> EltBits;
4995 if (getTargetConstantBitsFromNode(V.getOperand(0),
4996 V.getScalarValueSizeInBits(), UndefElts,
4997 EltBits)) {
4998 // Don't fold min_signed_value -> (min_signed_value - 1)
4999 bool MinSigned = false;
5000 for (APInt &Elt : EltBits) {
5001 MinSigned |= Elt.isMinSignedValue();
5002 Elt -= 1;
5004 if (!MinSigned) {
5005 SDLoc DL(V);
5006 MVT VT = V.getSimpleValueType();
5007 return DAG.getNode(X86ISD::PCMPGT, DL, VT, V.getOperand(1),
5008 getConstVector(EltBits, UndefElts, VT, DAG, DL));
5012 SmallVector<SDValue, 2> CatOps;
5013 if (collectConcatOps(V.getNode(), CatOps, DAG)) {
5014 for (SDValue &CatOp : CatOps) {
5015 SDValue NotCat = IsNOT(CatOp, DAG);
5016 if (!NotCat) return SDValue();
5017 CatOp = DAG.getBitcast(CatOp.getValueType(), NotCat);
5019 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(V), V.getValueType(), CatOps);
5021 return SDValue();
5024 /// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
5025 /// A multi-stage pack shuffle mask is created by specifying NumStages > 1.
5026 /// Note: This ignores saturation, so inputs must be checked first.
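/// For example, the single-stage binary mask for v16i8 is
/// <0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30>, i.e. the even elements of the
/// LHS lane followed by the even elements of the RHS lane.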
5027 static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
5028 bool Unary, unsigned NumStages = 1) {
5029 assert(Mask.empty() && "Expected an empty shuffle mask vector");
5030 unsigned NumElts = VT.getVectorNumElements();
5031 unsigned NumLanes = VT.getSizeInBits() / 128;
5032 unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
5033 unsigned Offset = Unary ? 0 : NumElts;
5034 unsigned Repetitions = 1u << (NumStages - 1);
5035 unsigned Increment = 1u << NumStages;
5036 assert((NumEltsPerLane >> NumStages) > 0 && "Illegal packing compaction");
5038 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
5039 for (unsigned Stage = 0; Stage != Repetitions; ++Stage) {
5040 for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
5041 Mask.push_back(Elt + (Lane * NumEltsPerLane));
5042 for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
5043 Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
5048 // Split the demanded elts of a PACKSS/PACKUS node between its operands.
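// e.g. for a v16i8 PACK of two v8i16 inputs, demanded result bytes 0-7 map to
// LHS elements 0-7 and bytes 8-15 map to RHS elements 0-7, per 128-bit lane.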
5049 static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
5050 APInt &DemandedLHS, APInt &DemandedRHS) {
5051 int NumLanes = VT.getSizeInBits() / 128;
5052 int NumElts = DemandedElts.getBitWidth();
5053 int NumInnerElts = NumElts / 2;
5054 int NumEltsPerLane = NumElts / NumLanes;
5055 int NumInnerEltsPerLane = NumInnerElts / NumLanes;
5057 DemandedLHS = APInt::getZero(NumInnerElts);
5058 DemandedRHS = APInt::getZero(NumInnerElts);
5060 // Map DemandedElts to the packed operands.
5061 for (int Lane = 0; Lane != NumLanes; ++Lane) {
5062 for (int Elt = 0; Elt != NumInnerEltsPerLane; ++Elt) {
5063 int OuterIdx = (Lane * NumEltsPerLane) + Elt;
5064 int InnerIdx = (Lane * NumInnerEltsPerLane) + Elt;
5065 if (DemandedElts[OuterIdx])
5066 DemandedLHS.setBit(InnerIdx);
5067 if (DemandedElts[OuterIdx + NumInnerEltsPerLane])
5068 DemandedRHS.setBit(InnerIdx);
5073 // Split the demanded elts of a HADD/HSUB node between its operands.
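// e.g. for a v4i32 HADD, demanding result element 0 demands LHS elements 0
// and 1, while demanding result element 2 demands RHS elements 0 and 1.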
5074 static void getHorizDemandedElts(EVT VT, const APInt &DemandedElts,
5075 APInt &DemandedLHS, APInt &DemandedRHS) {
5076 int NumLanes = VT.getSizeInBits() / 128;
5077 int NumElts = DemandedElts.getBitWidth();
5078 int NumEltsPerLane = NumElts / NumLanes;
5079 int HalfEltsPerLane = NumEltsPerLane / 2;
5081 DemandedLHS = APInt::getZero(NumElts);
5082 DemandedRHS = APInt::getZero(NumElts);
5084 // Map DemandedElts to the horizontal operands.
5085 for (int Idx = 0; Idx != NumElts; ++Idx) {
5086 if (!DemandedElts[Idx])
5087 continue;
5088 int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
5089 int LocalIdx = Idx % NumEltsPerLane;
5090 if (LocalIdx < HalfEltsPerLane) {
5091 DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 0);
5092 DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 1);
5093 } else {
5094 LocalIdx -= HalfEltsPerLane;
5095 DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 0);
5096 DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 1);
5101 /// Calculates the shuffle mask corresponding to the target-specific opcode.
5102 /// If the mask could be calculated, returns it in \p Mask, returns the shuffle
5103 /// operands in \p Ops, and returns true.
5104 /// Sets \p IsUnary to true if only one source is used. Note that this will set
5105 /// IsUnary for shuffles which use a single input multiple times, and in those
5106 /// cases it will adjust the mask to only have indices within that single input.
5107 /// It is an error to call this with non-empty Mask/Ops vectors.
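/// For example, an X86ISD::UNPCKL v4i32 node decodes to Mask = <0,4,1,5>; if
/// both operands are the same node the mask is remapped to <0,0,1,1> and
/// IsUnary is set.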
5108 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
5109 SmallVectorImpl<SDValue> &Ops,
5110 SmallVectorImpl<int> &Mask, bool &IsUnary) {
5111 unsigned NumElems = VT.getVectorNumElements();
5112 unsigned MaskEltSize = VT.getScalarSizeInBits();
5113 SmallVector<uint64_t, 32> RawMask;
5114 APInt RawUndefs;
5115 uint64_t ImmN;
5117 assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
5118 assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
5120 IsUnary = false;
5121 bool IsFakeUnary = false;
5122 switch (N->getOpcode()) {
5123 case X86ISD::BLENDI:
5124 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5125 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5126 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5127 DecodeBLENDMask(NumElems, ImmN, Mask);
5128 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5129 break;
5130 case X86ISD::SHUFP:
5131 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5132 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5133 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5134 DecodeSHUFPMask(NumElems, MaskEltSize, ImmN, Mask);
5135 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5136 break;
5137 case X86ISD::INSERTPS:
5138 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5139 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5140 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5141 DecodeINSERTPSMask(ImmN, Mask);
5142 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5143 break;
5144 case X86ISD::EXTRQI:
5145 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5146 if (isa<ConstantSDNode>(N->getOperand(1)) &&
5147 isa<ConstantSDNode>(N->getOperand(2))) {
5148 int BitLen = N->getConstantOperandVal(1);
5149 int BitIdx = N->getConstantOperandVal(2);
5150 DecodeEXTRQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
5151 IsUnary = true;
5153 break;
5154 case X86ISD::INSERTQI:
5155 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5156 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5157 if (isa<ConstantSDNode>(N->getOperand(2)) &&
5158 isa<ConstantSDNode>(N->getOperand(3))) {
5159 int BitLen = N->getConstantOperandVal(2);
5160 int BitIdx = N->getConstantOperandVal(3);
5161 DecodeINSERTQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
5162 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5164 break;
5165 case X86ISD::UNPCKH:
5166 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5167 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5168 DecodeUNPCKHMask(NumElems, MaskEltSize, Mask);
5169 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5170 break;
5171 case X86ISD::UNPCKL:
5172 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5173 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5174 DecodeUNPCKLMask(NumElems, MaskEltSize, Mask);
5175 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5176 break;
5177 case X86ISD::MOVHLPS:
5178 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5179 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5180 DecodeMOVHLPSMask(NumElems, Mask);
5181 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5182 break;
5183 case X86ISD::MOVLHPS:
5184 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5185 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5186 DecodeMOVLHPSMask(NumElems, Mask);
5187 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5188 break;
5189 case X86ISD::VALIGN:
5190 assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
5191 "Only 32-bit and 64-bit elements are supported!");
5192 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5193 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5194 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5195 DecodeVALIGNMask(NumElems, ImmN, Mask);
5196 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5197 Ops.push_back(N->getOperand(1));
5198 Ops.push_back(N->getOperand(0));
5199 break;
5200 case X86ISD::PALIGNR:
5201 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
5202 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5203 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5204 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5205 DecodePALIGNRMask(NumElems, ImmN, Mask);
5206 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5207 Ops.push_back(N->getOperand(1));
5208 Ops.push_back(N->getOperand(0));
5209 break;
5210 case X86ISD::VSHLDQ:
5211 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
5212 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5213 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5214 DecodePSLLDQMask(NumElems, ImmN, Mask);
5215 IsUnary = true;
5216 break;
5217 case X86ISD::VSRLDQ:
5218 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
5219 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5220 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5221 DecodePSRLDQMask(NumElems, ImmN, Mask);
5222 IsUnary = true;
5223 break;
5224 case X86ISD::PSHUFD:
5225 case X86ISD::VPERMILPI:
5226 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5227 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5228 DecodePSHUFMask(NumElems, MaskEltSize, ImmN, Mask);
5229 IsUnary = true;
5230 break;
5231 case X86ISD::PSHUFHW:
5232 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5233 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5234 DecodePSHUFHWMask(NumElems, ImmN, Mask);
5235 IsUnary = true;
5236 break;
5237 case X86ISD::PSHUFLW:
5238 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5239 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5240 DecodePSHUFLWMask(NumElems, ImmN, Mask);
5241 IsUnary = true;
5242 break;
5243 case X86ISD::VZEXT_MOVL:
5244 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5245 DecodeZeroMoveLowMask(NumElems, Mask);
5246 IsUnary = true;
5247 break;
5248 case X86ISD::VBROADCAST:
5249 // We only decode broadcasts of same-sized vectors; peeking through to
5250 // extracted subvectors is likely to cause hasOneUse issues with
5251 // SimplifyDemandedBits etc.
5252 if (N->getOperand(0).getValueType() == VT) {
5253 DecodeVectorBroadcast(NumElems, Mask);
5254 IsUnary = true;
5255 break;
5257 return false;
5258 case X86ISD::VPERMILPV: {
5259 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5260 IsUnary = true;
5261 SDValue MaskNode = N->getOperand(1);
5262 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
5263 RawUndefs)) {
5264 DecodeVPERMILPMask(NumElems, MaskEltSize, RawMask, RawUndefs, Mask);
5265 break;
5267 return false;
5269 case X86ISD::PSHUFB: {
5270 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
5271 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5272 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5273 IsUnary = true;
5274 SDValue MaskNode = N->getOperand(1);
5275 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
5276 DecodePSHUFBMask(RawMask, RawUndefs, Mask);
5277 break;
5279 return false;
5281 case X86ISD::VPERMI:
5282 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5283 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5284 DecodeVPERMMask(NumElems, ImmN, Mask);
5285 IsUnary = true;
5286 break;
5287 case X86ISD::MOVSS:
5288 case X86ISD::MOVSD:
5289 case X86ISD::MOVSH:
5290 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5291 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5292 DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask);
5293 break;
5294 case X86ISD::VPERM2X128:
5295 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5296 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5297 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5298 DecodeVPERM2X128Mask(NumElems, ImmN, Mask);
5299 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5300 break;
5301 case X86ISD::SHUF128:
5302 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5303 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5304 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5305 decodeVSHUF64x2FamilyMask(NumElems, MaskEltSize, ImmN, Mask);
5306 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5307 break;
5308 case X86ISD::MOVSLDUP:
5309 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5310 DecodeMOVSLDUPMask(NumElems, Mask);
5311 IsUnary = true;
5312 break;
5313 case X86ISD::MOVSHDUP:
5314 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5315 DecodeMOVSHDUPMask(NumElems, Mask);
5316 IsUnary = true;
5317 break;
5318 case X86ISD::MOVDDUP:
5319 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5320 DecodeMOVDDUPMask(NumElems, Mask);
5321 IsUnary = true;
5322 break;
5323 case X86ISD::VPERMIL2: {
5324 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5325 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5326 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5327 SDValue MaskNode = N->getOperand(2);
5328 SDValue CtrlNode = N->getOperand(3);
5329 if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
5330 unsigned CtrlImm = CtrlOp->getZExtValue();
5331 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
5332 RawUndefs)) {
5333 DecodeVPERMIL2PMask(NumElems, MaskEltSize, CtrlImm, RawMask, RawUndefs,
5334 Mask);
5335 break;
5338 return false;
5340 case X86ISD::VPPERM: {
5341 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5342 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5343 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5344 SDValue MaskNode = N->getOperand(2);
5345 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
5346 DecodeVPPERMMask(RawMask, RawUndefs, Mask);
5347 break;
5349 return false;
5351 case X86ISD::VPERMV: {
5352 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5353 IsUnary = true;
5354 // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
5355 Ops.push_back(N->getOperand(1));
5356 SDValue MaskNode = N->getOperand(0);
5357 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
5358 RawUndefs)) {
5359 DecodeVPERMVMask(RawMask, RawUndefs, Mask);
5360 break;
5362 return false;
5364 case X86ISD::VPERMV3: {
5365 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5366 assert(N->getOperand(2).getValueType() == VT && "Unexpected value type");
5367 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
5368 // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
5369 Ops.push_back(N->getOperand(0));
5370 Ops.push_back(N->getOperand(2));
5371 SDValue MaskNode = N->getOperand(1);
5372 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
5373 RawUndefs)) {
5374 DecodeVPERMV3Mask(RawMask, RawUndefs, Mask);
5375 break;
5377 return false;
5379 default: llvm_unreachable("unknown target shuffle node");
5382 // Empty mask indicates the decode failed.
5383 if (Mask.empty())
5384 return false;
5386 // Check if we're getting a shuffle mask with zero'd elements.
5387 if (!AllowSentinelZero && isAnyZero(Mask))
5388 return false;
5390 // If we have a fake unary shuffle, the shuffle mask is spread across two
5391 // inputs that are actually the same node. Re-map the mask to always point
5392 // into the first input.
5393 if (IsFakeUnary)
5394 for (int &M : Mask)
5395 if (M >= (int)Mask.size())
5396 M -= Mask.size();
5398 // If we didn't already add operands in the opcode-specific code, default to
5399 // adding 1 or 2 operands starting at 0.
5400 if (Ops.empty()) {
5401 Ops.push_back(N->getOperand(0));
5402 if (!IsUnary || IsFakeUnary)
5403 Ops.push_back(N->getOperand(1));
5406 return true;
5409 // Wrapper for getTargetShuffleMask that discards the IsUnary result.
5410 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
5411 SmallVectorImpl<SDValue> &Ops,
5412 SmallVectorImpl<int> &Mask) {
5413 bool IsUnary;
5414 return getTargetShuffleMask(N, VT, AllowSentinelZero, Ops, Mask, IsUnary);
5417 /// Compute whether each element of a shuffle is zeroable.
5419 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
5420 /// Either it is an undef element in the shuffle mask, the element of the input
5421 /// referenced is undef, or the element of the input referenced is known to be
5422 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
5423 /// as many lanes with this technique as possible to simplify the remaining
5424 /// shuffle.
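/// For example, with Mask = <0,-1,4,5> and V1 an all-zeros build vector,
/// element 0 is known zero, element 1 is known undef, and elements 2 and 3
/// depend on the corresponding V2 operands.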
5425 static void computeZeroableShuffleElements(ArrayRef<int> Mask,
5426 SDValue V1, SDValue V2,
5427 APInt &KnownUndef, APInt &KnownZero) {
5428 int Size = Mask.size();
5429 KnownUndef = KnownZero = APInt::getZero(Size);
5431 V1 = peekThroughBitcasts(V1);
5432 V2 = peekThroughBitcasts(V2);
5434 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
5435 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
5437 int VectorSizeInBits = V1.getValueSizeInBits();
5438 int ScalarSizeInBits = VectorSizeInBits / Size;
5439 assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
5441 for (int i = 0; i < Size; ++i) {
5442 int M = Mask[i];
5443 // Handle the easy cases.
5444 if (M < 0) {
5445 KnownUndef.setBit(i);
5446 continue;
5448 if ((M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
5449 KnownZero.setBit(i);
5450 continue;
5453 // Determine shuffle input and normalize the mask.
5454 SDValue V = M < Size ? V1 : V2;
5455 M %= Size;
5457 // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
5458 if (V.getOpcode() != ISD::BUILD_VECTOR)
5459 continue;
5461 // If the BUILD_VECTOR has fewer elements, then the bitcasted portion of
5462 // the (larger) source element must be UNDEF/ZERO.
5463 if ((Size % V.getNumOperands()) == 0) {
5464 int Scale = Size / V->getNumOperands();
5465 SDValue Op = V.getOperand(M / Scale);
5466 if (Op.isUndef())
5467 KnownUndef.setBit(i);
5468 if (X86::isZeroNode(Op))
5469 KnownZero.setBit(i);
5470 else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
5471 APInt Val = Cst->getAPIntValue();
5472 Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
5473 if (Val == 0)
5474 KnownZero.setBit(i);
5475 } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
5476 APInt Val = Cst->getValueAPF().bitcastToAPInt();
5477 Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
5478 if (Val == 0)
5479 KnownZero.setBit(i);
5481 continue;
5484 // If the BUILD_VECTOR has more elements, then all the (smaller) source
5485 // elements must be UNDEF or ZERO.
5486 if ((V.getNumOperands() % Size) == 0) {
5487 int Scale = V->getNumOperands() / Size;
5488 bool AllUndef = true;
5489 bool AllZero = true;
5490 for (int j = 0; j < Scale; ++j) {
5491 SDValue Op = V.getOperand((M * Scale) + j);
5492 AllUndef &= Op.isUndef();
5493 AllZero &= X86::isZeroNode(Op);
5495 if (AllUndef)
5496 KnownUndef.setBit(i);
5497 if (AllZero)
5498 KnownZero.setBit(i);
5499 continue;
5504 /// Decode a target shuffle mask and inputs and see if any values are
5505 /// known to be undef or zero from their inputs.
5506 /// Returns true if the target shuffle mask was decoded.
5507 /// FIXME: Merge this with computeZeroableShuffleElements?
5508 static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
5509 SmallVectorImpl<SDValue> &Ops,
5510 APInt &KnownUndef, APInt &KnownZero) {
5511 bool IsUnary;
5512 if (!isTargetShuffle(N.getOpcode()))
5513 return false;
5515 MVT VT = N.getSimpleValueType();
5516 if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
5517 return false;
5519 int Size = Mask.size();
5520 SDValue V1 = Ops[0];
5521 SDValue V2 = IsUnary ? V1 : Ops[1];
5522 KnownUndef = KnownZero = APInt::getZero(Size);
5524 V1 = peekThroughBitcasts(V1);
5525 V2 = peekThroughBitcasts(V2);
5527 assert((VT.getSizeInBits() % Size) == 0 &&
5528 "Illegal split of shuffle value type");
5529 unsigned EltSizeInBits = VT.getSizeInBits() / Size;
5531 // Extract known constant input data.
5532 APInt UndefSrcElts[2];
5533 SmallVector<APInt, 32> SrcEltBits[2];
5534 bool IsSrcConstant[2] = {
5535 getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
5536 SrcEltBits[0], true, false),
5537 getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
5538 SrcEltBits[1], true, false)};
5540 for (int i = 0; i < Size; ++i) {
5541 int M = Mask[i];
5543 // Already decoded as SM_SentinelZero / SM_SentinelUndef.
5544 if (M < 0) {
5545 assert(isUndefOrZero(M) && "Unknown shuffle sentinel value!");
5546 if (SM_SentinelUndef == M)
5547 KnownUndef.setBit(i);
5548 if (SM_SentinelZero == M)
5549 KnownZero.setBit(i);
5550 continue;
5553 // Determine shuffle input and normalize the mask.
5554 unsigned SrcIdx = M / Size;
5555 SDValue V = M < Size ? V1 : V2;
5556 M %= Size;
5558 // We are referencing an UNDEF input.
5559 if (V.isUndef()) {
5560 KnownUndef.setBit(i);
5561 continue;
5564 // SCALAR_TO_VECTOR - only the first element is defined, and the rest UNDEF.
5565 // TODO: We currently only set UNDEF for integer types - floats use the same
5566 // registers as vectors and many of the scalar folded loads rely on the
5567 // SCALAR_TO_VECTOR pattern.
5568 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
5569 (Size % V.getValueType().getVectorNumElements()) == 0) {
5570 int Scale = Size / V.getValueType().getVectorNumElements();
5571 int Idx = M / Scale;
5572 if (Idx != 0 && !VT.isFloatingPoint())
5573 KnownUndef.setBit(i);
5574 else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
5575 KnownZero.setBit(i);
5576 continue;
5579 // INSERT_SUBVECTOR - to widen vectors we often insert them into UNDEF
5580 // base vectors.
5581 if (V.getOpcode() == ISD::INSERT_SUBVECTOR) {
5582 SDValue Vec = V.getOperand(0);
5583 int NumVecElts = Vec.getValueType().getVectorNumElements();
5584 if (Vec.isUndef() && Size == NumVecElts) {
5585 int Idx = V.getConstantOperandVal(2);
5586 int NumSubElts = V.getOperand(1).getValueType().getVectorNumElements();
5587 if (M < Idx || (Idx + NumSubElts) <= M)
5588 KnownUndef.setBit(i);
5590 continue;
5593 // Attempt to extract from the source's constant bits.
5594 if (IsSrcConstant[SrcIdx]) {
5595 if (UndefSrcElts[SrcIdx][M])
5596 KnownUndef.setBit(i);
5597 else if (SrcEltBits[SrcIdx][M] == 0)
5598 KnownZero.setBit(i);
5602 assert(VT.getVectorNumElements() == (unsigned)Size &&
5603 "Different mask size from vector size!");
5604 return true;
5607 // Replace target shuffle mask elements with known undef/zero sentinels.
5608 static void resolveTargetShuffleFromZeroables(SmallVectorImpl<int> &Mask,
5609 const APInt &KnownUndef,
5610 const APInt &KnownZero,
5611 bool ResolveKnownZeros= true) {
5612 unsigned NumElts = Mask.size();
5613 assert(KnownUndef.getBitWidth() == NumElts &&
5614 KnownZero.getBitWidth() == NumElts && "Shuffle mask size mismatch");
5616 for (unsigned i = 0; i != NumElts; ++i) {
5617 if (KnownUndef[i])
5618 Mask[i] = SM_SentinelUndef;
5619 else if (ResolveKnownZeros && KnownZero[i])
5620 Mask[i] = SM_SentinelZero;
5624 // Extract target shuffle mask sentinel elements to known undef/zero bitmasks.
5625 static void resolveZeroablesFromTargetShuffle(const SmallVectorImpl<int> &Mask,
5626 APInt &KnownUndef,
5627 APInt &KnownZero) {
5628 unsigned NumElts = Mask.size();
5629 KnownUndef = KnownZero = APInt::getZero(NumElts);
5631 for (unsigned i = 0; i != NumElts; ++i) {
5632 int M = Mask[i];
5633 if (SM_SentinelUndef == M)
5634 KnownUndef.setBit(i);
5635 if (SM_SentinelZero == M)
5636 KnownZero.setBit(i);
5640 // Attempt to create a shuffle mask from a VSELECT/BLENDV condition mask.
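// For example (illustrative): a v4i32 VSELECT with the constant condition
// <-1, 0, -1, 0> decodes to the shuffle mask <0, 5, 2, 7>, i.e. lanes with a
// zero condition element select from the second operand.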
5641 static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
5642 SDValue Cond, bool IsBLENDV = false) {
5643 EVT CondVT = Cond.getValueType();
5644 unsigned EltSizeInBits = CondVT.getScalarSizeInBits();
5645 unsigned NumElts = CondVT.getVectorNumElements();
5647 APInt UndefElts;
5648 SmallVector<APInt, 32> EltBits;
5649 if (!getTargetConstantBitsFromNode(Cond, EltSizeInBits, UndefElts, EltBits,
5650 true, false))
5651 return false;
5653 Mask.resize(NumElts, SM_SentinelUndef);
5655 for (int i = 0; i != (int)NumElts; ++i) {
5656 Mask[i] = i;
5657 // Arbitrarily choose from the 2nd operand if the select condition element
5658 // is undef.
5659 // TODO: Can we do better by matching patterns such as even/odd?
5660 if (UndefElts[i] || (!IsBLENDV && EltBits[i].isZero()) ||
5661 (IsBLENDV && EltBits[i].isNonNegative()))
5662 Mask[i] += NumElts;
5665 return true;
5668 // Forward declaration (for getFauxShuffleMask recursive check).
5669 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
5670 SmallVectorImpl<SDValue> &Inputs,
5671 SmallVectorImpl<int> &Mask,
5672 const SelectionDAG &DAG, unsigned Depth,
5673 bool ResolveKnownElts);
5675 // Attempt to decode ops that could be represented as a shuffle mask.
5676 // The decoded shuffle mask may contain a different number of elements than the
5677 // destination value type.
5678 // TODO: Merge into getTargetShuffleInputs()
5679 static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
5680 SmallVectorImpl<int> &Mask,
5681 SmallVectorImpl<SDValue> &Ops,
5682 const SelectionDAG &DAG, unsigned Depth,
5683 bool ResolveKnownElts) {
5684 Mask.clear();
5685 Ops.clear();
5687 MVT VT = N.getSimpleValueType();
5688 unsigned NumElts = VT.getVectorNumElements();
5689 unsigned NumSizeInBits = VT.getSizeInBits();
5690 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
5691 if ((NumBitsPerElt % 8) != 0 || (NumSizeInBits % 8) != 0)
5692 return false;
5693 assert(NumElts == DemandedElts.getBitWidth() && "Unexpected vector size");
5694 unsigned NumSizeInBytes = NumSizeInBits / 8;
5695 unsigned NumBytesPerElt = NumBitsPerElt / 8;
5697 unsigned Opcode = N.getOpcode();
5698 switch (Opcode) {
5699 case ISD::VECTOR_SHUFFLE: {
5700 // Don't treat ISD::VECTOR_SHUFFLE as a target shuffle so decode it here.
5701 ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(N)->getMask();
5702 if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
5703 Mask.append(ShuffleMask.begin(), ShuffleMask.end());
5704 Ops.push_back(N.getOperand(0));
5705 Ops.push_back(N.getOperand(1));
5706 return true;
5708 return false;
5710 case ISD::AND:
5711 case X86ISD::ANDNP: {
5712 // Attempt to decode as a per-byte mask.
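// For example (illustrative): AND v16i8 X, <0xFF, 0x00, 0xFF, 0xFF, ...>
// decodes to the byte mask <0, SM_SentinelZero, 2, 3, ...>, keeping the
// unmasked bytes of X and zeroing the rest.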
5713 APInt UndefElts;
5714 SmallVector<APInt, 32> EltBits;
5715 SDValue N0 = N.getOperand(0);
5716 SDValue N1 = N.getOperand(1);
5717 bool IsAndN = (X86ISD::ANDNP == Opcode);
5718 uint64_t ZeroMask = IsAndN ? 255 : 0;
5719 if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
5720 return false;
5721 // We can't assume an undef src element gives an undef dst - the other src
5722 // might be zero.
5723 if (!UndefElts.isZero())
5724 return false;
5725 for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
5726 const APInt &ByteBits = EltBits[i];
5727 if (ByteBits != 0 && ByteBits != 255)
5728 return false;
5729 Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
5731 Ops.push_back(IsAndN ? N1 : N0);
5732 return true;
5734 case ISD::OR: {
5735 // Handle OR(SHUFFLE,SHUFFLE) case where one source is zero and the other
5736 // is a valid shuffle index.
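// For example (illustrative): if the sources decode to the zero-aware masks
// <0, Z, 2, Z> and <Z, 1, Z, 3> (Z == SM_SentinelZero), the OR becomes the
// blend mask <0, 5, 2, 7> of the two inputs.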
5737 SDValue N0 = peekThroughBitcasts(N.getOperand(0));
5738 SDValue N1 = peekThroughBitcasts(N.getOperand(1));
5739 if (!N0.getValueType().isVector() || !N1.getValueType().isVector())
5740 return false;
5742 SmallVector<int, 64> SrcMask0, SrcMask1;
5743 SmallVector<SDValue, 2> SrcInputs0, SrcInputs1;
5744 APInt Demand0 = APInt::getAllOnes(N0.getValueType().getVectorNumElements());
5745 APInt Demand1 = APInt::getAllOnes(N1.getValueType().getVectorNumElements());
5746 if (!getTargetShuffleInputs(N0, Demand0, SrcInputs0, SrcMask0, DAG,
5747 Depth + 1, true) ||
5748 !getTargetShuffleInputs(N1, Demand1, SrcInputs1, SrcMask1, DAG,
5749 Depth + 1, true))
5750 return false;
5752 size_t MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
5753 SmallVector<int, 64> Mask0, Mask1;
5754 narrowShuffleMaskElts(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
5755 narrowShuffleMaskElts(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
5756 for (int i = 0; i != (int)MaskSize; ++i) {
5757 // NOTE: Don't handle SM_SentinelUndef, as we can end up in infinite
5758 // loops converting between OR and BLEND shuffles due to
5759 // canWidenShuffleElements merging away undef elements, meaning we
5760 // fail to recognise the OR as the undef element isn't known zero.
5761 if (Mask0[i] == SM_SentinelZero && Mask1[i] == SM_SentinelZero)
5762 Mask.push_back(SM_SentinelZero);
5763 else if (Mask1[i] == SM_SentinelZero)
5764 Mask.push_back(i);
5765 else if (Mask0[i] == SM_SentinelZero)
5766 Mask.push_back(i + MaskSize);
5767 else
5768 return false;
5770 Ops.push_back(N0);
5771 Ops.push_back(N1);
5772 return true;
5774 case ISD::INSERT_SUBVECTOR: {
5775 SDValue Src = N.getOperand(0);
5776 SDValue Sub = N.getOperand(1);
5777 EVT SubVT = Sub.getValueType();
5778 unsigned NumSubElts = SubVT.getVectorNumElements();
5779 if (!N->isOnlyUserOf(Sub.getNode()))
5780 return false;
5781 SDValue SubBC = peekThroughBitcasts(Sub);
5782 uint64_t InsertIdx = N.getConstantOperandVal(2);
5783 // Handle INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)).
5784 if (SubBC.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5785 SubBC.getOperand(0).getValueSizeInBits() == NumSizeInBits) {
5786 uint64_t ExtractIdx = SubBC.getConstantOperandVal(1);
5787 SDValue SubBCSrc = SubBC.getOperand(0);
5788 unsigned NumSubSrcBCElts = SubBCSrc.getValueType().getVectorNumElements();
5789 unsigned MaxElts = std::max(NumElts, NumSubSrcBCElts);
5790 assert((MaxElts % NumElts) == 0 && (MaxElts % NumSubSrcBCElts) == 0 &&
5791 "Subvector valuetype mismatch");
5792 InsertIdx *= (MaxElts / NumElts);
5793 ExtractIdx *= (MaxElts / NumSubSrcBCElts);
5794 NumSubElts *= (MaxElts / NumElts);
5795 bool SrcIsUndef = Src.isUndef();
5796 for (int i = 0; i != (int)MaxElts; ++i)
5797 Mask.push_back(SrcIsUndef ? SM_SentinelUndef : i);
5798 for (int i = 0; i != (int)NumSubElts; ++i)
5799 Mask[InsertIdx + i] = (SrcIsUndef ? 0 : MaxElts) + ExtractIdx + i;
5800 if (!SrcIsUndef)
5801 Ops.push_back(Src);
5802 Ops.push_back(SubBCSrc);
5803 return true;
5805 // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
5806 SmallVector<int, 64> SubMask;
5807 SmallVector<SDValue, 2> SubInputs;
5808 SDValue SubSrc = peekThroughOneUseBitcasts(Sub);
5809 EVT SubSrcVT = SubSrc.getValueType();
5810 if (!SubSrcVT.isVector())
5811 return false;
5813 APInt SubDemand = APInt::getAllOnes(SubSrcVT.getVectorNumElements());
5814 if (!getTargetShuffleInputs(SubSrc, SubDemand, SubInputs, SubMask, DAG,
5815 Depth + 1, ResolveKnownElts))
5816 return false;
5818 // Subvector shuffle inputs must not be larger than the subvector.
5819 if (llvm::any_of(SubInputs, [SubVT](SDValue SubInput) {
5820 return SubVT.getFixedSizeInBits() <
5821 SubInput.getValueSizeInBits().getFixedValue();
5823 return false;
5825 if (SubMask.size() != NumSubElts) {
5826 assert(((SubMask.size() % NumSubElts) == 0 ||
5827 (NumSubElts % SubMask.size()) == 0) && "Illegal submask scale");
5828 if ((NumSubElts % SubMask.size()) == 0) {
5829 int Scale = NumSubElts / SubMask.size();
5830 SmallVector<int,64> ScaledSubMask;
5831 narrowShuffleMaskElts(Scale, SubMask, ScaledSubMask);
5832 SubMask = ScaledSubMask;
5833 } else {
5834 int Scale = SubMask.size() / NumSubElts;
5835 NumSubElts = SubMask.size();
5836 NumElts *= Scale;
5837 InsertIdx *= Scale;
5840 Ops.push_back(Src);
5841 Ops.append(SubInputs.begin(), SubInputs.end());
5842 if (ISD::isBuildVectorAllZeros(Src.getNode()))
5843 Mask.append(NumElts, SM_SentinelZero);
5844 else
5845 for (int i = 0; i != (int)NumElts; ++i)
5846 Mask.push_back(i);
5847 for (int i = 0; i != (int)NumSubElts; ++i) {
5848 int M = SubMask[i];
5849 if (0 <= M) {
5850 int InputIdx = M / NumSubElts;
5851 M = (NumElts * (1 + InputIdx)) + (M % NumSubElts);
5853 Mask[i + InsertIdx] = M;
5855 return true;
5857 case X86ISD::PINSRB:
5858 case X86ISD::PINSRW:
5859 case ISD::SCALAR_TO_VECTOR:
5860 case ISD::INSERT_VECTOR_ELT: {
5861 // Match against an insert_vector_elt/scalar_to_vector of an extract from a
5862 // vector, for matching src/dst vector types.
5863 SDValue Scl = N.getOperand(Opcode == ISD::SCALAR_TO_VECTOR ? 0 : 1);
5865 unsigned DstIdx = 0;
5866 if (Opcode != ISD::SCALAR_TO_VECTOR) {
5867 // Check we have an in-range constant insertion index.
5868 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
5869 N.getConstantOperandAPInt(2).uge(NumElts))
5870 return false;
5871 DstIdx = N.getConstantOperandVal(2);
5873 // Attempt to recognise an INSERT*(VEC, 0, DstIdx) shuffle pattern.
5874 if (X86::isZeroNode(Scl)) {
5875 Ops.push_back(N.getOperand(0));
5876 for (unsigned i = 0; i != NumElts; ++i)
5877 Mask.push_back(i == DstIdx ? SM_SentinelZero : (int)i);
5878 return true;
5882 // Peek through trunc/aext/zext.
5883 // TODO: aext shouldn't require SM_SentinelZero padding.
5884 // TODO: handle shift of scalars.
5885 unsigned MinBitsPerElt = Scl.getScalarValueSizeInBits();
5886 while (Scl.getOpcode() == ISD::TRUNCATE ||
5887 Scl.getOpcode() == ISD::ANY_EXTEND ||
5888 Scl.getOpcode() == ISD::ZERO_EXTEND) {
5889 Scl = Scl.getOperand(0);
5890 MinBitsPerElt =
5891 std::min<unsigned>(MinBitsPerElt, Scl.getScalarValueSizeInBits());
5893 if ((MinBitsPerElt % 8) != 0)
5894 return false;
5896 // Attempt to find the source vector the scalar was extracted from.
5897 SDValue SrcExtract;
5898 if ((Scl.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
5899 Scl.getOpcode() == X86ISD::PEXTRW ||
5900 Scl.getOpcode() == X86ISD::PEXTRB) &&
5901 Scl.getOperand(0).getValueSizeInBits() == NumSizeInBits) {
5902 SrcExtract = Scl;
5904 if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
5905 return false;
5907 SDValue SrcVec = SrcExtract.getOperand(0);
5908 EVT SrcVT = SrcVec.getValueType();
5909 if (!SrcVT.getScalarType().isByteSized())
5910 return false;
5911 unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
5912 unsigned SrcByte = SrcIdx * (SrcVT.getScalarSizeInBits() / 8);
5913 unsigned DstByte = DstIdx * NumBytesPerElt;
5914 MinBitsPerElt =
5915 std::min<unsigned>(MinBitsPerElt, SrcVT.getScalarSizeInBits());
5917 // Create 'identity' byte level shuffle mask and then add inserted bytes.
5918 if (Opcode == ISD::SCALAR_TO_VECTOR) {
5919 Ops.push_back(SrcVec);
5920 Mask.append(NumSizeInBytes, SM_SentinelUndef);
5921 } else {
5922 Ops.push_back(SrcVec);
5923 Ops.push_back(N.getOperand(0));
5924 for (int i = 0; i != (int)NumSizeInBytes; ++i)
5925 Mask.push_back(NumSizeInBytes + i);
5928 unsigned MinBytesPerElts = MinBitsPerElt / 8;
5929 MinBytesPerElts = std::min(MinBytesPerElts, NumBytesPerElt);
5930 for (unsigned i = 0; i != MinBytesPerElts; ++i)
5931 Mask[DstByte + i] = SrcByte + i;
5932 for (unsigned i = MinBytesPerElts; i < NumBytesPerElt; ++i)
5933 Mask[DstByte + i] = SM_SentinelZero;
5934 return true;
5936 case X86ISD::PACKSS:
5937 case X86ISD::PACKUS: {
5938 SDValue N0 = N.getOperand(0);
5939 SDValue N1 = N.getOperand(1);
5940 assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
5941 N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
5942 "Unexpected input value type");
5944 APInt EltsLHS, EltsRHS;
5945 getPackDemandedElts(VT, DemandedElts, EltsLHS, EltsRHS);
5947 // If we know input saturation won't happen (or we don't care about particular
5948 // lanes), we can treat this as a truncation shuffle.
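// For example (illustrative): v8i16 PACKUS(v4i32 A, v4i32 B) with the upper
// 16 bits of every 32-bit lane known zero decodes to the word-level mask
// <0, 2, 4, 6, 8, 10, 12, 14> over the concatenation of A and B.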
5949 bool Offset0 = false, Offset1 = false;
5950 if (Opcode == X86ISD::PACKSS) {
5951 if ((!(N0.isUndef() || EltsLHS.isZero()) &&
5952 DAG.ComputeNumSignBits(N0, EltsLHS, Depth + 1) <= NumBitsPerElt) ||
5953 (!(N1.isUndef() || EltsRHS.isZero()) &&
5954 DAG.ComputeNumSignBits(N1, EltsRHS, Depth + 1) <= NumBitsPerElt))
5955 return false;
5956 // We can't easily fold ASHR into a shuffle, but if it was feeding a
5957 // PACKSS then it was likely being used for sign-extension for a
5958 // truncation, so just peek through and adjust the mask accordingly.
5959 if (N0.getOpcode() == X86ISD::VSRAI && N->isOnlyUserOf(N0.getNode()) &&
5960 N0.getConstantOperandAPInt(1) == NumBitsPerElt) {
5961 Offset0 = true;
5962 N0 = N0.getOperand(0);
5964 if (N1.getOpcode() == X86ISD::VSRAI && N->isOnlyUserOf(N1.getNode()) &&
5965 N1.getConstantOperandAPInt(1) == NumBitsPerElt) {
5966 Offset1 = true;
5967 N1 = N1.getOperand(0);
5969 } else {
5970 APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
5971 if ((!(N0.isUndef() || EltsLHS.isZero()) &&
5972 !DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS, Depth + 1)) ||
5973 (!(N1.isUndef() || EltsRHS.isZero()) &&
5974 !DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS, Depth + 1)))
5975 return false;
5978 bool IsUnary = (N0 == N1);
5980 Ops.push_back(N0);
5981 if (!IsUnary)
5982 Ops.push_back(N1);
5984 createPackShuffleMask(VT, Mask, IsUnary);
5986 if (Offset0 || Offset1) {
5987 for (int &M : Mask)
5988 if ((Offset0 && isInRange(M, 0, NumElts)) ||
5989 (Offset1 && isInRange(M, NumElts, 2 * NumElts)))
5990 ++M;
5992 return true;
5994 case ISD::VSELECT:
5995 case X86ISD::BLENDV: {
5996 SDValue Cond = N.getOperand(0);
5997 if (createShuffleMaskFromVSELECT(Mask, Cond, Opcode == X86ISD::BLENDV)) {
5998 Ops.push_back(N.getOperand(1));
5999 Ops.push_back(N.getOperand(2));
6000 return true;
6002 return false;
6004 case X86ISD::VTRUNC: {
6005 SDValue Src = N.getOperand(0);
6006 EVT SrcVT = Src.getValueType();
6007 // Truncated source must be a simple vector.
6008 if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
6009 (SrcVT.getScalarSizeInBits() % 8) != 0)
6010 return false;
6011 unsigned NumSrcElts = SrcVT.getVectorNumElements();
6012 unsigned NumBitsPerSrcElt = SrcVT.getScalarSizeInBits();
6013 unsigned Scale = NumBitsPerSrcElt / NumBitsPerElt;
6014 assert((NumBitsPerSrcElt % NumBitsPerElt) == 0 && "Illegal truncation");
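// For example (illustrative): VTRUNC v16i8 of a v4i32 source decodes to the
// byte mask <0, 4, 8, 12> followed by twelve SM_SentinelZero elements.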
6015 for (unsigned i = 0; i != NumSrcElts; ++i)
6016 Mask.push_back(i * Scale);
6017 Mask.append(NumElts - NumSrcElts, SM_SentinelZero);
6018 Ops.push_back(Src);
6019 return true;
6021 case X86ISD::VSHLI:
6022 case X86ISD::VSRLI: {
6023 uint64_t ShiftVal = N.getConstantOperandVal(1);
6024 // Out of range bit shifts are guaranteed to be zero.
6025 if (NumBitsPerElt <= ShiftVal) {
6026 Mask.append(NumElts, SM_SentinelZero);
6027 return true;
6030 // We can only decode 'whole byte' bit shifts as shuffles.
6031 if ((ShiftVal % 8) != 0)
6032 break;
6034 uint64_t ByteShift = ShiftVal / 8;
6035 Ops.push_back(N.getOperand(0));
6037 // Clear mask to all zeros and insert the shifted byte indices.
6038 Mask.append(NumSizeInBytes, SM_SentinelZero);
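// For example (illustrative): VSHLI v8i16 X, 8 shifts each 16-bit lane left
// by one byte, giving the byte mask
// <Z, 0, Z, 2, Z, 4, Z, 6, Z, 8, Z, 10, Z, 12, Z, 14> (Z == SM_SentinelZero).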
6040 if (X86ISD::VSHLI == Opcode) {
6041 for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt)
6042 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
6043 Mask[i + j] = i + j - ByteShift;
6044 } else {
6045 for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt)
6046 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
6047 Mask[i + j - ByteShift] = i + j;
6049 return true;
6051 case X86ISD::VROTLI:
6052 case X86ISD::VROTRI: {
6053 // We can only decode 'whole byte' bit rotates as shuffles.
6054 uint64_t RotateVal = N.getConstantOperandAPInt(1).urem(NumBitsPerElt);
6055 if ((RotateVal % 8) != 0)
6056 return false;
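// For example (illustrative): VROTLI v4i32 X, 8 rotates each 32-bit lane left
// by one byte, giving the byte mask <3, 0, 1, 2> within each element.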
6057 Ops.push_back(N.getOperand(0));
6058 int Offset = RotateVal / 8;
6059 Offset = (X86ISD::VROTLI == Opcode ? NumBytesPerElt - Offset : Offset);
6060 for (int i = 0; i != (int)NumElts; ++i) {
6061 int BaseIdx = i * NumBytesPerElt;
6062 for (int j = 0; j != (int)NumBytesPerElt; ++j) {
6063 Mask.push_back(BaseIdx + ((Offset + j) % NumBytesPerElt));
6066 return true;
6068 case X86ISD::VBROADCAST: {
6069 SDValue Src = N.getOperand(0);
6070 if (!Src.getSimpleValueType().isVector()) {
6071 if (Src.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6072 !isNullConstant(Src.getOperand(1)) ||
6073 Src.getOperand(0).getValueType().getScalarType() !=
6074 VT.getScalarType())
6075 return false;
6076 Src = Src.getOperand(0);
6078 Ops.push_back(Src);
6079 Mask.append(NumElts, 0);
6080 return true;
6082 case ISD::SIGN_EXTEND_VECTOR_INREG: {
6083 SDValue Src = N.getOperand(0);
6084 EVT SrcVT = Src.getValueType();
6085 unsigned NumBitsPerSrcElt = SrcVT.getScalarSizeInBits();
6087 // Extended source must be a simple vector.
6088 if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
6089 (NumBitsPerSrcElt % 8) != 0)
6090 return false;
6092 // We can only handle all-signbits extensions.
6093 APInt DemandedSrcElts =
6094 DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
6095 if (DAG.ComputeNumSignBits(Src, DemandedSrcElts) != NumBitsPerSrcElt)
6096 return false;
6098 assert((NumBitsPerElt % NumBitsPerSrcElt) == 0 && "Unexpected extension");
6099 unsigned Scale = NumBitsPerElt / NumBitsPerSrcElt;
6100 for (unsigned I = 0; I != NumElts; ++I)
6101 Mask.append(Scale, I);
6102 Ops.push_back(Src);
6103 return true;
6105 case ISD::ZERO_EXTEND:
6106 case ISD::ANY_EXTEND:
6107 case ISD::ZERO_EXTEND_VECTOR_INREG:
6108 case ISD::ANY_EXTEND_VECTOR_INREG: {
6109 SDValue Src = N.getOperand(0);
6110 EVT SrcVT = Src.getValueType();
6112 // Extended source must be a simple vector.
6113 if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
6114 (SrcVT.getScalarSizeInBits() % 8) != 0)
6115 return false;
6117 bool IsAnyExtend =
6118 (ISD::ANY_EXTEND == Opcode || ISD::ANY_EXTEND_VECTOR_INREG == Opcode);
6119 DecodeZeroExtendMask(SrcVT.getScalarSizeInBits(), NumBitsPerElt, NumElts,
6120 IsAnyExtend, Mask);
6121 Ops.push_back(Src);
6122 return true;
6126 return false;
6129 /// Removes unused/repeated shuffle source inputs and adjusts the shuffle mask.
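///
/// For example (illustrative): with a 4-element mask, Inputs = {A, A} and
/// Mask = <0, 5, 2, 7>, the repeated second input is merged into the first,
/// giving Inputs = {A} and Mask = <0, 1, 2, 3>.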
6130 static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
6131 SmallVectorImpl<int> &Mask) {
6132 int MaskWidth = Mask.size();
6133 SmallVector<SDValue, 16> UsedInputs;
6134 for (int i = 0, e = Inputs.size(); i < e; ++i) {
6135 int lo = UsedInputs.size() * MaskWidth;
6136 int hi = lo + MaskWidth;
6138 // Strip UNDEF input usage.
6139 if (Inputs[i].isUndef())
6140 for (int &M : Mask)
6141 if ((lo <= M) && (M < hi))
6142 M = SM_SentinelUndef;
6144 // Check for unused inputs.
6145 if (none_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
6146 for (int &M : Mask)
6147 if (lo <= M)
6148 M -= MaskWidth;
6149 continue;
6152 // Check for repeated inputs.
6153 bool IsRepeat = false;
6154 for (int j = 0, ue = UsedInputs.size(); j != ue; ++j) {
6155 if (UsedInputs[j] != Inputs[i])
6156 continue;
6157 for (int &M : Mask)
6158 if (lo <= M)
6159 M = (M < hi) ? ((M - lo) + (j * MaskWidth)) : (M - MaskWidth);
6160 IsRepeat = true;
6161 break;
6163 if (IsRepeat)
6164 continue;
6166 UsedInputs.push_back(Inputs[i]);
6168 Inputs = UsedInputs;
6171 /// Calls getTargetShuffleAndZeroables to resolve a target shuffle mask's inputs
6172 /// and then sets the SM_SentinelUndef and SM_SentinelZero values.
6173 /// Returns true if the target shuffle mask was decoded.
6174 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
6175 SmallVectorImpl<SDValue> &Inputs,
6176 SmallVectorImpl<int> &Mask,
6177 APInt &KnownUndef, APInt &KnownZero,
6178 const SelectionDAG &DAG, unsigned Depth,
6179 bool ResolveKnownElts) {
6180 if (Depth >= SelectionDAG::MaxRecursionDepth)
6181 return false; // Limit search depth.
6183 EVT VT = Op.getValueType();
6184 if (!VT.isSimple() || !VT.isVector())
6185 return false;
6187 if (getTargetShuffleAndZeroables(Op, Mask, Inputs, KnownUndef, KnownZero)) {
6188 if (ResolveKnownElts)
6189 resolveTargetShuffleFromZeroables(Mask, KnownUndef, KnownZero);
6190 return true;
6192 if (getFauxShuffleMask(Op, DemandedElts, Mask, Inputs, DAG, Depth,
6193 ResolveKnownElts)) {
6194 resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
6195 return true;
6197 return false;
6200 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
6201 SmallVectorImpl<SDValue> &Inputs,
6202 SmallVectorImpl<int> &Mask,
6203 const SelectionDAG &DAG, unsigned Depth,
6204 bool ResolveKnownElts) {
6205 APInt KnownUndef, KnownZero;
6206 return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, KnownUndef,
6207 KnownZero, DAG, Depth, ResolveKnownElts);
6210 static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
6211 SmallVectorImpl<int> &Mask,
6212 const SelectionDAG &DAG, unsigned Depth = 0,
6213 bool ResolveKnownElts = true) {
6214 EVT VT = Op.getValueType();
6215 if (!VT.isSimple() || !VT.isVector())
6216 return false;
6218 unsigned NumElts = Op.getValueType().getVectorNumElements();
6219 APInt DemandedElts = APInt::getAllOnes(NumElts);
6220 return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, DAG, Depth,
6221 ResolveKnownElts);
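// A minimal usage sketch (hypothetical caller, not taken from this file):
//   SmallVector<SDValue, 2> Inputs;
//   SmallVector<int, 16> ShuffleMask;
//   if (getTargetShuffleInputs(Op, Inputs, ShuffleMask, DAG))
//     // ShuffleMask may contain SM_SentinelUndef/SM_SentinelZero and may
//     // have a different element count than Op's value type.
//     combineWithMask(Inputs, ShuffleMask); // hypothetical consumer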
6224 // Attempt to create a scalar/subvector broadcast from the base MemSDNode.
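// For example (illustrative, assuming a suitable MemSDNode *Mem): the 32-bit
// scalar at byte offset 8 of Mem can be splatted to v8f32 with
//   getBROADCAST_LOAD(X86ISD::VBROADCAST_LOAD, DL, MVT::v8f32, MVT::f32,
//                     Mem, /*Offset=*/8, DAG);
// which returns SDValue() if Mem is not a simple, temporal read.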
6225 static SDValue getBROADCAST_LOAD(unsigned Opcode, const SDLoc &DL, EVT VT,
6226 EVT MemVT, MemSDNode *Mem, unsigned Offset,
6227 SelectionDAG &DAG) {
6228 assert((Opcode == X86ISD::VBROADCAST_LOAD ||
6229 Opcode == X86ISD::SUBV_BROADCAST_LOAD) &&
6230 "Unknown broadcast load type");
6232 // Ensure this is a simple (non-atomic, non-volatile), temporal read memop.
6233 if (!Mem || !Mem->readMem() || !Mem->isSimple() || Mem->isNonTemporal())
6234 return SDValue();
6236 SDValue Ptr = DAG.getMemBasePlusOffset(Mem->getBasePtr(),
6237 TypeSize::getFixed(Offset), DL);
6238 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
6239 SDValue Ops[] = {Mem->getChain(), Ptr};
6240 SDValue BcstLd = DAG.getMemIntrinsicNode(
6241 Opcode, DL, Tys, Ops, MemVT,
6242 DAG.getMachineFunction().getMachineMemOperand(
6243 Mem->getMemOperand(), Offset, MemVT.getStoreSize()));
6244 DAG.makeEquivalentMemoryOrdering(SDValue(Mem, 1), BcstLd.getValue(1));
6245 return BcstLd;
6248 /// Returns the scalar element that will make up the i'th
6249 /// element of the result of the vector shuffle.
6250 static SDValue getShuffleScalarElt(SDValue Op, unsigned Index,
6251 SelectionDAG &DAG, unsigned Depth) {
6252 if (Depth >= SelectionDAG::MaxRecursionDepth)
6253 return SDValue(); // Limit search depth.
6255 EVT VT = Op.getValueType();
6256 unsigned Opcode = Op.getOpcode();
6257 unsigned NumElems = VT.getVectorNumElements();
6259 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
6260 if (auto *SV = dyn_cast<ShuffleVectorSDNode>(Op)) {
6261 int Elt = SV->getMaskElt(Index);
6263 if (Elt < 0)
6264 return DAG.getUNDEF(VT.getVectorElementType());
6266 SDValue Src = (Elt < (int)NumElems) ? SV->getOperand(0) : SV->getOperand(1);
6267 return getShuffleScalarElt(Src, Elt % NumElems, DAG, Depth + 1);
6270 // Recurse into target specific vector shuffles to find scalars.
6271 if (isTargetShuffle(Opcode)) {
6272 MVT ShufVT = VT.getSimpleVT();
6273 MVT ShufSVT = ShufVT.getVectorElementType();
6274 int NumElems = (int)ShufVT.getVectorNumElements();
6275 SmallVector<int, 16> ShuffleMask;
6276 SmallVector<SDValue, 16> ShuffleOps;
6277 if (!getTargetShuffleMask(Op.getNode(), ShufVT, true, ShuffleOps,
6278 ShuffleMask))
6279 return SDValue();
6281 int Elt = ShuffleMask[Index];
6282 if (Elt == SM_SentinelZero)
6283 return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(Op), ShufSVT)
6284 : DAG.getConstantFP(+0.0, SDLoc(Op), ShufSVT);
6285 if (Elt == SM_SentinelUndef)
6286 return DAG.getUNDEF(ShufSVT);
6288 assert(0 <= Elt && Elt < (2 * NumElems) && "Shuffle index out of range");
6289 SDValue Src = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
6290 return getShuffleScalarElt(Src, Elt % NumElems, DAG, Depth + 1);
6293 // Recurse into insert_subvector base/sub vector to find scalars.
6294 if (Opcode == ISD::INSERT_SUBVECTOR) {
6295 SDValue Vec = Op.getOperand(0);
6296 SDValue Sub = Op.getOperand(1);
6297 uint64_t SubIdx = Op.getConstantOperandVal(2);
6298 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
6300 if (SubIdx <= Index && Index < (SubIdx + NumSubElts))
6301 return getShuffleScalarElt(Sub, Index - SubIdx, DAG, Depth + 1);
6302 return getShuffleScalarElt(Vec, Index, DAG, Depth + 1);
6305 // Recurse into concat_vectors sub vector to find scalars.
6306 if (Opcode == ISD::CONCAT_VECTORS) {
6307 EVT SubVT = Op.getOperand(0).getValueType();
6308 unsigned NumSubElts = SubVT.getVectorNumElements();
6309 uint64_t SubIdx = Index / NumSubElts;
6310 uint64_t SubElt = Index % NumSubElts;
6311 return getShuffleScalarElt(Op.getOperand(SubIdx), SubElt, DAG, Depth + 1);
6314 // Recurse into extract_subvector src vector to find scalars.
6315 if (Opcode == ISD::EXTRACT_SUBVECTOR) {
6316 SDValue Src = Op.getOperand(0);
6317 uint64_t SrcIdx = Op.getConstantOperandVal(1);
6318 return getShuffleScalarElt(Src, Index + SrcIdx, DAG, Depth + 1);
6321 // We only peek through bitcasts of the same vector width.
6322 if (Opcode == ISD::BITCAST) {
6323 SDValue Src = Op.getOperand(0);
6324 EVT SrcVT = Src.getValueType();
6325 if (SrcVT.isVector() && SrcVT.getVectorNumElements() == NumElems)
6326 return getShuffleScalarElt(Src, Index, DAG, Depth + 1);
6327 return SDValue();
6330 // Actual nodes that may contain scalar elements
6332 // For insert_vector_elt - either return the index matching scalar or recurse
6333 // into the base vector.
6334 if (Opcode == ISD::INSERT_VECTOR_ELT &&
6335 isa<ConstantSDNode>(Op.getOperand(2))) {
6336 if (Op.getConstantOperandAPInt(2) == Index)
6337 return Op.getOperand(1);
6338 return getShuffleScalarElt(Op.getOperand(0), Index, DAG, Depth + 1);
6341 if (Opcode == ISD::SCALAR_TO_VECTOR)
6342 return (Index == 0) ? Op.getOperand(0)
6343 : DAG.getUNDEF(VT.getVectorElementType());
6345 if (Opcode == ISD::BUILD_VECTOR)
6346 return Op.getOperand(Index);
6348 return SDValue();
6351 // Use PINSRB/PINSRW/PINSRD to create a build vector.
6352 static SDValue LowerBuildVectorAsInsert(SDValue Op, const APInt &NonZeroMask,
6353 unsigned NumNonZero, unsigned NumZero,
6354 SelectionDAG &DAG,
6355 const X86Subtarget &Subtarget) {
6356 MVT VT = Op.getSimpleValueType();
6357 unsigned NumElts = VT.getVectorNumElements();
6358 assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) ||
6359 ((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) &&
6360 "Illegal vector insertion");
6362 SDLoc dl(Op);
6363 SDValue V;
6364 bool First = true;
6366 for (unsigned i = 0; i < NumElts; ++i) {
6367 bool IsNonZero = NonZeroMask[i];
6368 if (!IsNonZero)
6369 continue;
6371 // If the build vector contains zeros or our first insertion is not the
6372 // first index, then insert into a zero vector to break any register
6373 // dependency; otherwise use SCALAR_TO_VECTOR.
6374 if (First) {
6375 First = false;
6376 if (NumZero || 0 != i)
6377 V = getZeroVector(VT, Subtarget, DAG, dl);
6378 else {
6379 assert(0 == i && "Expected insertion into zero-index");
6380 V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
6381 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
6382 V = DAG.getBitcast(VT, V);
6383 continue;
6386 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i),
6387 DAG.getIntPtrConstant(i, dl));
6390 return V;
6393 /// Custom lower build_vector of v16i8.
6394 static SDValue LowerBuildVectorv16i8(SDValue Op, const APInt &NonZeroMask,
6395 unsigned NumNonZero, unsigned NumZero,
6396 SelectionDAG &DAG,
6397 const X86Subtarget &Subtarget) {
6398 if (NumNonZero > 8 && !Subtarget.hasSSE41())
6399 return SDValue();
6401 // SSE4.1 - use PINSRB to insert each byte directly.
6402 if (Subtarget.hasSSE41())
6403 return LowerBuildVectorAsInsert(Op, NonZeroMask, NumNonZero, NumZero, DAG,
6404 Subtarget);
6406 SDLoc dl(Op);
6407 SDValue V;
6409 // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
6410 // If both halves of the lowest 32 bits are non-zero, then convert to MOVD.
6411 if (!NonZeroMask.extractBits(2, 0).isZero() &&
6412 !NonZeroMask.extractBits(2, 2).isZero()) {
6413 for (unsigned I = 0; I != 4; ++I) {
6414 if (!NonZeroMask[I])
6415 continue;
6416 SDValue Elt = DAG.getZExtOrTrunc(Op.getOperand(I), dl, MVT::i32);
6417 if (I != 0)
6418 Elt = DAG.getNode(ISD::SHL, dl, MVT::i32, Elt,
6419 DAG.getConstant(I * 8, dl, MVT::i8));
6420 V = V ? DAG.getNode(ISD::OR, dl, MVT::i32, V, Elt) : Elt;
6422 assert(V && "Failed to fold v16i8 vector to zero");
6423 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
6424 V = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, V);
6425 V = DAG.getBitcast(MVT::v8i16, V);
6427 for (unsigned i = V ? 4 : 0; i < 16; i += 2) {
6428 bool ThisIsNonZero = NonZeroMask[i];
6429 bool NextIsNonZero = NonZeroMask[i + 1];
6430 if (!ThisIsNonZero && !NextIsNonZero)
6431 continue;
6433 SDValue Elt;
6434 if (ThisIsNonZero) {
6435 if (NumZero || NextIsNonZero)
6436 Elt = DAG.getZExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
6437 else
6438 Elt = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
6441 if (NextIsNonZero) {
6442 SDValue NextElt = Op.getOperand(i + 1);
6443 if (i == 0 && NumZero)
6444 NextElt = DAG.getZExtOrTrunc(NextElt, dl, MVT::i32);
6445 else
6446 NextElt = DAG.getAnyExtOrTrunc(NextElt, dl, MVT::i32);
6447 NextElt = DAG.getNode(ISD::SHL, dl, MVT::i32, NextElt,
6448 DAG.getConstant(8, dl, MVT::i8));
6449 if (ThisIsNonZero)
6450 Elt = DAG.getNode(ISD::OR, dl, MVT::i32, NextElt, Elt);
6451 else
6452 Elt = NextElt;
6455 // If our first insertion is not the first index or zeros are needed, then
6456 // insert into a zero vector. Otherwise, use SCALAR_TO_VECTOR (leaves high
6457 // elements undefined).
6458 if (!V) {
6459 if (i != 0 || NumZero)
6460 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
6461 else {
6462 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Elt);
6463 V = DAG.getBitcast(MVT::v8i16, V);
6464 continue;
6467 Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Elt);
6468 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, Elt,
6469 DAG.getIntPtrConstant(i / 2, dl));
6472 return DAG.getBitcast(MVT::v16i8, V);
6475 /// Custom lower build_vector of v8i16.
6476 static SDValue LowerBuildVectorv8i16(SDValue Op, const APInt &NonZeroMask,
6477 unsigned NumNonZero, unsigned NumZero,
6478 SelectionDAG &DAG,
6479 const X86Subtarget &Subtarget) {
6480 if (NumNonZero > 4 && !Subtarget.hasSSE41())
6481 return SDValue();
6483 // Use PINSRW to insert each element directly.
6484 return LowerBuildVectorAsInsert(Op, NonZeroMask, NumNonZero, NumZero, DAG,
6485 Subtarget);
6488 /// Custom lower build_vector of v4i32 or v4f32.
6489 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
6490 const X86Subtarget &Subtarget) {
6491 // If this is a splat of a pair of elements, use MOVDDUP (unless the target
6492 // has XOP; in that case defer lowering to potentially use VPERMIL2PS).
6493 // Because we're creating a less complicated build vector here, we may enable
6494 // further folding of the MOVDDUP via shuffle transforms.
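// For example (illustrative): build_vector <a, b, a, b> of v4f32 is rebuilt
// as build_vector <a, b, undef, undef>, bitcast to v2f64, duplicated with
// MOVDDUP, and bitcast back to v4f32.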
6495 if (Subtarget.hasSSE3() && !Subtarget.hasXOP() &&
6496 Op.getOperand(0) == Op.getOperand(2) &&
6497 Op.getOperand(1) == Op.getOperand(3) &&
6498 Op.getOperand(0) != Op.getOperand(1)) {
6499 SDLoc DL(Op);
6500 MVT VT = Op.getSimpleValueType();
6501 MVT EltVT = VT.getVectorElementType();
6502 // Create a new build vector with the first 2 elements followed by undef
6503 // padding, bitcast to v2f64, duplicate, and bitcast back.
6504 SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
6505 DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
6506 SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops));
6507 SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV);
6508 return DAG.getBitcast(VT, Dup);
6511 // Find all zeroable elements.
6512 std::bitset<4> Zeroable, Undefs;
6513 for (int i = 0; i < 4; ++i) {
6514 SDValue Elt = Op.getOperand(i);
6515 Undefs[i] = Elt.isUndef();
6516 Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
6518 assert(Zeroable.size() - Zeroable.count() > 1 &&
6519 "We expect at least two non-zero elements!");
6521 // We only know how to deal with build_vector nodes where elements are either
6522 // zeroable or extract_vector_elt with constant index.
6523 SDValue FirstNonZero;
6524 unsigned FirstNonZeroIdx;
6525 for (unsigned i = 0; i < 4; ++i) {
6526 if (Zeroable[i])
6527 continue;
6528 SDValue Elt = Op.getOperand(i);
6529 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6530 !isa<ConstantSDNode>(Elt.getOperand(1)))
6531 return SDValue();
6532 // Make sure that this node is extracting from a 128-bit vector.
6533 MVT VT = Elt.getOperand(0).getSimpleValueType();
6534 if (!VT.is128BitVector())
6535 return SDValue();
6536 if (!FirstNonZero.getNode()) {
6537 FirstNonZero = Elt;
6538 FirstNonZeroIdx = i;
6542 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
6543 SDValue V1 = FirstNonZero.getOperand(0);
6544 MVT VT = V1.getSimpleValueType();
6546 // See if this build_vector can be lowered as a blend with zero.
6547 SDValue Elt;
6548 unsigned EltMaskIdx, EltIdx;
6549 int Mask[4];
6550 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
6551 if (Zeroable[EltIdx]) {
6552 // The zero vector will be on the right hand side.
6553 Mask[EltIdx] = EltIdx+4;
6554 continue;
6557 Elt = Op->getOperand(EltIdx);
6558 // By construction, Elt is an EXTRACT_VECTOR_ELT with a constant index.
6559 EltMaskIdx = Elt.getConstantOperandVal(1);
6560 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
6561 break;
6562 Mask[EltIdx] = EltIdx;
6565 if (EltIdx == 4) {
6566 // Let the shuffle legalizer deal with blend operations.
6567 SDValue VZeroOrUndef = (Zeroable == Undefs)
6568 ? DAG.getUNDEF(VT)
6569 : getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
6570 if (V1.getSimpleValueType() != VT)
6571 V1 = DAG.getBitcast(VT, V1);
6572 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZeroOrUndef, Mask);
6575 // See if we can lower this build_vector to an INSERTPS.
6576 if (!Subtarget.hasSSE41())
6577 return SDValue();
6579 SDValue V2 = Elt.getOperand(0);
6580 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
6581 V1 = SDValue();
6583 bool CanFold = true;
6584 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
6585 if (Zeroable[i])
6586 continue;
6588 SDValue Current = Op->getOperand(i);
6589 SDValue SrcVector = Current->getOperand(0);
6590 if (!V1.getNode())
6591 V1 = SrcVector;
6592 CanFold = (SrcVector == V1) && (Current.getConstantOperandAPInt(1) == i);
6595 if (!CanFold)
6596 return SDValue();
6598 assert(V1.getNode() && "Expected at least two non-zero elements!");
6599 if (V1.getSimpleValueType() != MVT::v4f32)
6600 V1 = DAG.getBitcast(MVT::v4f32, V1);
6601 if (V2.getSimpleValueType() != MVT::v4f32)
6602 V2 = DAG.getBitcast(MVT::v4f32, V2);
6604 // Ok, we can emit an INSERTPS instruction.
6605 unsigned ZMask = Zeroable.to_ulong();
6607 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
6608 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
6609 SDLoc DL(Op);
6610 SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
6611 DAG.getIntPtrConstant(InsertPSMask, DL, true));
6612 return DAG.getBitcast(VT, Result);
6615 /// Return a vector logical shift node.
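///
/// For example (illustrative): getVShift(/*isLeft=*/true, MVT::v2i64, V,
/// /*NumBits=*/64, ...) bitcasts V to v16i8 and emits an 8-byte VSHLDQ before
/// bitcasting back to v2i64.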
6616 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
6617 SelectionDAG &DAG, const TargetLowering &TLI,
6618 const SDLoc &dl) {
6619 assert(VT.is128BitVector() && "Unknown type for VShift");
6620 MVT ShVT = MVT::v16i8;
6621 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
6622 SrcOp = DAG.getBitcast(ShVT, SrcOp);
6623 assert(NumBits % 8 == 0 && "Only support byte sized shifts");
6624 SDValue ShiftVal = DAG.getTargetConstant(NumBits / 8, dl, MVT::i8);
6625 return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
6628 static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
6629 SelectionDAG &DAG) {
6631 // Check if the scalar load can be widened into a vector load, and if
6632 // the address is "base + cst", see if the cst can be "absorbed" into
6633 // the shuffle mask.
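// For example (illustrative): splatting (load (FrameIndex + 4)) as v4i32 can
// become a 16-byte aligned v4i32 load at the frame index followed by the
// splat shuffle <1, 1, 1, 1>.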
6634 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
6635 SDValue Ptr = LD->getBasePtr();
6636 if (!ISD::isNormalLoad(LD) || !LD->isSimple())
6637 return SDValue();
6638 EVT PVT = LD->getValueType(0);
6639 if (PVT != MVT::i32 && PVT != MVT::f32)
6640 return SDValue();
6642 int FI = -1;
6643 int64_t Offset = 0;
6644 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
6645 FI = FINode->getIndex();
6646 Offset = 0;
6647 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
6648 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
6649 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6650 Offset = Ptr.getConstantOperandVal(1);
6651 Ptr = Ptr.getOperand(0);
6652 } else {
6653 return SDValue();
6656 // FIXME: 256-bit vector instructions don't require a strict alignment;
6657 // improve this code to support them better.
6658 Align RequiredAlign(VT.getSizeInBits() / 8);
6659 SDValue Chain = LD->getChain();
6660 // Make sure the stack object alignment is at least 16 or 32.
6661 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6662 MaybeAlign InferredAlign = DAG.InferPtrAlign(Ptr);
6663 if (!InferredAlign || *InferredAlign < RequiredAlign) {
6664 if (MFI.isFixedObjectIndex(FI)) {
6665 // Can't change the alignment. FIXME: It's possible to compute
6666 // the exact stack offset and reference FI + adjust offset instead,
6667 // if someone *really* cares about this. That's the way to implement it.
6668 return SDValue();
6669 } else {
6670 MFI.setObjectAlignment(FI, RequiredAlign);
6674 // (Offset % 16 or 32) must be a multiple of 4. The address is then
6675 // Ptr + (Offset & ~15).
6676 if (Offset < 0)
6677 return SDValue();
6678 if ((Offset % RequiredAlign.value()) & 3)
6679 return SDValue();
6680 int64_t StartOffset = Offset & ~int64_t(RequiredAlign.value() - 1);
6681 if (StartOffset) {
6682 SDLoc DL(Ptr);
6683 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
6684 DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
6687 int EltNo = (Offset - StartOffset) >> 2;
6688 unsigned NumElems = VT.getVectorNumElements();
6690 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
6691 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
6692 LD->getPointerInfo().getWithOffset(StartOffset));
6694 SmallVector<int, 8> Mask(NumElems, EltNo);
6696 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
6699 return SDValue();
6702 // Recurse to find a LoadSDNode source and the accumulated ByteOffset.
6703 static bool findEltLoadSrc(SDValue Elt, LoadSDNode *&Ld, int64_t &ByteOffset) {
6704 if (ISD::isNON_EXTLoad(Elt.getNode())) {
6705 auto *BaseLd = cast<LoadSDNode>(Elt);
6706 if (!BaseLd->isSimple())
6707 return false;
6708 Ld = BaseLd;
6709 ByteOffset = 0;
6710 return true;
6713 switch (Elt.getOpcode()) {
6714 case ISD::BITCAST:
6715 case ISD::TRUNCATE:
6716 case ISD::SCALAR_TO_VECTOR:
6717 return findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset);
6718 case ISD::SRL:
6719 if (auto *AmtC = dyn_cast<ConstantSDNode>(Elt.getOperand(1))) {
6720 uint64_t Amt = AmtC->getZExtValue();
6721 if ((Amt % 8) == 0 && findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset)) {
6722 ByteOffset += Amt / 8;
6723 return true;
6726 break;
6727 case ISD::EXTRACT_VECTOR_ELT:
6728 if (auto *IdxC = dyn_cast<ConstantSDNode>(Elt.getOperand(1))) {
6729 SDValue Src = Elt.getOperand(0);
6730 unsigned SrcSizeInBits = Src.getScalarValueSizeInBits();
6731 unsigned DstSizeInBits = Elt.getScalarValueSizeInBits();
6732 if (DstSizeInBits == SrcSizeInBits && (SrcSizeInBits % 8) == 0 &&
6733 findEltLoadSrc(Src, Ld, ByteOffset)) {
6734 uint64_t Idx = IdxC->getZExtValue();
6735 ByteOffset += Idx * (SrcSizeInBits / 8);
6736 return true;
6739 break;
6742 return false;
6745 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
6746 /// elements can be replaced by a single large load which has the same value as
6747 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
6749 /// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
6750 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
6751 const SDLoc &DL, SelectionDAG &DAG,
6752 const X86Subtarget &Subtarget,
6753 bool IsAfterLegalize) {
6754 if ((VT.getScalarSizeInBits() % 8) != 0)
6755 return SDValue();
6757 unsigned NumElems = Elts.size();
6759 int LastLoadedElt = -1;
6760 APInt LoadMask = APInt::getZero(NumElems);
6761 APInt ZeroMask = APInt::getZero(NumElems);
6762 APInt UndefMask = APInt::getZero(NumElems);
6764 SmallVector<LoadSDNode*, 8> Loads(NumElems, nullptr);
6765 SmallVector<int64_t, 8> ByteOffsets(NumElems, 0);
6767 // For each element in the initializer, see if we've found a load, zero or an
6768 // undef.
6769 for (unsigned i = 0; i < NumElems; ++i) {
6770 SDValue Elt = peekThroughBitcasts(Elts[i]);
6771 if (!Elt.getNode())
6772 return SDValue();
6773 if (Elt.isUndef()) {
6774 UndefMask.setBit(i);
6775 continue;
6777 if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode())) {
6778 ZeroMask.setBit(i);
6779 continue;
6782 // Each loaded element must be the correct fractional portion of the
6783 // requested vector load.
6784 unsigned EltSizeInBits = Elt.getValueSizeInBits();
6785 if ((NumElems * EltSizeInBits) != VT.getSizeInBits())
6786 return SDValue();
6788 if (!findEltLoadSrc(Elt, Loads[i], ByteOffsets[i]) || ByteOffsets[i] < 0)
6789 return SDValue();
6790 unsigned LoadSizeInBits = Loads[i]->getValueSizeInBits(0);
6791 if (((ByteOffsets[i] * 8) + EltSizeInBits) > LoadSizeInBits)
6792 return SDValue();
6794 LoadMask.setBit(i);
6795 LastLoadedElt = i;
6797 assert((ZeroMask.popcount() + UndefMask.popcount() + LoadMask.popcount()) ==
6798 NumElems &&
6799 "Incomplete element masks");
6801 // Handle Special Cases - all undef or undef/zero.
6802 if (UndefMask.popcount() == NumElems)
6803 return DAG.getUNDEF(VT);
6804 if ((ZeroMask.popcount() + UndefMask.popcount()) == NumElems)
6805 return VT.isInteger() ? DAG.getConstant(0, DL, VT)
6806 : DAG.getConstantFP(0.0, DL, VT);
6808 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6809 int FirstLoadedElt = LoadMask.countr_zero();
6810 SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
6811 EVT EltBaseVT = EltBase.getValueType();
6812 assert(EltBaseVT.getSizeInBits() == EltBaseVT.getStoreSizeInBits() &&
6813 "Register/Memory size mismatch");
6814 LoadSDNode *LDBase = Loads[FirstLoadedElt];
6815 assert(LDBase && "Did not find base load for merging consecutive loads");
6816 unsigned BaseSizeInBits = EltBaseVT.getStoreSizeInBits();
6817 unsigned BaseSizeInBytes = BaseSizeInBits / 8;
6818 int NumLoadedElts = (1 + LastLoadedElt - FirstLoadedElt);
6819 int LoadSizeInBits = NumLoadedElts * BaseSizeInBits;
6820 assert((BaseSizeInBits % 8) == 0 && "Sub-byte element loads detected");
6822 // TODO: Support offsetting the base load.
6823 if (ByteOffsets[FirstLoadedElt] != 0)
6824 return SDValue();
6826 // Check to see if the element's load is consecutive to the base load
6827 // or offset from a previous (already checked) load.
6828 auto CheckConsecutiveLoad = [&](LoadSDNode *Base, int EltIdx) {
6829 LoadSDNode *Ld = Loads[EltIdx];
6830 int64_t ByteOffset = ByteOffsets[EltIdx];
6831 if (ByteOffset && (ByteOffset % BaseSizeInBytes) == 0) {
6832 int64_t BaseIdx = EltIdx - (ByteOffset / BaseSizeInBytes);
6833 return (0 <= BaseIdx && BaseIdx < (int)NumElems && LoadMask[BaseIdx] &&
6834 Loads[BaseIdx] == Ld && ByteOffsets[BaseIdx] == 0);
6836 return DAG.areNonVolatileConsecutiveLoads(Ld, Base, BaseSizeInBytes,
6837 EltIdx - FirstLoadedElt);
6840 // Consecutive loads can contain UNDEFS but not ZERO elements.
6841 // Consecutive loads with UNDEF and ZERO elements require an
6842 // additional shuffle stage to clear the ZERO elements.
6843 bool IsConsecutiveLoad = true;
6844 bool IsConsecutiveLoadWithZeros = true;
6845 for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
6846 if (LoadMask[i]) {
6847 if (!CheckConsecutiveLoad(LDBase, i)) {
6848 IsConsecutiveLoad = false;
6849 IsConsecutiveLoadWithZeros = false;
6850 break;
6852 } else if (ZeroMask[i]) {
6853 IsConsecutiveLoad = false;
6857 auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
6858 auto MMOFlags = LDBase->getMemOperand()->getFlags();
6859 assert(LDBase->isSimple() &&
6860 "Cannot merge volatile or atomic loads.");
6861 SDValue NewLd =
6862 DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6863 LDBase->getPointerInfo(), LDBase->getOriginalAlign(),
6864 MMOFlags);
6865 for (auto *LD : Loads)
6866 if (LD)
6867 DAG.makeEquivalentMemoryOrdering(LD, NewLd);
6868 return NewLd;
6871 // Check if the base load is entirely dereferenceable.
6872 bool IsDereferenceable = LDBase->getPointerInfo().isDereferenceable(
6873 VT.getSizeInBits() / 8, *DAG.getContext(), DAG.getDataLayout());
6875 // LOAD - all consecutive load/undefs (must start/end with a load or be
6876 // entirely dereferenceable). If we have found an entire vector of loads and
6877 // undefs, then return a large load of the entire vector width starting at the
6878 // base pointer. If the vector contains zeros, then attempt to shuffle those
6879 // elements.
6880 if (FirstLoadedElt == 0 &&
6881 (NumLoadedElts == (int)NumElems || IsDereferenceable) &&
6882 (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
6883 if (IsAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
6884 return SDValue();
6886 // Don't create 256-bit non-temporal aligned loads without AVX2 as these
6887 // will lower to regular temporal loads and use the cache.
6888 if (LDBase->isNonTemporal() && LDBase->getAlign() >= Align(32) &&
6889 VT.is256BitVector() && !Subtarget.hasInt256())
6890 return SDValue();
6892 if (NumElems == 1)
6893 return DAG.getBitcast(VT, Elts[FirstLoadedElt]);
6895 if (!ZeroMask)
6896 return CreateLoad(VT, LDBase);
6898 // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
6899 // vector and a zero vector to clear out the zero elements.
6900 if (!IsAfterLegalize && VT.isVector()) {
6901 unsigned NumMaskElts = VT.getVectorNumElements();
6902 if ((NumMaskElts % NumElems) == 0) {
6903 unsigned Scale = NumMaskElts / NumElems;
6904 SmallVector<int, 4> ClearMask(NumMaskElts, -1);
6905 for (unsigned i = 0; i < NumElems; ++i) {
6906 if (UndefMask[i])
6907 continue;
6908 int Offset = ZeroMask[i] ? NumMaskElts : 0;
6909 for (unsigned j = 0; j != Scale; ++j)
6910 ClearMask[(i * Scale) + j] = (i * Scale) + j + Offset;
6912 SDValue V = CreateLoad(VT, LDBase);
6913 SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
6914 : DAG.getConstantFP(0.0, DL, VT);
6915 return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
6920 // If the upper half of a ymm/zmm load is undef then just load the lower half.
6921 if (VT.is256BitVector() || VT.is512BitVector()) {
6922 unsigned HalfNumElems = NumElems / 2;
6923 if (UndefMask.extractBits(HalfNumElems, HalfNumElems).isAllOnes()) {
6924 EVT HalfVT =
6925 EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), HalfNumElems);
6926 SDValue HalfLD =
6927 EltsFromConsecutiveLoads(HalfVT, Elts.drop_back(HalfNumElems), DL,
6928 DAG, Subtarget, IsAfterLegalize);
6929 if (HalfLD)
6930 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT),
6931 HalfLD, DAG.getIntPtrConstant(0, DL));
6935 // VZEXT_LOAD - consecutive 32/64-bit load/undefs followed by zeros/undefs.
6936 if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
6937 ((LoadSizeInBits == 16 && Subtarget.hasFP16()) || LoadSizeInBits == 32 ||
6938 LoadSizeInBits == 64) &&
6939 ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
6940 MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSizeInBits)
6941 : MVT::getIntegerVT(LoadSizeInBits);
6942 MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSizeInBits);
6943 // Allow v4f32 on SSE1 only targets.
6944 // FIXME: Add more isel patterns so we can just use VT directly.
6945 if (!Subtarget.hasSSE2() && VT == MVT::v4f32)
6946 VecVT = MVT::v4f32;
6947 if (TLI.isTypeLegal(VecVT)) {
6948 SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
6949 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6950 SDValue ResNode = DAG.getMemIntrinsicNode(
6951 X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT, LDBase->getPointerInfo(),
6952 LDBase->getOriginalAlign(), MachineMemOperand::MOLoad);
6953 for (auto *LD : Loads)
6954 if (LD)
6955 DAG.makeEquivalentMemoryOrdering(LD, ResNode);
6956 return DAG.getBitcast(VT, ResNode);
6960 // BROADCAST - match the smallest possible repetition pattern, load that
6961 // scalar/subvector element and then broadcast to the entire vector.
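// For example (illustrative): a v8i32 build vector whose elements repeat as
// <a, b, a, b, a, b, a, b>, with a and b loaded from consecutive addresses,
// can be lowered as a single 64-bit load broadcast to every 64-bit lane.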
6962 if (ZeroMask.isZero() && isPowerOf2_32(NumElems) && Subtarget.hasAVX() &&
6963 (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector())) {
6964 for (unsigned SubElems = 1; SubElems < NumElems; SubElems *= 2) {
6965 unsigned RepeatSize = SubElems * BaseSizeInBits;
6966 unsigned ScalarSize = std::min(RepeatSize, 64u);
6967 if (!Subtarget.hasAVX2() && ScalarSize < 32)
6968 continue;
6970 // Don't attempt a 1:N subvector broadcast - it should be caught by
6971 // combineConcatVectorOps, otherwise it will cause infinite loops.
6972 if (RepeatSize > ScalarSize && SubElems == 1)
6973 continue;
6975 bool Match = true;
6976 SmallVector<SDValue, 8> RepeatedLoads(SubElems, DAG.getUNDEF(EltBaseVT));
6977 for (unsigned i = 0; i != NumElems && Match; ++i) {
6978 if (!LoadMask[i])
6979 continue;
6980 SDValue Elt = peekThroughBitcasts(Elts[i]);
6981 if (RepeatedLoads[i % SubElems].isUndef())
6982 RepeatedLoads[i % SubElems] = Elt;
6983 else
6984 Match &= (RepeatedLoads[i % SubElems] == Elt);
6987 // We must have loads at both ends of the repetition.
6988 Match &= !RepeatedLoads.front().isUndef();
6989 Match &= !RepeatedLoads.back().isUndef();
6990 if (!Match)
6991 continue;
6993 EVT RepeatVT =
6994 VT.isInteger() && (RepeatSize != 64 || TLI.isTypeLegal(MVT::i64))
6995 ? EVT::getIntegerVT(*DAG.getContext(), ScalarSize)
6996 : EVT::getFloatingPointVT(ScalarSize);
6997 if (RepeatSize > ScalarSize)
6998 RepeatVT = EVT::getVectorVT(*DAG.getContext(), RepeatVT,
6999 RepeatSize / ScalarSize);
7000 EVT BroadcastVT =
7001 EVT::getVectorVT(*DAG.getContext(), RepeatVT.getScalarType(),
7002 VT.getSizeInBits() / ScalarSize);
7003 if (TLI.isTypeLegal(BroadcastVT)) {
7004 if (SDValue RepeatLoad = EltsFromConsecutiveLoads(
7005 RepeatVT, RepeatedLoads, DL, DAG, Subtarget, IsAfterLegalize)) {
7006 SDValue Broadcast = RepeatLoad;
7007 if (RepeatSize > ScalarSize) {
7008 while (Broadcast.getValueSizeInBits() < VT.getSizeInBits())
7009 Broadcast = concatSubVectors(Broadcast, Broadcast, DAG, DL);
7010 } else {
7011 if (!Subtarget.hasAVX2() &&
7012 !X86::mayFoldLoadIntoBroadcastFromMem(
7013 RepeatLoad, RepeatVT.getScalarType().getSimpleVT(),
7014 Subtarget,
7015 /*AssumeSingleUse=*/true))
7016 return SDValue();
7017 Broadcast =
7018 DAG.getNode(X86ISD::VBROADCAST, DL, BroadcastVT, RepeatLoad);
7020 return DAG.getBitcast(VT, Broadcast);
7026 return SDValue();
7029 // Combine a vector op (shuffles etc.) that is equal to build_vector load1,
7030 // load2, load3, load4, <0, 1, 2, 3> into a vector load if the load addresses
7031 // are consecutive, non-overlapping, and in the right order.
7032 static SDValue combineToConsecutiveLoads(EVT VT, SDValue Op, const SDLoc &DL,
7033 SelectionDAG &DAG,
7034 const X86Subtarget &Subtarget,
7035 bool IsAfterLegalize) {
7036 SmallVector<SDValue, 64> Elts;
7037 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
7038 if (SDValue Elt = getShuffleScalarElt(Op, i, DAG, 0)) {
7039 Elts.push_back(Elt);
7040 continue;
7042 return SDValue();
7044 assert(Elts.size() == VT.getVectorNumElements());
7045 return EltsFromConsecutiveLoads(VT, Elts, DL, DAG, Subtarget,
7046 IsAfterLegalize);
7049 static Constant *getConstantVector(MVT VT, ArrayRef<APInt> Bits,
7050 const APInt &Undefs, LLVMContext &C) {
7051 unsigned ScalarSize = VT.getScalarSizeInBits();
7052 Type *Ty = EVT(VT.getScalarType()).getTypeForEVT(C);
7054 auto getConstantScalar = [&](const APInt &Val) -> Constant * {
7055 if (VT.isFloatingPoint()) {
7056 if (ScalarSize == 16)
7057 return ConstantFP::get(C, APFloat(APFloat::IEEEhalf(), Val));
7058 if (ScalarSize == 32)
7059 return ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
7060 assert(ScalarSize == 64 && "Unsupported floating point scalar size");
7061 return ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
7063 return Constant::getIntegerValue(Ty, Val);
7066 SmallVector<Constant *, 32> ConstantVec;
7067 for (unsigned I = 0, E = Bits.size(); I != E; ++I)
7068 ConstantVec.push_back(Undefs[I] ? UndefValue::get(Ty)
7069 : getConstantScalar(Bits[I]));
7071 return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
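// As above, but expands a single repeated \p SplatValue of \p SplatBitSize
// bits into individual scalar constants of VT's element width. If the splat
// already matches the element width, a single scalar constant is returned.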
7074 static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
7075 unsigned SplatBitSize, LLVMContext &C) {
7076 unsigned ScalarSize = VT.getScalarSizeInBits();
7078 auto getConstantScalar = [&](const APInt &Val) -> Constant * {
7079 if (VT.isFloatingPoint()) {
7080 if (ScalarSize == 16)
7081 return ConstantFP::get(C, APFloat(APFloat::IEEEhalf(), Val));
7082 if (ScalarSize == 32)
7083 return ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
7084 assert(ScalarSize == 64 && "Unsupported floating point scalar size");
7085 return ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
7087 return Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
7090 if (ScalarSize == SplatBitSize)
7091 return getConstantScalar(SplatValue);
7093 unsigned NumElm = SplatBitSize / ScalarSize;
7094 SmallVector<Constant *, 32> ConstantVec;
7095 for (unsigned I = 0; I != NumElm; ++I) {
7096 APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * I);
7097 ConstantVec.push_back(getConstantScalar(Val));
7099 return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
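/// Returns true if a use of \p N is expected to be able to fold the constant,
/// e.g. a target shuffle (possibly behind a bitcast); VPERMV/VPERMV3 index
/// operands can never be folded. Used to decide whether turning a constant
/// BUILD_VECTOR into a broadcast would block the existing shuffle lowering.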
7102 static bool isFoldableUseOfShuffle(SDNode *N) {
7103 for (auto *U : N->uses()) {
7104 unsigned Opc = U->getOpcode();
7105 // VPERMV/VPERMV3 shuffles can never fold their index operands.
7106 if (Opc == X86ISD::VPERMV && U->getOperand(0).getNode() == N)
7107 return false;
7108 if (Opc == X86ISD::VPERMV3 && U->getOperand(1).getNode() == N)
7109 return false;
7110 if (isTargetShuffle(Opc))
7111 return true;
7112 if (Opc == ISD::BITCAST) // Ignore bitcasts
7113 return isFoldableUseOfShuffle(U);
7114 if (N->hasOneUse()) {
7115 // TODO: There may be some general way to know if an SDNode can
7116 // be folded. We currently only know whether an MI is foldable.
7117 if (Opc == X86ISD::VPDPBUSD && U->getOperand(2).getNode() != N)
7118 return false;
7119 return true;
7122 return false;
7125 /// Attempt to use the vbroadcast instruction to generate a splat value
7126 /// from a splat BUILD_VECTOR which uses:
7127 /// a. A single scalar load, or a constant.
7128 /// b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
7130 /// The VBROADCAST node is returned when a pattern is found,
7131 /// or SDValue() otherwise.
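///
/// For example (illustrative), a v8f32 build_vector that splats a single
/// loaded f32, or one that repeats the constant pattern <0,1,0,1,...>, can
/// be lowered to a single broadcast from memory or from the constant pool.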
7132 static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
7133 const X86Subtarget &Subtarget,
7134 SelectionDAG &DAG) {
7135 // VBROADCAST requires AVX.
7136 // TODO: Splats could be generated for non-AVX CPUs using SSE
7137 // instructions, but there's less potential gain for only 128-bit vectors.
7138 if (!Subtarget.hasAVX())
7139 return SDValue();
7141 MVT VT = BVOp->getSimpleValueType(0);
7142 unsigned NumElts = VT.getVectorNumElements();
7143 SDLoc dl(BVOp);
7145 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
7146 "Unsupported vector type for broadcast.");
7148 // See if the build vector is a repeating sequence of scalars (inc. splat).
7149 SDValue Ld;
7150 BitVector UndefElements;
7151 SmallVector<SDValue, 16> Sequence;
7152 if (BVOp->getRepeatedSequence(Sequence, &UndefElements)) {
7153 assert((NumElts % Sequence.size()) == 0 && "Sequence doesn't fit.");
7154 if (Sequence.size() == 1)
7155 Ld = Sequence[0];
7158 // Attempt to use VBROADCASTM
7159 // From this pattern:
7160 // a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
7161 // b. t1 = (build_vector t0 t0)
7163 // Create (VBROADCASTM v2i1 X)
7164 if (!Sequence.empty() && Subtarget.hasCDI()) {
7165 // If not a splat, are the upper sequence values zeroable?
7166 unsigned SeqLen = Sequence.size();
7167 bool UpperZeroOrUndef =
7168 SeqLen == 1 ||
7169 llvm::all_of(ArrayRef(Sequence).drop_front(), [](SDValue V) {
7170 return !V || V.isUndef() || isNullConstant(V);
7172 SDValue Op0 = Sequence[0];
7173 if (UpperZeroOrUndef && ((Op0.getOpcode() == ISD::BITCAST) ||
7174 (Op0.getOpcode() == ISD::ZERO_EXTEND &&
7175 Op0.getOperand(0).getOpcode() == ISD::BITCAST))) {
7176 SDValue BOperand = Op0.getOpcode() == ISD::BITCAST
7177 ? Op0.getOperand(0)
7178 : Op0.getOperand(0).getOperand(0);
7179 MVT MaskVT = BOperand.getSimpleValueType();
7180 MVT EltType = MVT::getIntegerVT(VT.getScalarSizeInBits() * SeqLen);
7181 if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) || // for broadcastmb2q
7182 (EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d
7183 MVT BcstVT = MVT::getVectorVT(EltType, NumElts / SeqLen);
7184 if (!VT.is512BitVector() && !Subtarget.hasVLX()) {
7185 unsigned Scale = 512 / VT.getSizeInBits();
7186 BcstVT = MVT::getVectorVT(EltType, Scale * (NumElts / SeqLen));
7188 SDValue Bcst = DAG.getNode(X86ISD::VBROADCASTM, dl, BcstVT, BOperand);
7189 if (BcstVT.getSizeInBits() != VT.getSizeInBits())
7190 Bcst = extractSubVector(Bcst, 0, DAG, dl, VT.getSizeInBits());
7191 return DAG.getBitcast(VT, Bcst);
7196 unsigned NumUndefElts = UndefElements.count();
7197 if (!Ld || (NumElts - NumUndefElts) <= 1) {
7198 APInt SplatValue, Undef;
7199 unsigned SplatBitSize;
7200 bool HasUndef;
7201 // Check if this is a repeated constant pattern suitable for broadcasting.
7202 if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
7203 SplatBitSize > VT.getScalarSizeInBits() &&
7204 SplatBitSize < VT.getSizeInBits()) {
7205 // Avoid replacing with broadcast when it's a use of a shuffle
7206 // instruction to preserve the present custom lowering of shuffles.
7207 if (isFoldableUseOfShuffle(BVOp))
7208 return SDValue();
7209 // Replace the BUILD_VECTOR with a broadcast of the repeated constants.
7210 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7211 LLVMContext *Ctx = DAG.getContext();
7212 MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
7213 if (SplatBitSize == 32 || SplatBitSize == 64 ||
7214 (SplatBitSize < 32 && Subtarget.hasAVX2())) {
7215 // Load the constant scalar/subvector and broadcast it.
7216 MVT CVT = MVT::getIntegerVT(SplatBitSize);
7217 Constant *C = getConstantVector(VT, SplatValue, SplatBitSize, *Ctx);
7218 SDValue CP = DAG.getConstantPool(C, PVT);
7219 unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
7221 Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
7222 SDVTList Tys = DAG.getVTList(MVT::getVectorVT(CVT, Repeat), MVT::Other);
7223 SDValue Ops[] = {DAG.getEntryNode(), CP};
7224 MachinePointerInfo MPI =
7225 MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
7226 SDValue Brdcst =
7227 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT,
7228 MPI, Alignment, MachineMemOperand::MOLoad);
7229 return DAG.getBitcast(VT, Brdcst);
7231 if (SplatBitSize > 64) {
7232 // Load the vector of constants and broadcast it.
7233 Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize, *Ctx);
7234 SDValue VCP = DAG.getConstantPool(VecC, PVT);
7235 unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
7236 MVT VVT = MVT::getVectorVT(VT.getScalarType(), NumElm);
7237 Align Alignment = cast<ConstantPoolSDNode>(VCP)->getAlign();
7238 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
7239 SDValue Ops[] = {DAG.getEntryNode(), VCP};
7240 MachinePointerInfo MPI =
7241 MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
7242 return DAG.getMemIntrinsicNode(X86ISD::SUBV_BROADCAST_LOAD, dl, Tys,
7243 Ops, VVT, MPI, Alignment,
7244 MachineMemOperand::MOLoad);
7248 // If we are moving a scalar into a vector (Ld must be set and all elements
7249 // but 1 are undef) and that operation is not obviously supported by
7250 // vmovd/vmovq/vmovss/vmovsd, then keep trying to form a broadcast.
7251 // That's better than general shuffling and may eliminate a load to GPR and
7252 // move from scalar to vector register.
7253 if (!Ld || NumElts - NumUndefElts != 1)
7254 return SDValue();
7255 unsigned ScalarSize = Ld.getValueSizeInBits();
7256 if (!(UndefElements[0] || (ScalarSize != 32 && ScalarSize != 64)))
7257 return SDValue();
7260 bool ConstSplatVal =
7261 (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);
7262 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
7264 // TODO: Handle broadcasts of non-constant sequences.
7266 // Make sure that all of the users of a non-constant load are from the
7267 // BUILD_VECTOR node.
7268 // FIXME: Is the use count needed for non-constant, non-load case?
7269 if (!ConstSplatVal && !IsLoad && !BVOp->isOnlyUserOf(Ld.getNode()))
7270 return SDValue();
7272 unsigned ScalarSize = Ld.getValueSizeInBits();
7273 bool IsGE256 = (VT.getSizeInBits() >= 256);
7275 // When optimizing for size, generate up to 5 extra bytes for a broadcast
7276 // instruction to save 8 or more bytes of constant pool data.
7277 // TODO: If multiple splats are generated to load the same constant,
7278 // it may be detrimental to overall size. There needs to be a way to detect
7279 // that condition to know if this is truly a size win.
7280 bool OptForSize = DAG.shouldOptForSize();
7282 // Handle broadcasting a single constant scalar from the constant pool
7283 // into a vector.
7284 // On Sandybridge (no AVX2), it is still better to load a constant vector
7285 // from the constant pool and not to broadcast it from a scalar.
7286 // But override that restriction when optimizing for size.
7287 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
7288 if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
7289 EVT CVT = Ld.getValueType();
7290 assert(!CVT.isVector() && "Must not broadcast a vector type");
7292 // Splat f16, f32, i32, v4f64, v4i64 in all cases with AVX2.
7293 // For size optimization, also splat v2f64 and v2i64, and for size opt
7294 // with AVX2, also splat i8 and i16.
7295 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
7296 if (ScalarSize == 32 ||
7297 (ScalarSize == 64 && (IsGE256 || Subtarget.hasVLX())) ||
7298 CVT == MVT::f16 ||
7299 (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
7300 const Constant *C = nullptr;
7301 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
7302 C = CI->getConstantIntValue();
7303 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
7304 C = CF->getConstantFPValue();
7306 assert(C && "Invalid constant type");
7308 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7309 SDValue CP =
7310 DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
7311 Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
7313 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
7314 SDValue Ops[] = {DAG.getEntryNode(), CP};
7315 MachinePointerInfo MPI =
7316 MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
7317 return DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT,
7318 MPI, Alignment, MachineMemOperand::MOLoad);
7322 // Handle AVX2 in-register broadcasts.
7323 if (!IsLoad && Subtarget.hasInt256() &&
7324 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
7325 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
7327 // The scalar source must be a normal load.
7328 if (!IsLoad)
7329 return SDValue();
7331 // Make sure the non-chain result is only used by this build vector.
7332 if (!Ld->hasNUsesOfValue(NumElts - NumUndefElts, 0))
7333 return SDValue();
7335 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
7336 (Subtarget.hasVLX() && ScalarSize == 64)) {
7337 auto *LN = cast<LoadSDNode>(Ld);
7338 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
7339 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
7340 SDValue BCast =
7341 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
7342 LN->getMemoryVT(), LN->getMemOperand());
7343 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BCast.getValue(1));
7344 return BCast;
7347 // The integer check is needed for the 64-bit into 128-bit case so that it
7348 // doesn't match double, since there is no vbroadcastsd xmm instruction.
7349 if (Subtarget.hasInt256() && Ld.getValueType().isInteger() &&
7350 (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)) {
7351 auto *LN = cast<LoadSDNode>(Ld);
7352 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
7353 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
7354 SDValue BCast =
7355 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
7356 LN->getMemoryVT(), LN->getMemOperand());
7357 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BCast.getValue(1));
7358 return BCast;
7361 if (ScalarSize == 16 && Subtarget.hasFP16() && IsGE256)
7362 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
7364 // Unsupported broadcast.
7365 return SDValue();
7368 /// For an EXTRACT_VECTOR_ELT with a constant index return the real
7369 /// underlying vector and index.
7371 /// Modifies \p ExtractedFromVec to the real vector and returns the real
7372 /// index.
7373 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
7374 SDValue ExtIdx) {
7375 int Idx = ExtIdx->getAsZExtVal();
7376 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
7377 return Idx;
7379 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
7380 // lowered this:
7381 // (extract_vector_elt (v8f32 %1), Constant<6>)
7382 // to:
7383 // (extract_vector_elt (vector_shuffle<2,u,u,u>
7384 // (extract_subvector (v8f32 %0), Constant<4>),
7385 // undef)
7386 // Constant<0>)
7387 // In this case the vector is the extract_subvector expression and the index
7388 // is 2, as specified by the shuffle.
7389 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
7390 SDValue ShuffleVec = SVOp->getOperand(0);
7391 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
7392 assert(ShuffleVecVT.getVectorElementType() ==
7393 ExtractedFromVec.getSimpleValueType().getVectorElementType());
7395 int ShuffleIdx = SVOp->getMaskElt(Idx);
7396 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
7397 ExtractedFromVec = ShuffleVec;
7398 return ShuffleIdx;
7400 return Idx;
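/// Attempt to lower a BUILD_VECTOR \p Op whose operands are mostly
/// EXTRACT_VECTOR_ELTs from at most two source vectors as a vector shuffle,
/// then insert the few remaining non-extract operands with
/// INSERT_VECTOR_ELT. Returns SDValue() if the pattern doesn't apply.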
7403 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
7404 MVT VT = Op.getSimpleValueType();
7406 // Skip if insert_vec_elt is not supported.
7407 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7408 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
7409 return SDValue();
7411 SDLoc DL(Op);
7412 unsigned NumElems = Op.getNumOperands();
7414 SDValue VecIn1;
7415 SDValue VecIn2;
7416 SmallVector<unsigned, 4> InsertIndices;
7417 SmallVector<int, 8> Mask(NumElems, -1);
7419 for (unsigned i = 0; i != NumElems; ++i) {
7420 unsigned Opc = Op.getOperand(i).getOpcode();
7422 if (Opc == ISD::UNDEF)
7423 continue;
7425 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
7426 // Quit if more than 1 element needs inserting.
7427 if (InsertIndices.size() > 1)
7428 return SDValue();
7430 InsertIndices.push_back(i);
7431 continue;
7434 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
7435 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
7437 // Quit if non-constant index.
7438 if (!isa<ConstantSDNode>(ExtIdx))
7439 return SDValue();
7440 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
7442 // Quit if extracted from vector of different type.
7443 if (ExtractedFromVec.getValueType() != VT)
7444 return SDValue();
7446 if (!VecIn1.getNode())
7447 VecIn1 = ExtractedFromVec;
7448 else if (VecIn1 != ExtractedFromVec) {
7449 if (!VecIn2.getNode())
7450 VecIn2 = ExtractedFromVec;
7451 else if (VecIn2 != ExtractedFromVec)
7452 // Quit if more than 2 vectors to shuffle
7453 return SDValue();
7456 if (ExtractedFromVec == VecIn1)
7457 Mask[i] = Idx;
7458 else if (ExtractedFromVec == VecIn2)
7459 Mask[i] = Idx + NumElems;
7462 if (!VecIn1.getNode())
7463 return SDValue();
7465 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
7466 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
7468 for (unsigned Idx : InsertIndices)
7469 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
7470 DAG.getIntPtrConstant(Idx, DL));
7472 return NV;
7475 // Lower BUILD_VECTOR operation for v8bf16, v16bf16 and v32bf16 types.
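// The bf16 operands are reinterpreted as f16 (or i16 when FP16 is not
// available), built as a vector of that type, and the result is bitcast
// back to the original bf16 vector type.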
7476 static SDValue LowerBUILD_VECTORvXbf16(SDValue Op, SelectionDAG &DAG,
7477 const X86Subtarget &Subtarget) {
7478 MVT VT = Op.getSimpleValueType();
7479 MVT IVT =
7480 VT.changeVectorElementType(Subtarget.hasFP16() ? MVT::f16 : MVT::i16);
7481 SmallVector<SDValue, 16> NewOps;
7482 for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I)
7483 NewOps.push_back(DAG.getBitcast(Subtarget.hasFP16() ? MVT::f16 : MVT::i16,
7484 Op.getOperand(I)));
7485 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(), IVT, NewOps);
7486 return DAG.getBitcast(VT, Res);
7489 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
7490 static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG,
7491 const X86Subtarget &Subtarget) {
7493 MVT VT = Op.getSimpleValueType();
7494 assert((VT.getVectorElementType() == MVT::i1) &&
7495 "Unexpected type in LowerBUILD_VECTORvXi1!");
7497 SDLoc dl(Op);
7498 if (ISD::isBuildVectorAllZeros(Op.getNode()) ||
7499 ISD::isBuildVectorAllOnes(Op.getNode()))
7500 return Op;
7502 uint64_t Immediate = 0;
7503 SmallVector<unsigned, 16> NonConstIdx;
7504 bool IsSplat = true;
7505 bool HasConstElts = false;
7506 int SplatIdx = -1;
7507 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
7508 SDValue In = Op.getOperand(idx);
7509 if (In.isUndef())
7510 continue;
7511 if (auto *InC = dyn_cast<ConstantSDNode>(In)) {
7512 Immediate |= (InC->getZExtValue() & 0x1) << idx;
7513 HasConstElts = true;
7514 } else {
7515 NonConstIdx.push_back(idx);
7517 if (SplatIdx < 0)
7518 SplatIdx = idx;
7519 else if (In != Op.getOperand(SplatIdx))
7520 IsSplat = false;
7523 // For a splat, use "(select i1 splat_elt, all-ones, all-zeroes)".
7524 if (IsSplat) {
7525 // The build_vector allows the scalar element to be larger than the vector
7526 // element type. We need to mask it to use as a condition unless we know
7527 // the upper bits are zero.
7528 // FIXME: Use computeKnownBits instead of checking specific opcode?
7529 SDValue Cond = Op.getOperand(SplatIdx);
7530 assert(Cond.getValueType() == MVT::i8 && "Unexpected VT!");
7531 if (Cond.getOpcode() != ISD::SETCC)
7532 Cond = DAG.getNode(ISD::AND, dl, MVT::i8, Cond,
7533 DAG.getConstant(1, dl, MVT::i8));
7535 // Perform the select in the scalar domain so we can use cmov.
7536 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
7537 SDValue Select = DAG.getSelect(dl, MVT::i32, Cond,
7538 DAG.getAllOnesConstant(dl, MVT::i32),
7539 DAG.getConstant(0, dl, MVT::i32));
7540 Select = DAG.getBitcast(MVT::v32i1, Select);
7541 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Select, Select);
7542 } else {
7543 MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
7544 SDValue Select = DAG.getSelect(dl, ImmVT, Cond,
7545 DAG.getAllOnesConstant(dl, ImmVT),
7546 DAG.getConstant(0, dl, ImmVT));
7547 MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
7548 Select = DAG.getBitcast(VecVT, Select);
7549 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Select,
7550 DAG.getIntPtrConstant(0, dl));
7554 // Insert the elements one by one.
7555 SDValue DstVec;
7556 if (HasConstElts) {
7557 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
7558 SDValue ImmL = DAG.getConstant(Lo_32(Immediate), dl, MVT::i32);
7559 SDValue ImmH = DAG.getConstant(Hi_32(Immediate), dl, MVT::i32);
7560 ImmL = DAG.getBitcast(MVT::v32i1, ImmL);
7561 ImmH = DAG.getBitcast(MVT::v32i1, ImmH);
7562 DstVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, ImmL, ImmH);
7563 } else {
7564 MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
7565 SDValue Imm = DAG.getConstant(Immediate, dl, ImmVT);
7566 MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
7567 DstVec = DAG.getBitcast(VecVT, Imm);
7568 DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, DstVec,
7569 DAG.getIntPtrConstant(0, dl));
7571 } else
7572 DstVec = DAG.getUNDEF(VT);
7574 for (unsigned InsertIdx : NonConstIdx) {
7575 DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
7576 Op.getOperand(InsertIdx),
7577 DAG.getIntPtrConstant(InsertIdx, dl));
7579 return DstVec;
7582 LLVM_ATTRIBUTE_UNUSED static bool isHorizOp(unsigned Opcode) {
7583 switch (Opcode) {
7584 case X86ISD::PACKSS:
7585 case X86ISD::PACKUS:
7586 case X86ISD::FHADD:
7587 case X86ISD::FHSUB:
7588 case X86ISD::HADD:
7589 case X86ISD::HSUB:
7590 return true;
7592 return false;
7595 /// This is a helper function of LowerToHorizontalOp().
7596 /// This function checks whether the input build_vector \p N implements a
7597 /// 128-bit partial horizontal operation on a 256-bit vector, but that operation
7598 /// may not match the layout of an x86 256-bit horizontal instruction.
7599 /// In other words, if this returns true, then some extraction/insertion will
7600 /// be required to produce a valid horizontal instruction.
7602 /// Parameter \p Opcode defines the kind of horizontal operation to match.
7603 /// For example, if \p Opcode is equal to ISD::ADD, then this function
7604 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
7605 /// is equal to ISD::SUB, then this function checks if this is a horizontal
7606 /// arithmetic sub.
7608 /// This function only analyzes elements of \p N whose indices are
7609 /// in range [BaseIdx, LastIdx).
7611 /// TODO: This function was originally used to match both real and fake partial
7612 /// horizontal operations, but the index-matching logic is incorrect for that.
7613 /// See the corrected implementation in isHopBuildVector(). Can we reduce this
7614 /// code because it is only used for partial h-op matching now?
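///
/// For example (illustrative), with \p Opcode == ISD::ADD each analyzed
/// element must look like (add (extract_vector_elt V, I),
/// (extract_vector_elt V, I+1)) for consecutive even indices I, where V is
/// \p V0 for the first half of the analyzed range and \p V1 for the second.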
7615 static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
7616 SelectionDAG &DAG,
7617 unsigned BaseIdx, unsigned LastIdx,
7618 SDValue &V0, SDValue &V1) {
7619 EVT VT = N->getValueType(0);
7620 assert(VT.is256BitVector() && "Only use for matching partial 256-bit h-ops");
7621 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
7622 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
7623 "Invalid Vector in input!");
7625 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
7626 bool CanFold = true;
7627 unsigned ExpectedVExtractIdx = BaseIdx;
7628 unsigned NumElts = LastIdx - BaseIdx;
7629 V0 = DAG.getUNDEF(VT);
7630 V1 = DAG.getUNDEF(VT);
7632 // Check if N implements a horizontal binop.
7633 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
7634 SDValue Op = N->getOperand(i + BaseIdx);
7636 // Skip UNDEFs.
7637 if (Op->isUndef()) {
7638 // Update the expected vector extract index.
7639 if (i * 2 == NumElts)
7640 ExpectedVExtractIdx = BaseIdx;
7641 ExpectedVExtractIdx += 2;
7642 continue;
7645 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
7647 if (!CanFold)
7648 break;
7650 SDValue Op0 = Op.getOperand(0);
7651 SDValue Op1 = Op.getOperand(1);
7653 // Try to match the following pattern:
7654 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
7655 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7656 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7657 Op0.getOperand(0) == Op1.getOperand(0) &&
7658 isa<ConstantSDNode>(Op0.getOperand(1)) &&
7659 isa<ConstantSDNode>(Op1.getOperand(1)));
7660 if (!CanFold)
7661 break;
7663 unsigned I0 = Op0.getConstantOperandVal(1);
7664 unsigned I1 = Op1.getConstantOperandVal(1);
7666 if (i * 2 < NumElts) {
7667 if (V0.isUndef()) {
7668 V0 = Op0.getOperand(0);
7669 if (V0.getValueType() != VT)
7670 return false;
7672 } else {
7673 if (V1.isUndef()) {
7674 V1 = Op0.getOperand(0);
7675 if (V1.getValueType() != VT)
7676 return false;
7678 if (i * 2 == NumElts)
7679 ExpectedVExtractIdx = BaseIdx;
7682 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
7683 if (I0 == ExpectedVExtractIdx)
7684 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
7685 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
7686 // Try to match the following dag sequence:
7687 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
7688 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
7689 } else
7690 CanFold = false;
7692 ExpectedVExtractIdx += 2;
7695 return CanFold;
7698 /// Emit a sequence of two 128-bit horizontal add/sub followed by
7699 /// a concat_vector.
7701 /// This is a helper function of LowerToHorizontalOp().
7702 /// This function expects two 256-bit vectors called V0 and V1.
7703 /// At first, each vector is split into two separate 128-bit vectors.
7704 /// Then, the resulting 128-bit vectors are used to implement two
7705 /// horizontal binary operations.
7707 /// The kind of horizontal binary operation is defined by \p X86Opcode.
7709 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as input to
7710 /// the two new horizontal binops.
7711 /// When Mode is set, the first horizontal binop dag node takes as input
7712 /// the lower 128 bits of V0 and the upper 128 bits of V0. The second
7713 /// horizontal binop dag node takes as input the lower 128 bits of V1
7714 /// and the upper 128 bits of V1.
7715 /// Example:
7716 /// HADD V0_LO, V0_HI
7717 /// HADD V1_LO, V1_HI
7719 /// Otherwise, the first horizontal binop dag node takes as input the lower
7720 /// 128 bits of V0 and the lower 128 bits of V1, and the second horizontal binop
7721 /// dag node takes the upper 128 bits of V0 and the upper 128 bits of V1.
7722 /// Example:
7723 /// HADD V0_LO, V1_LO
7724 /// HADD V0_HI, V1_HI
7726 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
7727 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
7728 /// the upper 128-bits of the result.
7729 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
7730 const SDLoc &DL, SelectionDAG &DAG,
7731 unsigned X86Opcode, bool Mode,
7732 bool isUndefLO, bool isUndefHI) {
7733 MVT VT = V0.getSimpleValueType();
7734 assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
7735 "Invalid nodes in input!");
7737 unsigned NumElts = VT.getVectorNumElements();
7738 SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
7739 SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
7740 SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
7741 SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
7742 MVT NewVT = V0_LO.getSimpleValueType();
7744 SDValue LO = DAG.getUNDEF(NewVT);
7745 SDValue HI = DAG.getUNDEF(NewVT);
7747 if (Mode) {
7748 // Don't emit a horizontal binop if the result is expected to be UNDEF.
7749 if (!isUndefLO && !V0->isUndef())
7750 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
7751 if (!isUndefHI && !V1->isUndef())
7752 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
7753 } else {
7754 // Don't emit a horizontal binop if the result is expected to be UNDEF.
7755 if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
7756 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
7758 if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
7759 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
7762 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
7765 /// Returns true iff \p BV builds a vector with a result equivalent to
7766 /// the result of an ADDSUB/SUBADD operation.
7767 /// If true is returned then the operands of ADDSUB = Opnd0 +- Opnd1
7768 /// (SUBADD = Opnd0 -+ Opnd1) operation are written to the parameters
7769 /// \p Opnd0 and \p Opnd1.
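///
/// For example (illustrative), a v4f32 build_vector of
/// (fsub (extractelt A,0),(extractelt B,0)), (fadd (extractelt A,1),(extractelt B,1)),
/// (fsub (extractelt A,2),(extractelt B,2)), (fadd (extractelt A,3),(extractelt B,3))
/// matches with \p IsSubAdd == false, \p Opnd0 == A and \p Opnd1 == B.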
7770 static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
7771 const X86Subtarget &Subtarget, SelectionDAG &DAG,
7772 SDValue &Opnd0, SDValue &Opnd1,
7773 unsigned &NumExtracts,
7774 bool &IsSubAdd) {
7776 MVT VT = BV->getSimpleValueType(0);
7777 if (!Subtarget.hasSSE3() || !VT.isFloatingPoint())
7778 return false;
7780 unsigned NumElts = VT.getVectorNumElements();
7781 SDValue InVec0 = DAG.getUNDEF(VT);
7782 SDValue InVec1 = DAG.getUNDEF(VT);
7784 NumExtracts = 0;
7786 // Odd-numbered elements in the input build vector are obtained from
7787 // adding/subtracting two integer/float elements.
7788 // Even-numbered elements in the input build vector are obtained from
7789 // subtracting/adding two integer/float elements.
7790 unsigned Opc[2] = {0, 0};
7791 for (unsigned i = 0, e = NumElts; i != e; ++i) {
7792 SDValue Op = BV->getOperand(i);
7794 // Skip 'undef' values.
7795 unsigned Opcode = Op.getOpcode();
7796 if (Opcode == ISD::UNDEF)
7797 continue;
7799 // Early exit if we found an unexpected opcode.
7800 if (Opcode != ISD::FADD && Opcode != ISD::FSUB)
7801 return false;
7803 SDValue Op0 = Op.getOperand(0);
7804 SDValue Op1 = Op.getOperand(1);
7806 // Try to match the following pattern:
7807 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
7808 // Early exit if we cannot match that sequence.
7809 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7810 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7811 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
7812 Op0.getOperand(1) != Op1.getOperand(1))
7813 return false;
7815 unsigned I0 = Op0.getConstantOperandVal(1);
7816 if (I0 != i)
7817 return false;
7819 // We found a valid add/sub node; make sure it's the same opcode as previous
7820 // elements for this parity.
7821 if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode)
7822 return false;
7823 Opc[i % 2] = Opcode;
7825 // Update InVec0 and InVec1.
7826 if (InVec0.isUndef()) {
7827 InVec0 = Op0.getOperand(0);
7828 if (InVec0.getSimpleValueType() != VT)
7829 return false;
7831 if (InVec1.isUndef()) {
7832 InVec1 = Op1.getOperand(0);
7833 if (InVec1.getSimpleValueType() != VT)
7834 return false;
7837 // Make sure that the operands of each add/sub node always
7838 // come from the same pair of vectors.
7839 if (InVec0 != Op0.getOperand(0)) {
7840 if (Opcode == ISD::FSUB)
7841 return false;
7843 // FADD is commutable. Try to commute the operands
7844 // and then test again.
7845 std::swap(Op0, Op1);
7846 if (InVec0 != Op0.getOperand(0))
7847 return false;
7850 if (InVec1 != Op1.getOperand(0))
7851 return false;
7853 // Increment the number of extractions done.
7854 ++NumExtracts;
7857 // Ensure we have found an opcode for both parities and that they are
7858 // different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
7859 // inputs are undef.
7860 if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
7861 InVec0.isUndef() || InVec1.isUndef())
7862 return false;
7864 IsSubAdd = Opc[0] == ISD::FADD;
7866 Opnd0 = InVec0;
7867 Opnd1 = InVec1;
7868 return true;
7871 /// Returns true if it is possible to fold MUL and an idiom that has already been
7872 /// recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
7873 /// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
7874 /// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1, \p Opnd2.
7876 /// Prior to calling this function it should be known that there is some
7877 /// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
7878 /// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
7879 /// before replacement of such SDNode with ADDSUB operation. Thus the number
7880 /// of \p Opnd0 uses is expected to be equal to 2.
7881 /// For example, this function may be called for the following IR:
7882 /// %AB = fmul fast <2 x double> %A, %B
7883 /// %Sub = fsub fast <2 x double> %AB, %C
7884 /// %Add = fadd fast <2 x double> %AB, %C
7885 /// %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
7886 /// <2 x i32> <i32 0, i32 3>
7887 /// There is a def for %Addsub here, which potentially can be replaced by
7888 /// X86ISD::ADDSUB operation:
7889 /// %Addsub = X86ISD::ADDSUB %AB, %C
7890 /// and such ADDSUB can further be replaced with FMADDSUB:
7891 /// %Addsub = FMADDSUB %A, %B, %C.
7893 /// The main reason why this method is called before the replacement of the
7894 /// recognized ADDSUB idiom with an ADDSUB operation is that such a replacement
7895 /// is sometimes illegal. E.g. 512-bit ADDSUB is not available, while 512-bit
7896 /// FMADDSUB is.
7897 static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
7898 SelectionDAG &DAG,
7899 SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
7900 unsigned ExpectedUses) {
7901 if (Opnd0.getOpcode() != ISD::FMUL ||
7902 !Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
7903 return false;
7905 // FIXME: These checks must match the similar ones in
7906 // DAGCombiner::visitFADDForFMACombine. It would be good to have one
7907 // function that would answer if it is Ok to fuse MUL + ADD to FMADD
7908 // or MUL + ADDSUB to FMADDSUB.
7909 const TargetOptions &Options = DAG.getTarget().Options;
7910 bool AllowFusion =
7911 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
7912 if (!AllowFusion)
7913 return false;
7915 Opnd2 = Opnd1;
7916 Opnd1 = Opnd0.getOperand(1);
7917 Opnd0 = Opnd0.getOperand(0);
7919 return true;
7922 /// Try to fold a build_vector that performs an 'addsub' or 'fmaddsub' or
7923 /// 'fsubadd' operation accordingly to X86ISD::ADDSUB or X86ISD::FMADDSUB or
7924 /// X86ISD::FMSUBADD node.
7925 static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
7926 const X86Subtarget &Subtarget,
7927 SelectionDAG &DAG) {
7928 SDValue Opnd0, Opnd1;
7929 unsigned NumExtracts;
7930 bool IsSubAdd;
7931 if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts,
7932 IsSubAdd))
7933 return SDValue();
7935 MVT VT = BV->getSimpleValueType(0);
7936 SDLoc DL(BV);
7938 // Try to generate X86ISD::FMADDSUB node here.
7939 SDValue Opnd2;
7940 if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) {
7941 unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
7942 return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
7945 // We only support ADDSUB.
7946 if (IsSubAdd)
7947 return SDValue();
7949 // There are no known X86 targets with 512-bit ADDSUB instructions!
7950 // Convert to blend(fsub,fadd).
7951 if (VT.is512BitVector()) {
7952 SmallVector<int> Mask;
7953 for (int I = 0, E = VT.getVectorNumElements(); I != E; I += 2) {
7954 Mask.push_back(I);
7955 Mask.push_back(I + E + 1);
7957 SDValue Sub = DAG.getNode(ISD::FSUB, DL, VT, Opnd0, Opnd1);
7958 SDValue Add = DAG.getNode(ISD::FADD, DL, VT, Opnd0, Opnd1);
7959 return DAG.getVectorShuffle(VT, DL, Sub, Add, Mask);
7962 return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
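/// Match \p BV against a full horizontal add/sub pattern. On success, returns
/// true and sets \p HOpcode to the X86ISD horizontal opcode (HADD/HSUB/FHADD/
/// FHSUB) and \p V0/\p V1 to the two source vectors.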
7965 static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
7966 unsigned &HOpcode, SDValue &V0, SDValue &V1) {
7967 // Initialize outputs to known values.
7968 MVT VT = BV->getSimpleValueType(0);
7969 HOpcode = ISD::DELETED_NODE;
7970 V0 = DAG.getUNDEF(VT);
7971 V1 = DAG.getUNDEF(VT);
7973 // x86 256-bit horizontal ops are defined in a non-obvious way. Each 128-bit
7974 // half of the result is calculated independently from the 128-bit halves of
7975 // the inputs, so that makes the index-checking logic below more complicated.
7976 unsigned NumElts = VT.getVectorNumElements();
7977 unsigned GenericOpcode = ISD::DELETED_NODE;
7978 unsigned Num128BitChunks = VT.is256BitVector() ? 2 : 1;
7979 unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
7980 unsigned NumEltsIn64Bits = NumEltsIn128Bits / 2;
7981 for (unsigned i = 0; i != Num128BitChunks; ++i) {
7982 for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
7983 // Ignore undef elements.
7984 SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
7985 if (Op.isUndef())
7986 continue;
7988 // If there's an opcode mismatch, we're done.
7989 if (HOpcode != ISD::DELETED_NODE && Op.getOpcode() != GenericOpcode)
7990 return false;
7992 // Initialize horizontal opcode.
7993 if (HOpcode == ISD::DELETED_NODE) {
7994 GenericOpcode = Op.getOpcode();
7995 switch (GenericOpcode) {
7996 case ISD::ADD: HOpcode = X86ISD::HADD; break;
7997 case ISD::SUB: HOpcode = X86ISD::HSUB; break;
7998 case ISD::FADD: HOpcode = X86ISD::FHADD; break;
7999 case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
8000 default: return false;
8004 SDValue Op0 = Op.getOperand(0);
8005 SDValue Op1 = Op.getOperand(1);
8006 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8007 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8008 Op0.getOperand(0) != Op1.getOperand(0) ||
8009 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
8010 !isa<ConstantSDNode>(Op1.getOperand(1)) || !Op.hasOneUse())
8011 return false;
8013 // The source vector is chosen based on which 64-bit half of the
8014 // destination vector is being calculated.
8015 if (j < NumEltsIn64Bits) {
8016 if (V0.isUndef())
8017 V0 = Op0.getOperand(0);
8018 } else {
8019 if (V1.isUndef())
8020 V1 = Op0.getOperand(0);
8023 SDValue SourceVec = (j < NumEltsIn64Bits) ? V0 : V1;
8024 if (SourceVec != Op0.getOperand(0))
8025 return false;
8027 // op (extract_vector_elt A, I), (extract_vector_elt A, I+1)
8028 unsigned ExtIndex0 = Op0.getConstantOperandVal(1);
8029 unsigned ExtIndex1 = Op1.getConstantOperandVal(1);
8030 unsigned ExpectedIndex = i * NumEltsIn128Bits +
8031 (j % NumEltsIn64Bits) * 2;
8032 if (ExpectedIndex == ExtIndex0 && ExtIndex1 == ExtIndex0 + 1)
8033 continue;
8035 // If this is not a commutative op, this does not match.
8036 if (GenericOpcode != ISD::ADD && GenericOpcode != ISD::FADD)
8037 return false;
8039 // Addition is commutative, so try swapping the extract indexes.
8040 // op (extract_vector_elt A, I+1), (extract_vector_elt A, I)
8041 if (ExpectedIndex == ExtIndex1 && ExtIndex0 == ExtIndex1 + 1)
8042 continue;
8044 // Extract indexes do not match horizontal requirement.
8045 return false;
8048 // We matched. Opcode and operands are returned by reference as arguments.
8049 return true;
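/// Create the X86ISD horizontal op \p HOpcode for build_vector \p BV from
/// sources \p V0 and \p V1, extracting or widening the inputs to match the
/// build_vector's type, and using a 128-bit op when only the lower half of a
/// 256-bit result is demanded.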
8052 static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
8053 SelectionDAG &DAG, unsigned HOpcode,
8054 SDValue V0, SDValue V1) {
8055 // If either input vector is not the same size as the build vector,
8056 // extract/insert the low bits to the correct size.
8057 // This is free (examples: zmm --> xmm, xmm --> ymm).
8058 MVT VT = BV->getSimpleValueType(0);
8059 unsigned Width = VT.getSizeInBits();
8060 if (V0.getValueSizeInBits() > Width)
8061 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), Width);
8062 else if (V0.getValueSizeInBits() < Width)
8063 V0 = insertSubVector(DAG.getUNDEF(VT), V0, 0, DAG, SDLoc(BV), Width);
8065 if (V1.getValueSizeInBits() > Width)
8066 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), Width);
8067 else if (V1.getValueSizeInBits() < Width)
8068 V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);
8070 unsigned NumElts = VT.getVectorNumElements();
8071 APInt DemandedElts = APInt::getAllOnes(NumElts);
8072 for (unsigned i = 0; i != NumElts; ++i)
8073 if (BV->getOperand(i).isUndef())
8074 DemandedElts.clearBit(i);
8076 // If we don't need the upper xmm, then perform as a xmm hop.
8077 unsigned HalfNumElts = NumElts / 2;
8078 if (VT.is256BitVector() && DemandedElts.lshr(HalfNumElts) == 0) {
8079 MVT HalfVT = VT.getHalfNumVectorElementsVT();
8080 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), 128);
8081 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), 128);
8082 SDValue Half = DAG.getNode(HOpcode, SDLoc(BV), HalfVT, V0, V1);
8083 return insertSubVector(DAG.getUNDEF(VT), Half, 0, DAG, SDLoc(BV), 256);
8086 return DAG.getNode(HOpcode, SDLoc(BV), VT, V0, V1);
8089 /// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
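/// For example (illustrative), a v4i32 build_vector of
/// (add (extractelt A,0),(extractelt A,1)), (add (extractelt A,2),(extractelt A,3)),
/// (add (extractelt B,0),(extractelt B,1)), (add (extractelt B,2),(extractelt B,3))
/// can be lowered to (X86ISD::HADD A, B) on SSSE3.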
8090 static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
8091 const X86Subtarget &Subtarget,
8092 SelectionDAG &DAG) {
8093 // We need at least 2 non-undef elements to make this worthwhile by default.
8094 unsigned NumNonUndefs =
8095 count_if(BV->op_values(), [](SDValue V) { return !V.isUndef(); });
8096 if (NumNonUndefs < 2)
8097 return SDValue();
8099 // There are 4 sets of horizontal math operations distinguished by type:
8100 // int/FP at 128-bit/256-bit. Each type was introduced with a different
8101 // subtarget feature. Try to match those "native" patterns first.
8102 MVT VT = BV->getSimpleValueType(0);
8103 if (((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) ||
8104 ((VT == MVT::v8i16 || VT == MVT::v4i32) && Subtarget.hasSSSE3()) ||
8105 ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget.hasAVX()) ||
8106 ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())) {
8107 unsigned HOpcode;
8108 SDValue V0, V1;
8109 if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
8110 return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
8113 // Try harder to match 256-bit ops by using extract/concat.
8114 if (!Subtarget.hasAVX() || !VT.is256BitVector())
8115 return SDValue();
8117 // Count the number of UNDEF operands in the input build_vector.
8118 unsigned NumElts = VT.getVectorNumElements();
8119 unsigned Half = NumElts / 2;
8120 unsigned NumUndefsLO = 0;
8121 unsigned NumUndefsHI = 0;
8122 for (unsigned i = 0, e = Half; i != e; ++i)
8123 if (BV->getOperand(i)->isUndef())
8124 NumUndefsLO++;
8126 for (unsigned i = Half, e = NumElts; i != e; ++i)
8127 if (BV->getOperand(i)->isUndef())
8128 NumUndefsHI++;
8130 SDLoc DL(BV);
8131 SDValue InVec0, InVec1;
8132 if (VT == MVT::v8i32 || VT == MVT::v16i16) {
8133 SDValue InVec2, InVec3;
8134 unsigned X86Opcode;
8135 bool CanFold = true;
8137 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
8138 isHorizontalBinOpPart(BV, ISD::ADD, DAG, Half, NumElts, InVec2,
8139 InVec3) &&
8140 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
8141 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
8142 X86Opcode = X86ISD::HADD;
8143 else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, Half, InVec0,
8144 InVec1) &&
8145 isHorizontalBinOpPart(BV, ISD::SUB, DAG, Half, NumElts, InVec2,
8146 InVec3) &&
8147 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
8148 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
8149 X86Opcode = X86ISD::HSUB;
8150 else
8151 CanFold = false;
8153 if (CanFold) {
8154 // Do not try to expand this build_vector into a pair of horizontal
8155 // add/sub if we can emit a pair of scalar add/sub.
8156 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
8157 return SDValue();
8159 // Convert this build_vector into a pair of horizontal binops followed by
8160 // a concat vector. We must adjust the outputs from the partial horizontal
8161 // matching calls above to account for undefined vector halves.
8162 SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
8163 SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
8164 assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
8165 bool isUndefLO = NumUndefsLO == Half;
8166 bool isUndefHI = NumUndefsHI == Half;
8167 return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
8168 isUndefHI);
8172 if (VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
8173 VT == MVT::v16i16) {
8174 unsigned X86Opcode;
8175 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
8176 X86Opcode = X86ISD::HADD;
8177 else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, NumElts, InVec0,
8178 InVec1))
8179 X86Opcode = X86ISD::HSUB;
8180 else if (isHorizontalBinOpPart(BV, ISD::FADD, DAG, 0, NumElts, InVec0,
8181 InVec1))
8182 X86Opcode = X86ISD::FHADD;
8183 else if (isHorizontalBinOpPart(BV, ISD::FSUB, DAG, 0, NumElts, InVec0,
8184 InVec1))
8185 X86Opcode = X86ISD::FHSUB;
8186 else
8187 return SDValue();
8189 // Don't try to expand this build_vector into a pair of horizontal add/sub
8190 // if we can simply emit a pair of scalar add/sub.
8191 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
8192 return SDValue();
8194 // Convert this build_vector into two horizontal add/sub followed by
8195 // a concat vector.
8196 bool isUndefLO = NumUndefsLO == Half;
8197 bool isUndefHI = NumUndefsHI == Half;
8198 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
8199 isUndefLO, isUndefHI);
8202 return SDValue();
8205 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
8206 SelectionDAG &DAG);
8208 /// If a BUILD_VECTOR's source elements all apply the same bit operation and
8209 /// one of their operands is constant, lower to a pair of BUILD_VECTORs and
8210 /// just apply the bit operation to the vectors.
8211 /// NOTE: It's not in our interest to start making a general purpose vectorizer
8212 /// from this, but enough scalar bit operations are created by the later
8213 /// legalization + scalarization stages to need basic support.
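///
/// For example (illustrative): (build_vector (and a, 1), (and b, 2), (and c, 4),
/// (and d, 8)) becomes (and (build_vector a, b, c, d),
/// (build_vector 1, 2, 4, 8)).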
8214 static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
8215 const X86Subtarget &Subtarget,
8216 SelectionDAG &DAG) {
8217 SDLoc DL(Op);
8218 MVT VT = Op->getSimpleValueType(0);
8219 unsigned NumElems = VT.getVectorNumElements();
8220 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8222 // Check that all elements have the same opcode.
8223 // TODO: Should we allow UNDEFs, and if so, how many?
8224 unsigned Opcode = Op->getOperand(0).getOpcode();
8225 for (unsigned i = 1; i < NumElems; ++i)
8226 if (Opcode != Op->getOperand(i).getOpcode())
8227 return SDValue();
8229 // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
8230 bool IsShift = false;
8231 switch (Opcode) {
8232 default:
8233 return SDValue();
8234 case ISD::SHL:
8235 case ISD::SRL:
8236 case ISD::SRA:
8237 IsShift = true;
8238 break;
8239 case ISD::AND:
8240 case ISD::XOR:
8241 case ISD::OR:
8242 // Don't do this if the buildvector is a splat - we'd replace one
8243 // constant with an entire vector.
8244 if (Op->getSplatValue())
8245 return SDValue();
8246 if (!TLI.isOperationLegalOrPromote(Opcode, VT))
8247 return SDValue();
8248 break;
8251 SmallVector<SDValue, 4> LHSElts, RHSElts;
8252 for (SDValue Elt : Op->ops()) {
8253 SDValue LHS = Elt.getOperand(0);
8254 SDValue RHS = Elt.getOperand(1);
8256 // We expect the canonicalized RHS operand to be the constant.
8257 if (!isa<ConstantSDNode>(RHS))
8258 return SDValue();
8260 // Extend shift amounts.
8261 if (RHS.getValueSizeInBits() != VT.getScalarSizeInBits()) {
8262 if (!IsShift)
8263 return SDValue();
8264 RHS = DAG.getZExtOrTrunc(RHS, DL, VT.getScalarType());
8267 LHSElts.push_back(LHS);
8268 RHSElts.push_back(RHS);
8271 // Limit to shifts by uniform immediates.
8272 // TODO: Only accept vXi8/vXi64 special cases?
8273 // TODO: Permit non-uniform XOP/AVX2/MULLO cases?
8274 if (IsShift && any_of(RHSElts, [&](SDValue V) { return RHSElts[0] != V; }))
8275 return SDValue();
8277 SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
8278 SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
8279 SDValue Res = DAG.getNode(Opcode, DL, VT, LHS, RHS);
8281 if (!IsShift)
8282 return Res;
8284 // Immediately lower the shift to ensure the constant build vector doesn't
8285 // get converted to a constant pool before the shift is lowered.
8286 return LowerShift(Res, Subtarget, DAG);
8289 /// Create a vector constant without a load. SSE/AVX provide the bare minimum
8290 /// functionality to do this, so it's all zeros, all ones, or some derivation
8291 /// that is cheap to calculate.
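///
/// For example (illustrative), an all-ones v4i32 build_vector is matched by a
/// single pcmpeqd rather than loaded from the constant pool.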
8292 static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
8293 const X86Subtarget &Subtarget) {
8294 SDLoc DL(Op);
8295 MVT VT = Op.getSimpleValueType();
8297 // Vectors containing all zeros can be matched by pxor and xorps.
8298 if (ISD::isBuildVectorAllZeros(Op.getNode()))
8299 return Op;
8301 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
8302 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
8303 // vpcmpeqd on 256-bit vectors.
8304 if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
8305 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
8306 return Op;
8308 return getOnesVector(VT, DAG, DL);
8311 return SDValue();
8314 /// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
8315 /// from a vector of source values and a vector of extraction indices.
8316 /// The vectors might be manipulated to match the type of the permute op.
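///
/// For example (illustrative), a v16i8 source with a v16i8 index vector can
/// be lowered directly to (X86ISD::PSHUFB SrcVec, IndicesVec) on SSSE3.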
8317 static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
8318 SDLoc &DL, SelectionDAG &DAG,
8319 const X86Subtarget &Subtarget) {
8320 MVT ShuffleVT = VT;
8321 EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
8322 unsigned NumElts = VT.getVectorNumElements();
8323 unsigned SizeInBits = VT.getSizeInBits();
8325 // Adjust IndicesVec to match VT size.
8326 assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
8327 "Illegal variable permute mask size");
8328 if (IndicesVec.getValueType().getVectorNumElements() > NumElts) {
8329 // Narrow/widen the indices vector to the correct size.
8330 if (IndicesVec.getValueSizeInBits() > SizeInBits)
8331 IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec),
8332 NumElts * VT.getScalarSizeInBits());
8333 else if (IndicesVec.getValueSizeInBits() < SizeInBits)
8334 IndicesVec = widenSubVector(IndicesVec, false, Subtarget, DAG,
8335 SDLoc(IndicesVec), SizeInBits);
8336 // Zero-extend the index elements within the vector.
8337 if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
8338 IndicesVec = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(IndicesVec),
8339 IndicesVT, IndicesVec);
8341 IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
8343 // Handle a SrcVec that doesn't match the VT type.
8344 if (SrcVec.getValueSizeInBits() != SizeInBits) {
8345 if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) {
8346 // Handle larger SrcVec by treating it as a larger permute.
8347 unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits;
8348 VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
8349 IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
8350 IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false,
8351 Subtarget, DAG, SDLoc(IndicesVec));
8352 SDValue NewSrcVec =
8353 createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
8354 if (NewSrcVec)
8355 return extractSubVector(NewSrcVec, 0, DAG, DL, SizeInBits);
8356 return SDValue();
8357 } else if (SrcVec.getValueSizeInBits() < SizeInBits) {
8358 // Widen smaller SrcVec to match VT.
8359 SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
8360 } else
8361 return SDValue();
8364 auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) {
8365 assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale");
8366 EVT SrcVT = Idx.getValueType();
8367 unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale;
8368 uint64_t IndexScale = 0;
8369 uint64_t IndexOffset = 0;
8371 // If we're scaling a smaller permute op, then we need to repeat the
8372 // indices, scaling and offsetting them as well.
8373 // e.g. v4i32 -> v16i8 (Scale = 4)
8374 // IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4)
8375 // IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0)
8376 for (uint64_t i = 0; i != Scale; ++i) {
8377 IndexScale |= Scale << (i * NumDstBits);
8378 IndexOffset |= i << (i * NumDstBits);
8381 Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx,
8382 DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT));
8383 Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx,
8384 DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT));
8385 return Idx;
8388 unsigned Opcode = 0;
8389 switch (VT.SimpleTy) {
8390 default:
8391 break;
8392 case MVT::v16i8:
8393 if (Subtarget.hasSSSE3())
8394 Opcode = X86ISD::PSHUFB;
8395 break;
8396 case MVT::v8i16:
8397 if (Subtarget.hasVLX() && Subtarget.hasBWI())
8398 Opcode = X86ISD::VPERMV;
8399 else if (Subtarget.hasSSSE3()) {
8400 Opcode = X86ISD::PSHUFB;
8401 ShuffleVT = MVT::v16i8;
8403 break;
8404 case MVT::v4f32:
8405 case MVT::v4i32:
8406 if (Subtarget.hasAVX()) {
8407 Opcode = X86ISD::VPERMILPV;
8408 ShuffleVT = MVT::v4f32;
8409 } else if (Subtarget.hasSSSE3()) {
8410 Opcode = X86ISD::PSHUFB;
8411 ShuffleVT = MVT::v16i8;
8413 break;
8414 case MVT::v2f64:
8415 case MVT::v2i64:
8416 if (Subtarget.hasAVX()) {
8417 // VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec.
8418 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
8419 Opcode = X86ISD::VPERMILPV;
8420 ShuffleVT = MVT::v2f64;
8421 } else if (Subtarget.hasSSE41()) {
8422 // SSE41 can compare v2i64 - select between indices 0 and 1.
8423 return DAG.getSelectCC(
8424 DL, IndicesVec,
8425 getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL),
8426 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}),
8427 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}),
8428 ISD::CondCode::SETEQ);
8430 break;
8431 case MVT::v32i8:
8432 if (Subtarget.hasVLX() && Subtarget.hasVBMI())
8433 Opcode = X86ISD::VPERMV;
8434 else if (Subtarget.hasXOP()) {
8435 SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL);
8436 SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL);
8437 SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL);
8438 SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL);
8439 return DAG.getNode(
8440 ISD::CONCAT_VECTORS, DL, VT,
8441 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
8442 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
8443 } else if (Subtarget.hasAVX()) {
8444 SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
8445 SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
8446 SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
8447 SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
8448 auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
8449 ArrayRef<SDValue> Ops) {
8450 // Permute Lo and Hi and then select based on index range.
8451 // This works as PSHUFB uses bits[3:0] to permute elements and we don't
8452 // care about bit[7] as it's just an index vector.
8453 SDValue Idx = Ops[2];
8454 EVT VT = Idx.getValueType();
8455 return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
8456 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
8457 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
8458 ISD::CondCode::SETGT);
8460 SDValue Ops[] = {LoLo, HiHi, IndicesVec};
8461 return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops,
8462 PSHUFBBuilder);
8464 break;
8465 case MVT::v16i16:
8466 if (Subtarget.hasVLX() && Subtarget.hasBWI())
8467 Opcode = X86ISD::VPERMV;
8468 else if (Subtarget.hasAVX()) {
8469 // Scale to v32i8 and perform as v32i8.
8470 IndicesVec = ScaleIndices(IndicesVec, 2);
8471 return DAG.getBitcast(
8472 VT, createVariablePermute(
8473 MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec),
8474 DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget));
8476 break;
8477 case MVT::v8f32:
8478 case MVT::v8i32:
8479 if (Subtarget.hasAVX2())
8480 Opcode = X86ISD::VPERMV;
8481 else if (Subtarget.hasAVX()) {
8482 SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec);
8483 SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
8484 {0, 1, 2, 3, 0, 1, 2, 3});
8485 SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
8486 {4, 5, 6, 7, 4, 5, 6, 7});
8487 if (Subtarget.hasXOP())
8488 return DAG.getBitcast(
8489 VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32, LoLo, HiHi,
8490 IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
8491 // Permute Lo and Hi and then select based on index range.
8492 // This works as VPERMILPS only uses index bits[0:1] to permute elements.
8493 SDValue Res = DAG.getSelectCC(
8494 DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32),
8495 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec),
8496 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec),
8497 ISD::CondCode::SETGT);
8498 return DAG.getBitcast(VT, Res);
8500 break;
8501 case MVT::v4i64:
8502 case MVT::v4f64:
8503 if (Subtarget.hasAVX512()) {
8504 if (!Subtarget.hasVLX()) {
8505 MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8);
8506 SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG,
8507 SDLoc(SrcVec));
8508 IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget,
8509 DAG, SDLoc(IndicesVec));
8510 SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL,
8511 DAG, Subtarget);
8512 return extract256BitVector(Res, 0, DAG, DL);
8514 Opcode = X86ISD::VPERMV;
8515 } else if (Subtarget.hasAVX()) {
8516 SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec);
8517 SDValue LoLo =
8518 DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1});
8519 SDValue HiHi =
8520 DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3});
8521 // VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec.
8522 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
8523 if (Subtarget.hasXOP())
8524 return DAG.getBitcast(
8525 VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64, LoLo, HiHi,
8526 IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
8527 // Permute Lo and Hi and then select based on index range.
8528 // This works as VPERMILPD only uses index bit[1] to permute elements.
8529 SDValue Res = DAG.getSelectCC(
8530 DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64),
8531 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec),
8532 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec),
8533 ISD::CondCode::SETGT);
8534 return DAG.getBitcast(VT, Res);
8536 break;
8537 case MVT::v64i8:
8538 if (Subtarget.hasVBMI())
8539 Opcode = X86ISD::VPERMV;
8540 break;
8541 case MVT::v32i16:
8542 if (Subtarget.hasBWI())
8543 Opcode = X86ISD::VPERMV;
8544 break;
8545 case MVT::v16f32:
8546 case MVT::v16i32:
8547 case MVT::v8f64:
8548 case MVT::v8i64:
8549 if (Subtarget.hasAVX512())
8550 Opcode = X86ISD::VPERMV;
8551 break;
8553 if (!Opcode)
8554 return SDValue();
8556 assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) &&
8557 (VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 &&
8558 "Illegal variable permute shuffle type");
8560 uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits();
8561 if (Scale > 1)
8562 IndicesVec = ScaleIndices(IndicesVec, Scale);
8564 EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger();
8565 IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec);
8567 SrcVec = DAG.getBitcast(ShuffleVT, SrcVec);
8568 SDValue Res = Opcode == X86ISD::VPERMV
8569 ? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec)
8570 : DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec);
8571 return DAG.getBitcast(VT, Res);
8574 // Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
8575 // recognized as a permutation of a vector by indices in a non-constant vector.
8576 // (build_vector (extract_elt V, (extract_elt I, 0)),
8577 // (extract_elt V, (extract_elt I, 1)),
8578 // ...
8579 // ->
8580 // (vpermv I, V)
8582 // TODO: Handle undefs
8583 // TODO: Utilize pshufb and zero mask blending to support more efficient
8584 // construction of vectors with constant-0 elements.
8585 static SDValue
8586 LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
8587 const X86Subtarget &Subtarget) {
8588 SDValue SrcVec, IndicesVec;
8589 // Check for a match of the permute source vector and permute index elements.
8590 // This is done by checking that the i-th build_vector operand is of the form:
8591 // (extract_elt SrcVec, (extract_elt IndicesVec, i)).
8592 for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
8593 SDValue Op = V.getOperand(Idx);
8594 if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
8595 return SDValue();
8597 // If this is the first extract encountered in V, set the source vector,
8598 // otherwise verify the extract is from the previously defined source
8599 // vector.
8600 if (!SrcVec)
8601 SrcVec = Op.getOperand(0);
8602 else if (SrcVec != Op.getOperand(0))
8603 return SDValue();
8604 SDValue ExtractedIndex = Op->getOperand(1);
8605 // Peek through extends.
8606 if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
8607 ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
8608 ExtractedIndex = ExtractedIndex.getOperand(0);
8609 if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
8610 return SDValue();
8612 // If this is the first extract from the index vector candidate, set the
8613 // indices vector, otherwise verify the extract is from the previously
8614 // defined indices vector.
8615 if (!IndicesVec)
8616 IndicesVec = ExtractedIndex.getOperand(0);
8617 else if (IndicesVec != ExtractedIndex.getOperand(0))
8618 return SDValue();
8620 auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
8621 if (!PermIdx || PermIdx->getAPIntValue() != Idx)
8622 return SDValue();
8625 SDLoc DL(V);
8626 MVT VT = V.getSimpleValueType();
8627 return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
8630 SDValue
8631 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
8632 SDLoc dl(Op);
8634 MVT VT = Op.getSimpleValueType();
8635 MVT EltVT = VT.getVectorElementType();
8636 MVT OpEltVT = Op.getOperand(0).getSimpleValueType();
8637 unsigned NumElems = Op.getNumOperands();
8639 // Generate vectors for predicate vectors.
8640 if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
8641 return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
8643 if (VT.getVectorElementType() == MVT::bf16 &&
8644 (Subtarget.hasAVXNECONVERT() || Subtarget.hasBF16()))
8645 return LowerBUILD_VECTORvXbf16(Op, DAG, Subtarget);
8647 if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
8648 return VectorConstant;
8650 unsigned EVTBits = EltVT.getSizeInBits();
8651 APInt UndefMask = APInt::getZero(NumElems);
8652 APInt FrozenUndefMask = APInt::getZero(NumElems);
8653 APInt ZeroMask = APInt::getZero(NumElems);
8654 APInt NonZeroMask = APInt::getZero(NumElems);
8655 bool IsAllConstants = true;
8656 bool OneUseFrozenUndefs = true;
8657 SmallSet<SDValue, 8> Values;
8658 unsigned NumConstants = NumElems;
8659 for (unsigned i = 0; i < NumElems; ++i) {
8660 SDValue Elt = Op.getOperand(i);
8661 if (Elt.isUndef()) {
8662 UndefMask.setBit(i);
8663 continue;
8665 if (ISD::isFreezeUndef(Elt.getNode())) {
8666 OneUseFrozenUndefs = OneUseFrozenUndefs && Elt->hasOneUse();
8667 FrozenUndefMask.setBit(i);
8668 continue;
8670 Values.insert(Elt);
8671 if (!isIntOrFPConstant(Elt)) {
8672 IsAllConstants = false;
8673 NumConstants--;
8675 if (X86::isZeroNode(Elt)) {
8676 ZeroMask.setBit(i);
8677 } else {
8678 NonZeroMask.setBit(i);
8682 // All undef vector. Return an UNDEF.
8683 if (UndefMask.isAllOnes())
8684 return DAG.getUNDEF(VT);
8686 // All undef/freeze(undef) vector. Return a FREEZE UNDEF.
8687 if (OneUseFrozenUndefs && (UndefMask | FrozenUndefMask).isAllOnes())
8688 return DAG.getFreeze(DAG.getUNDEF(VT));
8690 // All undef/freeze(undef)/zero vector. Return a zero vector.
8691 if ((UndefMask | FrozenUndefMask | ZeroMask).isAllOnes())
8692 return getZeroVector(VT, Subtarget, DAG, dl);
8694 // If we have multiple FREEZE-UNDEF operands, we are likely going to end up
8695 // lowering into a suboptimal insertion sequence. Instead, thaw the UNDEF in
8696 // our source BUILD_VECTOR, create another FREEZE-UNDEF splat BUILD_VECTOR,
8697 // and blend the FREEZE-UNDEF operands back in.
8698 // FIXME: is this worthwhile even for a single FREEZE-UNDEF operand?
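// Illustrative example: for a v4i32 build_vector <x, freeze(undef),
// freeze(undef), y> this creates EltsBV = <x, undef, undef, y>, a splat of a
// single freeze(undef), and the blend mask <0, 5, 6, 3>, pulling the frozen
// lanes from the second vector.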
8699 if (unsigned NumFrozenUndefElts = FrozenUndefMask.popcount();
8700 NumFrozenUndefElts >= 2 && NumFrozenUndefElts < NumElems) {
8701 SmallVector<int, 16> BlendMask(NumElems, -1);
8702 SmallVector<SDValue, 16> Elts(NumElems, DAG.getUNDEF(OpEltVT));
8703 for (unsigned i = 0; i < NumElems; ++i) {
8704 if (UndefMask[i]) {
8705 BlendMask[i] = -1;
8706 continue;
8708 BlendMask[i] = i;
8709 if (!FrozenUndefMask[i])
8710 Elts[i] = Op.getOperand(i);
8711 else
8712 BlendMask[i] += NumElems;
8714 SDValue EltsBV = DAG.getBuildVector(VT, dl, Elts);
8715 SDValue FrozenUndefElt = DAG.getFreeze(DAG.getUNDEF(OpEltVT));
8716 SDValue FrozenUndefBV = DAG.getSplatBuildVector(VT, dl, FrozenUndefElt);
8717 return DAG.getVectorShuffle(VT, dl, EltsBV, FrozenUndefBV, BlendMask);
8720 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
8722 // If the upper elts of a ymm/zmm are undef/freeze(undef)/zero then we might
8723 // be better off lowering to a smaller build vector and padding with
8724 // undef/zero.
8725 if ((VT.is256BitVector() || VT.is512BitVector()) &&
8726 !isFoldableUseOfShuffle(BV)) {
8727 unsigned UpperElems = NumElems / 2;
8728 APInt UndefOrZeroMask = FrozenUndefMask | UndefMask | ZeroMask;
8729 unsigned NumUpperUndefsOrZeros = UndefOrZeroMask.countl_one();
8730 if (NumUpperUndefsOrZeros >= UpperElems) {
8731 if (VT.is512BitVector() &&
8732 NumUpperUndefsOrZeros >= (NumElems - (NumElems / 4)))
8733 UpperElems = NumElems - (NumElems / 4);
8734 // If freeze(undef) is in any upper elements, force to zero.
8735 bool UndefUpper = UndefMask.countl_one() >= UpperElems;
8736 MVT LowerVT = MVT::getVectorVT(EltVT, NumElems - UpperElems);
8737 SDValue NewBV =
8738 DAG.getBuildVector(LowerVT, dl, Op->ops().drop_back(UpperElems));
8739 return widenSubVector(VT, NewBV, !UndefUpper, Subtarget, DAG, dl);
8743 if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
8744 return AddSub;
8745 if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
8746 return HorizontalOp;
8747 if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
8748 return Broadcast;
8749 if (SDValue BitOp = lowerBuildVectorToBitOp(BV, Subtarget, DAG))
8750 return BitOp;
8752 unsigned NumZero = ZeroMask.popcount();
8753 unsigned NumNonZero = NonZeroMask.popcount();
8755 // If we are inserting one variable into a vector of non-zero constants, try
8756 // to avoid loading each constant element as a scalar. Load the constants as a
8757 // vector and then insert the variable scalar element. If insertion is not
8758 // supported, fall back to a shuffle to get the scalar blended with the
8759 // constants. Insertion into a zero vector is handled as a special-case
8760 // somewhere below here.
8761 if (NumConstants == NumElems - 1 && NumNonZero != 1 &&
8762 FrozenUndefMask.isZero() &&
8763 (isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) ||
8764 isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) {
8765 // Create an all-constant vector. The variable element in the old
8766 // build vector is replaced by undef in the constant vector. Save the
8767 // variable scalar element and its index for use in the insertelement.
8768 LLVMContext &Context = *DAG.getContext();
8769 Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context);
8770 SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType));
8771 SDValue VarElt;
8772 SDValue InsIndex;
8773 for (unsigned i = 0; i != NumElems; ++i) {
8774 SDValue Elt = Op.getOperand(i);
8775 if (auto *C = dyn_cast<ConstantSDNode>(Elt))
8776 ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
8777 else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
8778 ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
8779 else if (!Elt.isUndef()) {
8780 assert(!VarElt.getNode() && !InsIndex.getNode() &&
8781 "Expected one variable element in this vector");
8782 VarElt = Elt;
8783 InsIndex = DAG.getVectorIdxConstant(i, dl);
8786 Constant *CV = ConstantVector::get(ConstVecOps);
8787 SDValue DAGConstVec = DAG.getConstantPool(CV, VT);
8789 // The constants we just created may not be legal (e.g., floating point). We
8790 // must lower the vector right here because we cannot guarantee that we'll
8791 // legalize it before loading it. This is also why we could not just create
8792 // a new build vector here. If the build vector contains illegal constants,
8793 // it could get split back up into a series of insert elements.
8794 // TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD.
8795 SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG);
8796 MachineFunction &MF = DAG.getMachineFunction();
8797 MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
8798 SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
8799 unsigned InsertC = InsIndex->getAsZExtVal();
8800 unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
8801 if (InsertC < NumEltsInLow128Bits)
8802 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
8804 // There's no good way to insert into the high elements of a >128-bit
8805 // vector, so use shuffles to avoid an extract/insert sequence.
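// For example (illustrative): inserting into element 5 of a v8f32 uses the
// mask <0,1,2,3,4,8,6,7>, where index 8 refers to element 0 of the
// SCALAR_TO_VECTOR node holding the variable scalar.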
8806 assert(VT.getSizeInBits() > 128 && "Invalid insertion index?");
8807 assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector");
8808 SmallVector<int, 8> ShuffleMask;
8809 unsigned NumElts = VT.getVectorNumElements();
8810 for (unsigned i = 0; i != NumElts; ++i)
8811 ShuffleMask.push_back(i == InsertC ? NumElts : i);
8812 SDValue S2V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, VarElt);
8813 return DAG.getVectorShuffle(VT, dl, Ld, S2V, ShuffleMask);
8816 // Special case for a single non-zero, non-undef element.
8817 if (NumNonZero == 1) {
8818 unsigned Idx = NonZeroMask.countr_zero();
8819 SDValue Item = Op.getOperand(Idx);
8821 // If we have a constant or non-constant insertion into the low element of
8822 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
8823 // the rest of the elements. This will be matched as movd/movq/movss/movsd
8824 // depending on what the source datatype is.
8825 if (Idx == 0) {
8826 if (NumZero == 0)
8827 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
8829 if (EltVT == MVT::i32 || EltVT == MVT::f16 || EltVT == MVT::f32 ||
8830 EltVT == MVT::f64 || (EltVT == MVT::i64 && Subtarget.is64Bit()) ||
8831 (EltVT == MVT::i16 && Subtarget.hasFP16())) {
8832 assert((VT.is128BitVector() || VT.is256BitVector() ||
8833 VT.is512BitVector()) &&
8834 "Expected an SSE value type!");
8835 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
8836 // Turn it into a MOVL (i.e. movsh, movss, movsd, movw or movd) to a
8837 // zero vector.
8838 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
8841 // We can't directly insert an i8 or i16 into a vector, so zero extend
8842 // it to i32 first.
8843 if (EltVT == MVT::i16 || EltVT == MVT::i8) {
8844 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
8845 MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
8846 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
8847 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
8848 return DAG.getBitcast(VT, Item);
8852 // Is it a vector logical left shift?
8853 if (NumElems == 2 && Idx == 1 &&
8854 X86::isZeroNode(Op.getOperand(0)) &&
8855 !X86::isZeroNode(Op.getOperand(1))) {
8856 unsigned NumBits = VT.getSizeInBits();
8857 return getVShift(true, VT,
8858 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
8859 VT, Op.getOperand(1)),
8860 NumBits/2, DAG, *this, dl);
8863 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
8864 return SDValue();
8866 // Otherwise, if this is a vector with i32 or f32 elements, and the element
8867 // is a non-constant being inserted into an element other than the low one,
8868 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
8869 // movd/movss) to move this into the low element, then shuffle it into
8870 // place.
8871 if (EVTBits == 32) {
8872 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
8873 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
8877 // Splat is obviously ok. Let legalizer expand it to a shuffle.
8878 if (Values.size() == 1) {
8879 if (EVTBits == 32) {
8880 // Instead of a shuffle like this:
8881 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
8882 // Check if it's possible to issue this instead.
8883 // shuffle (vload ptr), undef, <1, 1, 1, 1>
8884 unsigned Idx = NonZeroMask.countr_zero();
8885 SDValue Item = Op.getOperand(Idx);
8886 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
8887 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
8889 return SDValue();
8892 // A vector full of immediates; various special cases are already
8893 // handled, so this is best done with a single constant-pool load.
8894 if (IsAllConstants)
8895 return SDValue();
8897 if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
8898 return V;
8900 // See if we can use a vector load to get all of the elements.
8902 SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
8903 if (SDValue LD =
8904 EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
8905 return LD;
8908 // If this is a splat of pairs of 32-bit elements, we can use a narrower
8909 // build_vector and broadcast it.
8910 // TODO: We could probably generalize this more.
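// For example (illustrative): v8i32 <a,b,a,b,a,b,a,b> becomes a v4i32
// build_vector <a,b,undef,undef>, bitcast to v2i64, broadcast to v4i64 and
// bitcast back to v8i32.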
8911 if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
8912 SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
8913 DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
8914 auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) {
8915 // Make sure all the even/odd operands match.
8916 for (unsigned i = 2; i != NumElems; ++i)
8917 if (Ops[i % 2] != Op.getOperand(i))
8918 return false;
8919 return true;
8921 if (CanSplat(Op, NumElems, Ops)) {
8922 MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
8923 MVT NarrowVT = MVT::getVectorVT(EltVT, 4);
8924 // Create a new build vector and cast to v2i64/v2f64.
8925 SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
8926 DAG.getBuildVector(NarrowVT, dl, Ops));
8927 // Broadcast from v2i64/v2f64 and cast to final VT.
8928 MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems / 2);
8929 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT,
8930 NewBV));
8934 // For AVX-length vectors, build the individual 128-bit pieces and use
8935 // shuffles to put them in place.
8936 if (VT.getSizeInBits() > 128) {
8937 MVT HVT = MVT::getVectorVT(EltVT, NumElems / 2);
8939 // Build both the lower and upper subvector.
8940 SDValue Lower =
8941 DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2));
8942 SDValue Upper = DAG.getBuildVector(
8943 HVT, dl, Op->ops().slice(NumElems / 2, NumElems /2));
8945 // Recreate the wider vector with the lower and upper part.
8946 return concatSubVectors(Lower, Upper, DAG, dl);
8949 // Let legalizer expand 2-wide build_vectors.
8950 if (EVTBits == 64) {
8951 if (NumNonZero == 1) {
8952 // One half is zero or undef.
8953 unsigned Idx = NonZeroMask.countr_zero();
8954 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
8955 Op.getOperand(Idx));
8956 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
8958 return SDValue();
8961 // If element VT is < 32 bits, convert it to inserts into a zero vector.
8962 if (EVTBits == 8 && NumElems == 16)
8963 if (SDValue V = LowerBuildVectorv16i8(Op, NonZeroMask, NumNonZero, NumZero,
8964 DAG, Subtarget))
8965 return V;
8967 if (EltVT == MVT::i16 && NumElems == 8)
8968 if (SDValue V = LowerBuildVectorv8i16(Op, NonZeroMask, NumNonZero, NumZero,
8969 DAG, Subtarget))
8970 return V;
8972 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
8973 if (EVTBits == 32 && NumElems == 4)
8974 if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
8975 return V;
8977 // If element VT is == 32 bits, turn it into a number of shuffles.
8978 if (NumElems == 4 && NumZero > 0) {
8979 SmallVector<SDValue, 8> Ops(NumElems);
8980 for (unsigned i = 0; i < 4; ++i) {
8981 bool isZero = !NonZeroMask[i];
8982 if (isZero)
8983 Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
8984 else
8985 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
8988 for (unsigned i = 0; i < 2; ++i) {
8989 switch (NonZeroMask.extractBitsAsZExtValue(2, i * 2)) {
8990 default: llvm_unreachable("Unexpected NonZero count");
8991 case 0:
8992 Ops[i] = Ops[i*2]; // Must be a zero vector.
8993 break;
8994 case 1:
8995 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
8996 break;
8997 case 2:
8998 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
8999 break;
9000 case 3:
9001 Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
9002 break;
9006 bool Reverse1 = NonZeroMask.extractBitsAsZExtValue(2, 0) == 2;
9007 bool Reverse2 = NonZeroMask.extractBitsAsZExtValue(2, 2) == 2;
9008 int MaskVec[] = {
9009 Reverse1 ? 1 : 0,
9010 Reverse1 ? 0 : 1,
9011 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
9012 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
9014 return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
9017 assert(Values.size() > 1 && "Expected non-undef and non-splat vector");
9019 // Check for a build vector from mostly shuffle plus few inserting.
9020 if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
9021 return Sh;
9023 // For SSE 4.1, use insertps to put the high elements into the low element.
9024 if (Subtarget.hasSSE41() && EltVT != MVT::f16) {
9025 SDValue Result;
9026 if (!Op.getOperand(0).isUndef())
9027 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
9028 else
9029 Result = DAG.getUNDEF(VT);
9031 for (unsigned i = 1; i < NumElems; ++i) {
9032 if (Op.getOperand(i).isUndef()) continue;
9033 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
9034 Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
9036 return Result;
9039 // Otherwise, expand into a number of unpckl*; start by extending each of
9040 // our (non-undef) elements to the full vector width with the element in the
9041 // bottom slot of the vector (which generates no code for SSE).
9042 SmallVector<SDValue, 8> Ops(NumElems);
9043 for (unsigned i = 0; i < NumElems; ++i) {
9044 if (!Op.getOperand(i).isUndef())
9045 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
9046 else
9047 Ops[i] = DAG.getUNDEF(VT);
9050 // Next, we iteratively mix elements, e.g. for v4f32:
9051 // Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0>
9052 // : unpcklps 2, 3 ==> Y: <?, ?, 3, 2>
9053 // Step 2: unpcklpd X, Y ==> <3, 2, 1, 0>
9054 for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) {
9055 // Generate scaled UNPCKL shuffle mask.
9056 SmallVector<int, 16> Mask;
9057 for(unsigned i = 0; i != Scale; ++i)
9058 Mask.push_back(i);
9059 for (unsigned i = 0; i != Scale; ++i)
9060 Mask.push_back(NumElems+i);
9061 Mask.append(NumElems - Mask.size(), SM_SentinelUndef);
9063 for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i)
9064 Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask);
9066 return Ops[0];
9069 // 256-bit AVX can use the vinsertf128 instruction
9070 // to create 256-bit vectors from two other 128-bit ones.
9071 // TODO: Detect subvector broadcast here instead of DAG combine?
9072 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
9073 const X86Subtarget &Subtarget) {
9074 SDLoc dl(Op);
9075 MVT ResVT = Op.getSimpleValueType();
9077 assert((ResVT.is256BitVector() ||
9078 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
9080 unsigned NumOperands = Op.getNumOperands();
9081 unsigned NumFreezeUndef = 0;
9082 unsigned NumZero = 0;
9083 unsigned NumNonZero = 0;
9084 unsigned NonZeros = 0;
9085 for (unsigned i = 0; i != NumOperands; ++i) {
9086 SDValue SubVec = Op.getOperand(i);
9087 if (SubVec.isUndef())
9088 continue;
9089 if (ISD::isFreezeUndef(SubVec.getNode())) {
9090 // If the freeze(undef) has multiple uses then we must fold to zero.
9091 if (SubVec.hasOneUse())
9092 ++NumFreezeUndef;
9093 else
9094 ++NumZero;
9096 else if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
9097 ++NumZero;
9098 else {
9099 assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
9100 NonZeros |= 1 << i;
9101 ++NumNonZero;
9105 // If we have more than 2 non-zeros, build each half separately.
9106 if (NumNonZero > 2) {
9107 MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
9108 ArrayRef<SDUse> Ops = Op->ops();
9109 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9110 Ops.slice(0, NumOperands/2));
9111 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9112 Ops.slice(NumOperands/2));
9113 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
9116 // Otherwise, build it up through insert_subvectors.
9117 SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
9118 : (NumFreezeUndef ? DAG.getFreeze(DAG.getUNDEF(ResVT))
9119 : DAG.getUNDEF(ResVT));
9121 MVT SubVT = Op.getOperand(0).getSimpleValueType();
9122 unsigned NumSubElems = SubVT.getVectorNumElements();
9123 for (unsigned i = 0; i != NumOperands; ++i) {
9124 if ((NonZeros & (1 << i)) == 0)
9125 continue;
9127 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec,
9128 Op.getOperand(i),
9129 DAG.getIntPtrConstant(i * NumSubElems, dl));
9132 return Vec;
9135 // Returns true if the given node is a type promotion (by concatenating i1
9136 // zeros) of the result of a node that already zeros all upper bits of a
9137 // k-register.
9138 // TODO: Merge this with LowerAVXCONCAT_VECTORS?
9139 static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
9140 const X86Subtarget &Subtarget,
9141 SelectionDAG & DAG) {
9142 SDLoc dl(Op);
9143 MVT ResVT = Op.getSimpleValueType();
9144 unsigned NumOperands = Op.getNumOperands();
9146 assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
9147 "Unexpected number of operands in CONCAT_VECTORS");
9149 uint64_t Zeros = 0;
9150 uint64_t NonZeros = 0;
9151 for (unsigned i = 0; i != NumOperands; ++i) {
9152 SDValue SubVec = Op.getOperand(i);
9153 if (SubVec.isUndef())
9154 continue;
9155 assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
9156 if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
9157 Zeros |= (uint64_t)1 << i;
9158 else
9159 NonZeros |= (uint64_t)1 << i;
9162 unsigned NumElems = ResVT.getVectorNumElements();
9164 // If we are inserting a non-zero vector and there are zeros in the LSBs and
9165 // undefs in the MSBs, we need to emit a KSHIFTL. The generic lowering to
9166 // insert_subvector will give us two kshifts.
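// For example (illustrative): for concat(v4i1 zeros, X, undef, undef) we can
// widen X and emit a single KSHIFTL by 4, placing X in elements [4,8) with
// zeros shifted in below it.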
9167 if (isPowerOf2_64(NonZeros) && Zeros != 0 && NonZeros > Zeros &&
9168 Log2_64(NonZeros) != NumOperands - 1) {
9169 unsigned Idx = Log2_64(NonZeros);
9170 SDValue SubVec = Op.getOperand(Idx);
9171 unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
9172 MVT ShiftVT = widenMaskVectorType(ResVT, Subtarget);
9173 Op = widenSubVector(ShiftVT, SubVec, false, Subtarget, DAG, dl);
9174 Op = DAG.getNode(X86ISD::KSHIFTL, dl, ShiftVT, Op,
9175 DAG.getTargetConstant(Idx * SubVecNumElts, dl, MVT::i8));
9176 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResVT, Op,
9177 DAG.getIntPtrConstant(0, dl));
9180 // If there are zero or one non-zeros we can handle this very simply.
9181 if (NonZeros == 0 || isPowerOf2_64(NonZeros)) {
9182 SDValue Vec = Zeros ? DAG.getConstant(0, dl, ResVT) : DAG.getUNDEF(ResVT);
9183 if (!NonZeros)
9184 return Vec;
9185 unsigned Idx = Log2_64(NonZeros);
9186 SDValue SubVec = Op.getOperand(Idx);
9187 unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
9188 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
9189 DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
9192 if (NumOperands > 2) {
9193 MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
9194 ArrayRef<SDUse> Ops = Op->ops();
9195 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9196 Ops.slice(0, NumOperands/2));
9197 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9198 Ops.slice(NumOperands/2));
9199 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
9202 assert(llvm::popcount(NonZeros) == 2 && "Simple cases not handled?");
9204 if (ResVT.getVectorNumElements() >= 16)
9205 return Op; // The operation is legal with KUNPCK
9207 SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
9208 DAG.getUNDEF(ResVT), Op.getOperand(0),
9209 DAG.getIntPtrConstant(0, dl));
9210 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
9211 DAG.getIntPtrConstant(NumElems/2, dl));
9214 static SDValue LowerCONCAT_VECTORS(SDValue Op,
9215 const X86Subtarget &Subtarget,
9216 SelectionDAG &DAG) {
9217 MVT VT = Op.getSimpleValueType();
9218 if (VT.getVectorElementType() == MVT::i1)
9219 return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
9221 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
9222 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
9223 Op.getNumOperands() == 4)));
9225 // AVX can use the vinsertf128 instruction to create 256-bit vectors
9226 // from two other 128-bit ones.
9228 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
9229 return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
9232 //===----------------------------------------------------------------------===//
9233 // Vector shuffle lowering
9235 // This is an experimental code path for lowering vector shuffles on x86. It is
9236 // designed to handle arbitrary vector shuffles and blends, gracefully
9237 // degrading performance as necessary. It works hard to recognize idiomatic
9238 // shuffles and lower them to optimal instruction patterns without leaving
9239 // a framework that allows reasonably efficient handling of all vector shuffle
9240 // patterns.
9241 //===----------------------------------------------------------------------===//
9243 /// Tiny helper function to identify a no-op mask.
9245 /// This is a somewhat boring predicate function. It checks whether the mask
9246 /// array input, which is assumed to be a single-input shuffle mask of the kind
9247 /// used by the X86 shuffle instructions (not a fully general
9248 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
9249 /// in-place shuffle are 'no-op's.
9250 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
9251 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
9252 assert(Mask[i] >= -1 && "Out of bound mask element!");
9253 if (Mask[i] >= 0 && Mask[i] != i)
9254 return false;
9256 return true;
9259 /// Test whether there are elements crossing LaneSizeInBits lanes in this
9260 /// shuffle mask.
9262 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
9263 /// and we routinely test for these.
9264 static bool isLaneCrossingShuffleMask(unsigned LaneSizeInBits,
9265 unsigned ScalarSizeInBits,
9266 ArrayRef<int> Mask) {
9267 assert(LaneSizeInBits && ScalarSizeInBits &&
9268 (LaneSizeInBits % ScalarSizeInBits) == 0 &&
9269 "Illegal shuffle lane size");
9270 int LaneSize = LaneSizeInBits / ScalarSizeInBits;
9271 int Size = Mask.size();
9272 for (int i = 0; i < Size; ++i)
9273 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
9274 return true;
9275 return false;
9278 /// Test whether there are elements crossing 128-bit lanes in this
9279 /// shuffle mask.
9280 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
9281 return isLaneCrossingShuffleMask(128, VT.getScalarSizeInBits(), Mask);
9284 /// Test whether elements in each LaneSizeInBits lane in this shuffle mask come
9285 /// from multiple lanes - this is different from isLaneCrossingShuffleMask to
9286 /// better support 'repeated mask + lane permute' style shuffles.
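/// For example, in the v8f32 mask <0,4,1,5,2,6,3,7> each 128-bit destination
/// lane pulls elements from both source lanes, so the mask is multi-lane.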
9287 static bool isMultiLaneShuffleMask(unsigned LaneSizeInBits,
9288 unsigned ScalarSizeInBits,
9289 ArrayRef<int> Mask) {
9290 assert(LaneSizeInBits && ScalarSizeInBits &&
9291 (LaneSizeInBits % ScalarSizeInBits) == 0 &&
9292 "Illegal shuffle lane size");
9293 int NumElts = Mask.size();
9294 int NumEltsPerLane = LaneSizeInBits / ScalarSizeInBits;
9295 int NumLanes = NumElts / NumEltsPerLane;
9296 if (NumLanes > 1) {
9297 for (int i = 0; i != NumLanes; ++i) {
9298 int SrcLane = -1;
9299 for (int j = 0; j != NumEltsPerLane; ++j) {
9300 int M = Mask[(i * NumEltsPerLane) + j];
9301 if (M < 0)
9302 continue;
9303 int Lane = (M % NumElts) / NumEltsPerLane;
9304 if (SrcLane >= 0 && SrcLane != Lane)
9305 return true;
9306 SrcLane = Lane;
9310 return false;
9313 /// Test whether a shuffle mask is equivalent within each sub-lane.
9315 /// This checks a shuffle mask to see if it is performing the same
9316 /// lane-relative shuffle in each sub-lane. This trivially implies
9317 /// that it is also not lane-crossing. It may however involve a blend from the
9318 /// same lane of a second vector.
9320 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
9321 /// non-trivial to compute in the face of undef lanes. The representation is
9322 /// suitable for use with existing 128-bit shuffles as entries from the second
9323 /// vector have been remapped to [LaneSize, 2*LaneSize).
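/// For example, the v8f32 mask <0,9,2,11,4,13,6,15> repeats per 128-bit lane
/// as RepeatedMask = <0,5,2,7>, with the second-vector elements remapped into
/// the [4,8) range.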
9324 static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
9325 ArrayRef<int> Mask,
9326 SmallVectorImpl<int> &RepeatedMask) {
9327 auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
9328 RepeatedMask.assign(LaneSize, -1);
9329 int Size = Mask.size();
9330 for (int i = 0; i < Size; ++i) {
9331 assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
9332 if (Mask[i] < 0)
9333 continue;
9334 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
9335 // This entry crosses lanes, so there is no way to model this shuffle.
9336 return false;
9338 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
9339 // Adjust second vector indices to start at LaneSize instead of Size.
9340 int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
9341 : Mask[i] % LaneSize + LaneSize;
9342 if (RepeatedMask[i % LaneSize] < 0)
9343 // This is the first non-undef entry in this slot of a 128-bit lane.
9344 RepeatedMask[i % LaneSize] = LocalM;
9345 else if (RepeatedMask[i % LaneSize] != LocalM)
9346 // Found a mismatch with the repeated mask.
9347 return false;
9349 return true;
9352 /// Test whether a shuffle mask is equivalent within each 128-bit lane.
9353 static bool
9354 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
9355 SmallVectorImpl<int> &RepeatedMask) {
9356 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
9359 static bool
9360 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
9361 SmallVector<int, 32> RepeatedMask;
9362 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
9365 /// Test whether a shuffle mask is equivalent within each 256-bit lane.
9366 static bool
9367 is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
9368 SmallVectorImpl<int> &RepeatedMask) {
9369 return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
9372 /// Test whether a target shuffle mask is equivalent within each sub-lane.
9373 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
9374 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits,
9375 unsigned EltSizeInBits,
9376 ArrayRef<int> Mask,
9377 SmallVectorImpl<int> &RepeatedMask) {
9378 int LaneSize = LaneSizeInBits / EltSizeInBits;
9379 RepeatedMask.assign(LaneSize, SM_SentinelUndef);
9380 int Size = Mask.size();
9381 for (int i = 0; i < Size; ++i) {
9382 assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
9383 if (Mask[i] == SM_SentinelUndef)
9384 continue;
9385 if (Mask[i] == SM_SentinelZero) {
9386 if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
9387 return false;
9388 RepeatedMask[i % LaneSize] = SM_SentinelZero;
9389 continue;
9391 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
9392 // This entry crosses lanes, so there is no way to model this shuffle.
9393 return false;
9395 // Handle the in-lane shuffles by detecting if and when they repeat. Adjust
9396 // later vector indices to start at multiples of LaneSize instead of Size.
9397 int LaneM = Mask[i] / Size;
9398 int LocalM = (Mask[i] % LaneSize) + (LaneM * LaneSize);
9399 if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
9400 // This is the first non-undef entry in this slot of a 128-bit lane.
9401 RepeatedMask[i % LaneSize] = LocalM;
9402 else if (RepeatedMask[i % LaneSize] != LocalM)
9403 // Found a mismatch with the repeated mask.
9404 return false;
9406 return true;
9409 /// Test whether a target shuffle mask is equivalent within each sub-lane.
9410 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
9411 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
9412 ArrayRef<int> Mask,
9413 SmallVectorImpl<int> &RepeatedMask) {
9414 return isRepeatedTargetShuffleMask(LaneSizeInBits, VT.getScalarSizeInBits(),
9415 Mask, RepeatedMask);
9418 /// Checks whether the vector elements referenced by two shuffle masks are
9419 /// equivalent.
9420 static bool IsElementEquivalent(int MaskSize, SDValue Op, SDValue ExpectedOp,
9421 int Idx, int ExpectedIdx) {
9422 assert(0 <= Idx && Idx < MaskSize && 0 <= ExpectedIdx &&
9423 ExpectedIdx < MaskSize && "Out of range element index");
9424 if (!Op || !ExpectedOp || Op.getOpcode() != ExpectedOp.getOpcode())
9425 return false;
9427 switch (Op.getOpcode()) {
9428 case ISD::BUILD_VECTOR:
9429 // If the values are build vectors, we can look through them to find
9430 // equivalent inputs that make the shuffles equivalent.
9431 // TODO: Handle MaskSize != Op.getNumOperands()?
9432 if (MaskSize == (int)Op.getNumOperands() &&
9433 MaskSize == (int)ExpectedOp.getNumOperands())
9434 return Op.getOperand(Idx) == ExpectedOp.getOperand(ExpectedIdx);
9435 break;
9436 case X86ISD::VBROADCAST:
9437 case X86ISD::VBROADCAST_LOAD:
9438 // TODO: Handle MaskSize != Op.getValueType().getVectorNumElements()?
9439 return (Op == ExpectedOp &&
9440 (int)Op.getValueType().getVectorNumElements() == MaskSize);
9441 case X86ISD::HADD:
9442 case X86ISD::HSUB:
9443 case X86ISD::FHADD:
9444 case X86ISD::FHSUB:
9445 case X86ISD::PACKSS:
9446 case X86ISD::PACKUS:
9447 // HOP(X,X) can refer to the elt from the lower/upper half of a lane.
9448 // TODO: Handle MaskSize != NumElts?
9449 // TODO: Handle HOP(X,Y) vs HOP(Y,X) equivalence cases.
9450 if (Op == ExpectedOp && Op.getOperand(0) == Op.getOperand(1)) {
9451 MVT VT = Op.getSimpleValueType();
9452 int NumElts = VT.getVectorNumElements();
9453 if (MaskSize == NumElts) {
9454 int NumLanes = VT.getSizeInBits() / 128;
9455 int NumEltsPerLane = NumElts / NumLanes;
9456 int NumHalfEltsPerLane = NumEltsPerLane / 2;
9457 bool SameLane =
9458 (Idx / NumEltsPerLane) == (ExpectedIdx / NumEltsPerLane);
9459 bool SameElt =
9460 (Idx % NumHalfEltsPerLane) == (ExpectedIdx % NumHalfEltsPerLane);
9461 return SameLane && SameElt;
9464 break;
9467 return false;
9470 /// Checks whether a shuffle mask is equivalent to an explicit list of
9471 /// arguments.
9473 /// This is a fast way to test a shuffle mask against a fixed pattern:
9475 /// if (isShuffleEquivalent(Mask, 3, 2, {1, 0})) { ... }
9477 /// It returns true if the mask is exactly as wide as the argument list, and
9478 /// each element of the mask is either -1 (signifying undef) or the value given
9479 /// in the argument.
9480 static bool isShuffleEquivalent(ArrayRef<int> Mask, ArrayRef<int> ExpectedMask,
9481 SDValue V1 = SDValue(),
9482 SDValue V2 = SDValue()) {
9483 int Size = Mask.size();
9484 if (Size != (int)ExpectedMask.size())
9485 return false;
9487 for (int i = 0; i < Size; ++i) {
9488 assert(Mask[i] >= -1 && "Out of bound mask element!");
9489 int MaskIdx = Mask[i];
9490 int ExpectedIdx = ExpectedMask[i];
9491 if (0 <= MaskIdx && MaskIdx != ExpectedIdx) {
9492 SDValue MaskV = MaskIdx < Size ? V1 : V2;
9493 SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
9494 MaskIdx = MaskIdx < Size ? MaskIdx : (MaskIdx - Size);
9495 ExpectedIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
9496 if (!IsElementEquivalent(Size, MaskV, ExpectedV, MaskIdx, ExpectedIdx))
9497 return false;
9500 return true;
9503 /// Checks whether a target shuffle mask is equivalent to an explicit pattern.
9505 /// The masks must be exactly the same width.
9507 /// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
9508 /// value in ExpectedMask is always accepted. Otherwise the indices must match.
9510 /// SM_SentinelZero is accepted as a valid negative index but must match in
9511 /// both, or via a known bits test.
9512 static bool isTargetShuffleEquivalent(MVT VT, ArrayRef<int> Mask,
9513 ArrayRef<int> ExpectedMask,
9514 const SelectionDAG &DAG,
9515 SDValue V1 = SDValue(),
9516 SDValue V2 = SDValue()) {
9517 int Size = Mask.size();
9518 if (Size != (int)ExpectedMask.size())
9519 return false;
9520 assert(llvm::all_of(ExpectedMask,
9521 [Size](int M) { return isInRange(M, 0, 2 * Size); }) &&
9522 "Illegal target shuffle mask");
9524 // Check for out-of-range target shuffle mask indices.
9525 if (!isUndefOrZeroOrInRange(Mask, 0, 2 * Size))
9526 return false;
9528 // Don't use V1/V2 if they're not the same size as the shuffle mask type.
9529 if (V1 && (V1.getValueSizeInBits() != VT.getSizeInBits() ||
9530 !V1.getValueType().isVector()))
9531 V1 = SDValue();
9532 if (V2 && (V2.getValueSizeInBits() != VT.getSizeInBits() ||
9533 !V2.getValueType().isVector()))
9534 V2 = SDValue();
9536 APInt ZeroV1 = APInt::getZero(Size);
9537 APInt ZeroV2 = APInt::getZero(Size);
9539 for (int i = 0; i < Size; ++i) {
9540 int MaskIdx = Mask[i];
9541 int ExpectedIdx = ExpectedMask[i];
9542 if (MaskIdx == SM_SentinelUndef || MaskIdx == ExpectedIdx)
9543 continue;
9544 if (MaskIdx == SM_SentinelZero) {
9545 // If we need this expected index to be a zero element, then update the
9546 // relevant zero mask and perform the known-bits checks at the end to
9547 // minimize repeated computation.
9548 SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
9549 if (ExpectedV &&
9550 Size == (int)ExpectedV.getValueType().getVectorNumElements()) {
9551 int BitIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
9552 APInt &ZeroMask = ExpectedIdx < Size ? ZeroV1 : ZeroV2;
9553 ZeroMask.setBit(BitIdx);
9554 continue;
9557 if (MaskIdx >= 0) {
9558 SDValue MaskV = MaskIdx < Size ? V1 : V2;
9559 SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
9560 MaskIdx = MaskIdx < Size ? MaskIdx : (MaskIdx - Size);
9561 ExpectedIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
9562 if (IsElementEquivalent(Size, MaskV, ExpectedV, MaskIdx, ExpectedIdx))
9563 continue;
9565 return false;
9567 return (ZeroV1.isZero() || DAG.MaskedVectorIsZero(V1, ZeroV1)) &&
9568 (ZeroV2.isZero() || DAG.MaskedVectorIsZero(V2, ZeroV2));
9571 // Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd
9572 // instructions.
9573 static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT,
9574 const SelectionDAG &DAG) {
9575 if (VT != MVT::v8i32 && VT != MVT::v8f32)
9576 return false;
9578 SmallVector<int, 8> Unpcklwd;
9579 createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
9580 /* Unary = */ false);
9581 SmallVector<int, 8> Unpckhwd;
9582 createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
9583 /* Unary = */ false);
9584 bool IsUnpackwdMask = (isTargetShuffleEquivalent(VT, Mask, Unpcklwd, DAG) ||
9585 isTargetShuffleEquivalent(VT, Mask, Unpckhwd, DAG));
9586 return IsUnpackwdMask;
9589 static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask,
9590 const SelectionDAG &DAG) {
9591 // Create 128-bit vector type based on mask size.
9592 MVT EltVT = MVT::getIntegerVT(128 / Mask.size());
9593 MVT VT = MVT::getVectorVT(EltVT, Mask.size());
9595 // We can't assume a canonical shuffle mask, so try the commuted version too.
9596 SmallVector<int, 4> CommutedMask(Mask);
9597 ShuffleVectorSDNode::commuteMask(CommutedMask);
9599 // Match any of unary/binary or low/high.
9600 for (unsigned i = 0; i != 4; ++i) {
9601 SmallVector<int, 16> UnpackMask;
9602 createUnpackShuffleMask(VT, UnpackMask, (i >> 1) % 2, i % 2);
9603 if (isTargetShuffleEquivalent(VT, Mask, UnpackMask, DAG) ||
9604 isTargetShuffleEquivalent(VT, CommutedMask, UnpackMask, DAG))
9605 return true;
9607 return false;
9610 /// Return true if a shuffle mask chooses elements identically in its top and
9611 /// bottom halves. For example, any splat mask has the same top and bottom
9612 /// halves. If an element is undefined in only one half of the mask, the halves
9613 /// are not considered identical.
9614 static bool hasIdenticalHalvesShuffleMask(ArrayRef<int> Mask) {
9615 assert(Mask.size() % 2 == 0 && "Expecting even number of elements in mask");
9616 unsigned HalfSize = Mask.size() / 2;
9617 for (unsigned i = 0; i != HalfSize; ++i) {
9618 if (Mask[i] != Mask[i + HalfSize])
9619 return false;
9621 return true;
9624 /// Get a 4-lane 8-bit shuffle immediate for a mask.
9626 /// This helper function produces an 8-bit shuffle immediate corresponding to
9627 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
9628 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
9629 /// example.
9631 /// NB: We rely heavily on "undef" masks preserving the input lane.
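/// For example, Mask = <3,1,2,0> encodes as
/// (0 << 6) | (2 << 4) | (1 << 2) | 3 = 0x27.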
9632 static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
9633 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
9634 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
9635 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
9636 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
9637 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
9639 // If the mask only uses one non-undef element, then fully 'splat' it to
9640 // improve later broadcast matching.
9641 int FirstIndex = find_if(Mask, [](int M) { return M >= 0; }) - Mask.begin();
9642 assert(0 <= FirstIndex && FirstIndex < 4 && "All undef shuffle mask");
9644 int FirstElt = Mask[FirstIndex];
9645 if (all_of(Mask, [FirstElt](int M) { return M < 0 || M == FirstElt; }))
9646 return (FirstElt << 6) | (FirstElt << 4) | (FirstElt << 2) | FirstElt;
9648 unsigned Imm = 0;
9649 Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
9650 Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
9651 Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
9652 Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
9653 return Imm;
9656 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
9657 SelectionDAG &DAG) {
9658 return DAG.getTargetConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
9661 // The shuffle result is expected to be of the form:
9662 // 0*a[0] 0*a[1] ... 0*a[n], n >= 0, where '0*' denotes zero or more zero
9663 // elements and the a[] elements appear in ascending order. Each element of
9664 // Zeroable corresponds to a particular Mask element, as described in the
9666 // computeZeroableShuffleElements function. The function looks for a sub-mask
9667 // whose non-zero elements are in increasing order; if one exists, it returns true.
9668 static bool isNonZeroElementsInOrder(const APInt &Zeroable,
9669 ArrayRef<int> Mask, const EVT &VectorType,
9670 bool &IsZeroSideLeft) {
9671 int NextElement = -1;
9672 // Check if the Mask's nonzero elements are in increasing order.
9673 for (int i = 0, e = Mask.size(); i < e; i++) {
9674 // Check that the mask's zero elements are built only from zeros.
9675 assert(Mask[i] >= -1 && "Out of bound mask element!");
9676 if (Mask[i] < 0)
9677 return false;
9678 if (Zeroable[i])
9679 continue;
9680 // Find the lowest non-zero element.
9681 if (NextElement < 0) {
9682 NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
9683 IsZeroSideLeft = NextElement != 0;
9685 // Exit if the mask's non-zero elements are not in increasing order.
9686 if (NextElement != Mask[i])
9687 return false;
9688 NextElement++;
9690 return true;
9693 /// Try to lower a shuffle with a single PSHUFB of V1 or V2.
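/// For example (illustrative), a v4i32 shuffle mask <1, undef, zeroable, 0> on
/// V1 becomes the byte mask <4,5,6,7, undef x4, 0x80 x4, 0,1,2,3>.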
9694 static SDValue lowerShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
9695 ArrayRef<int> Mask, SDValue V1,
9696 SDValue V2, const APInt &Zeroable,
9697 const X86Subtarget &Subtarget,
9698 SelectionDAG &DAG) {
9699 int Size = Mask.size();
9700 int LaneSize = 128 / VT.getScalarSizeInBits();
9701 const int NumBytes = VT.getSizeInBits() / 8;
9702 const int NumEltBytes = VT.getScalarSizeInBits() / 8;
9704 assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
9705 (Subtarget.hasAVX2() && VT.is256BitVector()) ||
9706 (Subtarget.hasBWI() && VT.is512BitVector()));
9708 SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
9709 // Sign bit set in i8 mask means zero element.
9710 SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
9712 SDValue V;
9713 for (int i = 0; i < NumBytes; ++i) {
9714 int M = Mask[i / NumEltBytes];
9715 if (M < 0) {
9716 PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
9717 continue;
9719 if (Zeroable[i / NumEltBytes]) {
9720 PSHUFBMask[i] = ZeroMask;
9721 continue;
9724 // We can only use a single input of V1 or V2.
9725 SDValue SrcV = (M >= Size ? V2 : V1);
9726 if (V && V != SrcV)
9727 return SDValue();
9728 V = SrcV;
9729 M %= Size;
9731 // PSHUFB can't cross lanes, so ensure this doesn't happen.
9732 if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
9733 return SDValue();
9735 M = M % LaneSize;
9736 M = M * NumEltBytes + (i % NumEltBytes);
9737 PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
9739 assert(V && "Failed to find a source input");
9741 MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
9742 return DAG.getBitcast(
9743 VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
9744 DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
9747 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
9748 const X86Subtarget &Subtarget, SelectionDAG &DAG,
9749 const SDLoc &dl);
9751 // X86 has a dedicated shuffle that can be lowered to VEXPAND.
9752 static SDValue lowerShuffleToEXPAND(const SDLoc &DL, MVT VT,
9753 const APInt &Zeroable,
9754 ArrayRef<int> Mask, SDValue &V1,
9755 SDValue &V2, SelectionDAG &DAG,
9756 const X86Subtarget &Subtarget) {
9757 bool IsLeftZeroSide = true;
9758 if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
9759 IsLeftZeroSide))
9760 return SDValue();
9761 unsigned VEXPANDMask = (~Zeroable).getZExtValue();
9762 MVT IntegerType =
9763 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
9764 SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
9765 unsigned NumElts = VT.getVectorNumElements();
9766 assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
9767 "Unexpected number of vector elements");
9768 SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
9769 Subtarget, DAG, DL);
9770 SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
9771 SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
9772 return DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector, ZeroVector, VMask);
9775 static bool matchShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
9776 unsigned &UnpackOpcode, bool IsUnary,
9777 ArrayRef<int> TargetMask, const SDLoc &DL,
9778 SelectionDAG &DAG,
9779 const X86Subtarget &Subtarget) {
9780 int NumElts = VT.getVectorNumElements();
9782 bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
9783 for (int i = 0; i != NumElts; i += 2) {
9784 int M1 = TargetMask[i + 0];
9785 int M2 = TargetMask[i + 1];
9786 Undef1 &= (SM_SentinelUndef == M1);
9787 Undef2 &= (SM_SentinelUndef == M2);
9788 Zero1 &= isUndefOrZero(M1);
9789 Zero2 &= isUndefOrZero(M2);
9791 assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
9792 "Zeroable shuffle detected");
9794 // Attempt to match the target mask against the unpack lo/hi mask patterns.
9795 SmallVector<int, 64> Unpckl, Unpckh;
9796 createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
9797 if (isTargetShuffleEquivalent(VT, TargetMask, Unpckl, DAG, V1,
9798 (IsUnary ? V1 : V2))) {
9799 UnpackOpcode = X86ISD::UNPCKL;
9800 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
9801 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
9802 return true;
9805 createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
9806 if (isTargetShuffleEquivalent(VT, TargetMask, Unpckh, DAG, V1,
9807 (IsUnary ? V1 : V2))) {
9808 UnpackOpcode = X86ISD::UNPCKH;
9809 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
9810 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
9811 return true;
9814 // If a unary shuffle, attempt to match as an unpack lo/hi with zero.
9815 if (IsUnary && (Zero1 || Zero2)) {
9816 // Don't bother if we can blend instead.
9817 if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
9818 isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
9819 return false;
9821 bool MatchLo = true, MatchHi = true;
9822 for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
9823 int M = TargetMask[i];
9825 // Ignore if the input is known to be zero or the index is undef.
9826 if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
9827 (M == SM_SentinelUndef))
9828 continue;
9830 MatchLo &= (M == Unpckl[i]);
9831 MatchHi &= (M == Unpckh[i]);
9834 if (MatchLo || MatchHi) {
9835 UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
9836 V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
9837 V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
9838 return true;
9842 // If a binary shuffle, commute and try again.
9843 if (!IsUnary) {
9844 ShuffleVectorSDNode::commuteMask(Unpckl);
9845 if (isTargetShuffleEquivalent(VT, TargetMask, Unpckl, DAG)) {
9846 UnpackOpcode = X86ISD::UNPCKL;
9847 std::swap(V1, V2);
9848 return true;
9851 ShuffleVectorSDNode::commuteMask(Unpckh);
9852 if (isTargetShuffleEquivalent(VT, TargetMask, Unpckh, DAG)) {
9853 UnpackOpcode = X86ISD::UNPCKH;
9854 std::swap(V1, V2);
9855 return true;
9859 return false;
9862 // X86 has dedicated unpack instructions that can handle specific blend
9863 // operations: UNPCKH and UNPCKL.
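// For example, for v4i32 the UNPCKL mask is <0,4,1,5> and the UNPCKH mask is
// <2,6,3,7>, interleaving the low/high halves of the two inputs.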
9864 static SDValue lowerShuffleWithUNPCK(const SDLoc &DL, MVT VT,
9865 ArrayRef<int> Mask, SDValue V1, SDValue V2,
9866 SelectionDAG &DAG) {
9867 SmallVector<int, 8> Unpckl;
9868 createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
9869 if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
9870 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
9872 SmallVector<int, 8> Unpckh;
9873 createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
9874 if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
9875 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
9877 // Commute and try again.
9878 ShuffleVectorSDNode::commuteMask(Unpckl);
9879 if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
9880 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
9882 ShuffleVectorSDNode::commuteMask(Unpckh);
9883 if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
9884 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
9886 return SDValue();
9889 /// Check if the mask can be mapped to a preliminary shuffle (vperm 64-bit)
9890 /// followed by unpack 256-bit.
9891 static SDValue lowerShuffleWithUNPCK256(const SDLoc &DL, MVT VT,
9892 ArrayRef<int> Mask, SDValue V1,
9893 SDValue V2, SelectionDAG &DAG) {
9894 SmallVector<int, 32> Unpckl, Unpckh;
9895 createSplat2ShuffleMask(VT, Unpckl, /* Lo */ true);
9896 createSplat2ShuffleMask(VT, Unpckh, /* Lo */ false);
9898 unsigned UnpackOpcode;
9899 if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
9900 UnpackOpcode = X86ISD::UNPCKL;
9901 else if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
9902 UnpackOpcode = X86ISD::UNPCKH;
9903 else
9904 return SDValue();
9906 // This is a "natural" unpack operation (rather than the 128-bit sectored
9907 // operation implemented by AVX). We need to rearrange 64-bit chunks of the
9908 // input in order to use the x86 instruction.
9909 V1 = DAG.getVectorShuffle(MVT::v4f64, DL, DAG.getBitcast(MVT::v4f64, V1),
9910 DAG.getUNDEF(MVT::v4f64), {0, 2, 1, 3});
9911 V1 = DAG.getBitcast(VT, V1);
9912 return DAG.getNode(UnpackOpcode, DL, VT, V1, V1);
9915 // Check if the mask can be mapped to a TRUNCATE or VTRUNC, truncating the
9916 // source into the lower elements and zeroing the upper elements.
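// For example (illustrative): a v16i8 mask <0,2,4,6,8,10,12,14> with the upper
// 8 elements zeroable matches Scale = 2, giving SrcVT = v8i16 and
// DstVT = v16i8 via X86ISD::VTRUNC (the 8 x i8 result is narrower than 128
// bits).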
9917 static bool matchShuffleAsVTRUNC(MVT &SrcVT, MVT &DstVT, MVT VT,
9918 ArrayRef<int> Mask, const APInt &Zeroable,
9919 const X86Subtarget &Subtarget) {
9920 if (!VT.is512BitVector() && !Subtarget.hasVLX())
9921 return false;
9923 unsigned NumElts = Mask.size();
9924 unsigned EltSizeInBits = VT.getScalarSizeInBits();
9925 unsigned MaxScale = 64 / EltSizeInBits;
9927 for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
9928 unsigned SrcEltBits = EltSizeInBits * Scale;
9929 if (SrcEltBits < 32 && !Subtarget.hasBWI())
9930 continue;
9931 unsigned NumSrcElts = NumElts / Scale;
9932 if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, 0, Scale))
9933 continue;
9934 unsigned UpperElts = NumElts - NumSrcElts;
9935 if (!Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
9936 continue;
9937 SrcVT = MVT::getIntegerVT(EltSizeInBits * Scale);
9938 SrcVT = MVT::getVectorVT(SrcVT, NumSrcElts);
9939 DstVT = MVT::getIntegerVT(EltSizeInBits);
9940 if ((NumSrcElts * EltSizeInBits) >= 128) {
9941 // ISD::TRUNCATE
9942 DstVT = MVT::getVectorVT(DstVT, NumSrcElts);
9943 } else {
9944 // X86ISD::VTRUNC
9945 DstVT = MVT::getVectorVT(DstVT, 128 / EltSizeInBits);
9947 return true;
9950 return false;
9953 // Helper to create TRUNCATE/VTRUNC nodes, optionally with zero/undef upper
9954 // element padding to the final DstVT.
9955 static SDValue getAVX512TruncNode(const SDLoc &DL, MVT DstVT, SDValue Src,
9956 const X86Subtarget &Subtarget,
9957 SelectionDAG &DAG, bool ZeroUppers) {
9958 MVT SrcVT = Src.getSimpleValueType();
9959 MVT DstSVT = DstVT.getScalarType();
9960 unsigned NumDstElts = DstVT.getVectorNumElements();
9961 unsigned NumSrcElts = SrcVT.getVectorNumElements();
9962 unsigned DstEltSizeInBits = DstVT.getScalarSizeInBits();
9964 if (!DAG.getTargetLoweringInfo().isTypeLegal(SrcVT))
9965 return SDValue();
9967 // Perform a direct ISD::TRUNCATE if possible.
9968 if (NumSrcElts == NumDstElts)
9969 return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Src);
9971 if (NumSrcElts > NumDstElts) {
9972 MVT TruncVT = MVT::getVectorVT(DstSVT, NumSrcElts);
9973 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Src);
9974 return extractSubVector(Trunc, 0, DAG, DL, DstVT.getSizeInBits());
9977 if ((NumSrcElts * DstEltSizeInBits) >= 128) {
9978 MVT TruncVT = MVT::getVectorVT(DstSVT, NumSrcElts);
9979 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Src);
9980 return widenSubVector(Trunc, ZeroUppers, Subtarget, DAG, DL,
9981 DstVT.getSizeInBits());
9984 // Non-VLX targets must truncate from a 512-bit type, so we need to
9985 // widen, truncate and then possibly extract the original subvector.
9986 if (!Subtarget.hasVLX() && !SrcVT.is512BitVector()) {
9987 SDValue NewSrc = widenSubVector(Src, ZeroUppers, Subtarget, DAG, DL, 512);
9988 return getAVX512TruncNode(DL, DstVT, NewSrc, Subtarget, DAG, ZeroUppers);
9991 // Fallback to a X86ISD::VTRUNC, padding if necessary.
9992 MVT TruncVT = MVT::getVectorVT(DstSVT, 128 / DstEltSizeInBits);
9993 SDValue Trunc = DAG.getNode(X86ISD::VTRUNC, DL, TruncVT, Src);
9994 if (DstVT != TruncVT)
9995 Trunc = widenSubVector(Trunc, ZeroUppers, Subtarget, DAG, DL,
9996 DstVT.getSizeInBits());
9997 return Trunc;
10000 // Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
10002 // An example is the following:
10004 // t0: ch = EntryToken
10005 // t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
10006 // t25: v4i32 = truncate t2
10007 // t41: v8i16 = bitcast t25
10008 // t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
10009 // Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
10010 // t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
10011 // t18: v2i64 = bitcast t51
10013 // One can just use a single vpmovdw instruction; without avx512vl we need to
10014 // use the zmm variant and extract the lower subvector, padding with zeroes.
10015 // TODO: Merge with lowerShuffleAsVTRUNC.
10016 static SDValue lowerShuffleWithVPMOV(const SDLoc &DL, MVT VT, SDValue V1,
10017 SDValue V2, ArrayRef<int> Mask,
10018 const APInt &Zeroable,
10019 const X86Subtarget &Subtarget,
10020 SelectionDAG &DAG) {
10021 assert((VT == MVT::v16i8 || VT == MVT::v8i16) && "Unexpected VTRUNC type");
10022 if (!Subtarget.hasAVX512())
10023 return SDValue();
10025 unsigned NumElts = VT.getVectorNumElements();
10026 unsigned EltSizeInBits = VT.getScalarSizeInBits();
10027 unsigned MaxScale = 64 / EltSizeInBits;
10028 for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
10029 unsigned SrcEltBits = EltSizeInBits * Scale;
10030 unsigned NumSrcElts = NumElts / Scale;
10031 unsigned UpperElts = NumElts - NumSrcElts;
10032 if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, 0, Scale) ||
10033 !Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
10034 continue;
10036 // Attempt to find a matching source truncation, but as a fallback VLX
10037 // cases can use the VPMOV directly.
10038 SDValue Src = peekThroughBitcasts(V1);
10039 if (Src.getOpcode() == ISD::TRUNCATE &&
10040 Src.getScalarValueSizeInBits() == SrcEltBits) {
10041 Src = Src.getOperand(0);
10042 } else if (Subtarget.hasVLX()) {
10043 MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
10044 MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
10045 Src = DAG.getBitcast(SrcVT, Src);
10046 // Don't do this if PACKSS/PACKUS could perform it cheaper.
10047 if (Scale == 2 &&
10048 ((DAG.ComputeNumSignBits(Src) > EltSizeInBits) ||
10049 (DAG.computeKnownBits(Src).countMinLeadingZeros() >= EltSizeInBits)))
10050 return SDValue();
10051 } else
10052 return SDValue();
10054 // VPMOVWB is only available with avx512bw.
10055 if (!Subtarget.hasBWI() && Src.getScalarValueSizeInBits() < 32)
10056 return SDValue();
10058 bool UndefUppers = isUndefInRange(Mask, NumSrcElts, UpperElts);
10059 return getAVX512TruncNode(DL, VT, Src, Subtarget, DAG, !UndefUppers);
10062 return SDValue();
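// A scalar sketch of what the v8i16 example in the comment above computes,
// assuming plain arrays in place of vector registers and a hypothetical helper
// name: truncate four i32 lanes to i16 and zero the upper four lanes, i.e. the
// effect of a single VPMOVDW into a 128-bit destination.
#include <array>
#include <cassert>
#include <cstdint>

static std::array<uint16_t, 8> vpmovdwModel(const std::array<uint32_t, 4> &Src) {
  std::array<uint16_t, 8> Dst{};            // upper lanes stay zero
  for (unsigned I = 0; I != 4; ++I)
    Dst[I] = uint16_t(Src[I]);              // per-lane truncation
  return Dst;
}

int main() {
  std::array<uint32_t, 4> Src = {0x11112222u, 0x33334444u, 0x55556666u, 0x77778888u};
  std::array<uint16_t, 8> Dst = vpmovdwModel(Src);
  assert(Dst[0] == 0x2222 && Dst[3] == 0x8888 && Dst[4] == 0 && Dst[7] == 0);
  return 0;
}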
10065 // Attempt to match binary shuffle patterns as a truncate.
10066 static SDValue lowerShuffleAsVTRUNC(const SDLoc &DL, MVT VT, SDValue V1,
10067 SDValue V2, ArrayRef<int> Mask,
10068 const APInt &Zeroable,
10069 const X86Subtarget &Subtarget,
10070 SelectionDAG &DAG) {
10071 assert((VT.is128BitVector() || VT.is256BitVector()) &&
10072 "Unexpected VTRUNC type");
10073 if (!Subtarget.hasAVX512())
10074 return SDValue();
10076 unsigned NumElts = VT.getVectorNumElements();
10077 unsigned EltSizeInBits = VT.getScalarSizeInBits();
10078 unsigned MaxScale = 64 / EltSizeInBits;
10079 for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
10080 // TODO: Support non-BWI VPMOVWB truncations?
10081 unsigned SrcEltBits = EltSizeInBits * Scale;
10082 if (SrcEltBits < 32 && !Subtarget.hasBWI())
10083 continue;
10085 // Match shuffle <Ofs,Ofs+Scale,Ofs+2*Scale,..,undef_or_zero,undef_or_zero>
10086 // Bail if the V2 elements are undef.
10087 unsigned NumHalfSrcElts = NumElts / Scale;
10088 unsigned NumSrcElts = 2 * NumHalfSrcElts;
10089 for (unsigned Offset = 0; Offset != Scale; ++Offset) {
10090 if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, Offset, Scale) ||
10091 isUndefInRange(Mask, NumHalfSrcElts, NumHalfSrcElts))
10092 continue;
10094 // The elements beyond the truncation must be undef/zero.
10095 unsigned UpperElts = NumElts - NumSrcElts;
10096 if (UpperElts > 0 &&
10097 !Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
10098 continue;
10099 bool UndefUppers =
10100 UpperElts > 0 && isUndefInRange(Mask, NumSrcElts, UpperElts);
10102 // For offset truncations, ensure that the concat is cheap.
10103 if (Offset) {
10104 auto IsCheapConcat = [&](SDValue Lo, SDValue Hi) {
10105 if (Lo.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
10106 Hi.getOpcode() == ISD::EXTRACT_SUBVECTOR)
10107 return Lo.getOperand(0) == Hi.getOperand(0);
10108 if (ISD::isNormalLoad(Lo.getNode()) &&
10109 ISD::isNormalLoad(Hi.getNode())) {
10110 auto *LDLo = cast<LoadSDNode>(Lo);
10111 auto *LDHi = cast<LoadSDNode>(Hi);
10112 return DAG.areNonVolatileConsecutiveLoads(
10113 LDHi, LDLo, Lo.getValueType().getStoreSize(), 1);
10115 return false;
10117 if (!IsCheapConcat(V1, V2))
10118 continue;
10121 // As we're using both sources, we need to concat them together
10122 // and truncate from the double-sized src.
10123 MVT ConcatVT = MVT::getVectorVT(VT.getScalarType(), NumElts * 2);
10124 SDValue Src = DAG.getNode(ISD::CONCAT_VECTORS, DL, ConcatVT, V1, V2);
10126 MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
10127 MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
10128 Src = DAG.getBitcast(SrcVT, Src);
10130 // Shift the offset'd elements into place for the truncation.
10131 // TODO: Use getTargetVShiftByConstNode.
10132 if (Offset)
10133 Src = DAG.getNode(
10134 X86ISD::VSRLI, DL, SrcVT, Src,
10135 DAG.getTargetConstant(Offset * EltSizeInBits, DL, MVT::i8));
10137 return getAVX512TruncNode(DL, VT, Src, Subtarget, DAG, !UndefUppers);
10141 return SDValue();
10144 /// Check whether a compaction lowering can be done by dropping even/odd
10145 /// elements and compute how many times even/odd elements must be dropped.
10147 /// This handles shuffles which take every Nth element where N is a power of
10148 /// two. Example shuffle masks:
10150 /// (even)
10151 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
10152 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
10153 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
10154 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
10155 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
10156 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
10158 /// (odd)
10159 /// N = 1: 1, 3, 5, 7, 9, 11, 13, 15, 0, 2, 4, 6, 8, 10, 12, 14
10160 /// N = 1: 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
10162 /// Any of these lanes can of course be undef.
10164 /// This routine only supports N <= 3.
10165 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
10166 /// for larger N.
10168 /// \returns N above, or the number of times even/odd elements must be dropped
10169 /// if there is such a number. Otherwise returns zero.
10170 static int canLowerByDroppingElements(ArrayRef<int> Mask, bool MatchEven,
10171 bool IsSingleInput) {
10172 // The modulus for the shuffle vector entries is based on whether this is
10173 // a single input or not.
10174 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
10175 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
10176 "We should only be called with masks with a power-of-2 size!");
10178 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
10179 int Offset = MatchEven ? 0 : 1;
10181 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
10182 // and 2^3 simultaneously. This is because we may have ambiguity with
10183 // partially undef inputs.
10184 bool ViableForN[3] = {true, true, true};
10186 for (int i = 0, e = Mask.size(); i < e; ++i) {
10187 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
10188 // want.
10189 if (Mask[i] < 0)
10190 continue;
10192 bool IsAnyViable = false;
10193 for (unsigned j = 0; j != std::size(ViableForN); ++j)
10194 if (ViableForN[j]) {
10195 uint64_t N = j + 1;
10197 // The shuffle mask must be equal to (i * 2^N) % M.
10198 if ((uint64_t)(Mask[i] - Offset) == (((uint64_t)i << N) & ModMask))
10199 IsAnyViable = true;
10200 else
10201 ViableForN[j] = false;
10203 // Early exit if we exhaust the possible powers of two.
10204 if (!IsAnyViable)
10205 break;
10208 for (unsigned j = 0; j != std::size(ViableForN); ++j)
10209 if (ViableForN[j])
10210 return j + 1;
10212 // Return 0 as there is no viable power of two.
10213 return 0;
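// A standalone restatement of the even/odd-dropping check above, assuming plain
// int masks (-1 == undef) and a hypothetical helper name. It returns the N
// described in the comment, or 0 if no power-of-2 stride matches, and is
// exercised on two of the example masks documented above.
#include <cassert>
#include <cstdint>
#include <vector>

static int droppingFactor(const std::vector<int> &Mask, bool MatchEven,
                          bool IsSingleInput) {
  uint64_t Modulus = Mask.size() * (IsSingleInput ? 1 : 2);
  uint64_t ModMask = Modulus - 1;           // Modulus is a power of two
  int Offset = MatchEven ? 0 : 1;
  bool Viable[3] = {true, true, true};      // N = 1, 2, 3
  for (int I = 0, E = (int)Mask.size(); I != E; ++I) {
    if (Mask[I] < 0)
      continue;                             // undef matches any stride
    for (unsigned J = 0; J != 3; ++J)
      if (Viable[J] &&
          uint64_t(Mask[I] - Offset) != ((uint64_t(I) << (J + 1)) & ModMask))
        Viable[J] = false;
  }
  for (unsigned J = 0; J != 3; ++J)
    if (Viable[J])
      return J + 1;
  return 0;
}

int main() {
  // Single-input v16i8 examples from the comment above.
  assert(droppingFactor({0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14},
                        /*MatchEven=*/true, /*IsSingleInput=*/true) == 1);
  assert(droppingFactor({0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12},
                        /*MatchEven=*/true, /*IsSingleInput=*/true) == 2);
  return 0;
}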
10216 // X86 has dedicated pack instructions that can handle specific truncation
10217 // operations: PACKSS and PACKUS.
10218 // Checks for compaction shuffle masks if MaxStages > 1.
10219 // TODO: Add support for matching multiple PACKSS/PACKUS stages.
10220 static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
10221 unsigned &PackOpcode, ArrayRef<int> TargetMask,
10222 const SelectionDAG &DAG,
10223 const X86Subtarget &Subtarget,
10224 unsigned MaxStages = 1) {
10225 unsigned NumElts = VT.getVectorNumElements();
10226 unsigned BitSize = VT.getScalarSizeInBits();
10227 assert(0 < MaxStages && MaxStages <= 3 && (BitSize << MaxStages) <= 64 &&
10228 "Illegal maximum compaction");
10230 auto MatchPACK = [&](SDValue N1, SDValue N2, MVT PackVT) {
10231 unsigned NumSrcBits = PackVT.getScalarSizeInBits();
10232 unsigned NumPackedBits = NumSrcBits - BitSize;
10233 N1 = peekThroughBitcasts(N1);
10234 N2 = peekThroughBitcasts(N2);
10235 unsigned NumBits1 = N1.getScalarValueSizeInBits();
10236 unsigned NumBits2 = N2.getScalarValueSizeInBits();
10237 bool IsZero1 = llvm::isNullOrNullSplat(N1, /*AllowUndefs*/ false);
10238 bool IsZero2 = llvm::isNullOrNullSplat(N2, /*AllowUndefs*/ false);
10239 if ((!N1.isUndef() && !IsZero1 && NumBits1 != NumSrcBits) ||
10240 (!N2.isUndef() && !IsZero2 && NumBits2 != NumSrcBits))
10241 return false;
10242 if (Subtarget.hasSSE41() || BitSize == 8) {
10243 APInt ZeroMask = APInt::getHighBitsSet(NumSrcBits, NumPackedBits);
10244 if ((N1.isUndef() || IsZero1 || DAG.MaskedValueIsZero(N1, ZeroMask)) &&
10245 (N2.isUndef() || IsZero2 || DAG.MaskedValueIsZero(N2, ZeroMask))) {
10246 V1 = N1;
10247 V2 = N2;
10248 SrcVT = PackVT;
10249 PackOpcode = X86ISD::PACKUS;
10250 return true;
10253 bool IsAllOnes1 = llvm::isAllOnesOrAllOnesSplat(N1, /*AllowUndefs*/ false);
10254 bool IsAllOnes2 = llvm::isAllOnesOrAllOnesSplat(N2, /*AllowUndefs*/ false);
10255 if ((N1.isUndef() || IsZero1 || IsAllOnes1 ||
10256 DAG.ComputeNumSignBits(N1) > NumPackedBits) &&
10257 (N2.isUndef() || IsZero2 || IsAllOnes2 ||
10258 DAG.ComputeNumSignBits(N2) > NumPackedBits)) {
10259 V1 = N1;
10260 V2 = N2;
10261 SrcVT = PackVT;
10262 PackOpcode = X86ISD::PACKSS;
10263 return true;
10265 return false;
10268 // Attempt to match against wider and wider compaction patterns.
10269 for (unsigned NumStages = 1; NumStages <= MaxStages; ++NumStages) {
10270 MVT PackSVT = MVT::getIntegerVT(BitSize << NumStages);
10271 MVT PackVT = MVT::getVectorVT(PackSVT, NumElts >> NumStages);
10273 // Try binary shuffle.
10274 SmallVector<int, 32> BinaryMask;
10275 createPackShuffleMask(VT, BinaryMask, false, NumStages);
10276 if (isTargetShuffleEquivalent(VT, TargetMask, BinaryMask, DAG, V1, V2))
10277 if (MatchPACK(V1, V2, PackVT))
10278 return true;
10280 // Try unary shuffle.
10281 SmallVector<int, 32> UnaryMask;
10282 createPackShuffleMask(VT, UnaryMask, true, NumStages);
10283 if (isTargetShuffleEquivalent(VT, TargetMask, UnaryMask, DAG, V1))
10284 if (MatchPACK(V1, V1, PackVT))
10285 return true;
10288 return false;
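// Scalar sketches of the per-lane PACKSS (signed saturate) and PACKUS (unsigned
// saturate) behaviour the matcher above relies on: a lane packs losslessly via
// PACKSS when it has more sign bits than the dropped width, and via PACKUS when
// the dropped high bits are known zero. Helper names are hypothetical.
#include <algorithm>
#include <cassert>
#include <cstdint>

static int8_t packssLane(int16_t V) {                 // signed saturation to i8
  return int8_t(std::clamp<int>(V, -128, 127));
}

static uint8_t packusLane(int16_t V) {                // unsigned saturation to u8
  return uint8_t(std::clamp<int>(V, 0, 255));
}

int main() {
  // In-range values pack losslessly; out-of-range values saturate, which is why
  // the matcher checks sign bits / known-zero upper bits before using PACK*.
  assert(packssLane(-5) == -5 && packssLane(1000) == 127);
  assert(packusLane(200) == 200 && packusLane(-1) == 0 && packusLane(1000) == 255);
  return 0;
}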
10291 static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
10292 SDValue V1, SDValue V2, SelectionDAG &DAG,
10293 const X86Subtarget &Subtarget) {
10294 MVT PackVT;
10295 unsigned PackOpcode;
10296 unsigned SizeBits = VT.getSizeInBits();
10297 unsigned EltBits = VT.getScalarSizeInBits();
10298 unsigned MaxStages = Log2_32(64 / EltBits);
10299 if (!matchShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
10300 Subtarget, MaxStages))
10301 return SDValue();
10303 unsigned CurrentEltBits = PackVT.getScalarSizeInBits();
10304 unsigned NumStages = Log2_32(CurrentEltBits / EltBits);
10306 // Don't lower multi-stage packs on AVX512, truncation is better.
10307 if (NumStages != 1 && SizeBits == 128 && Subtarget.hasVLX())
10308 return SDValue();
10310 // Pack to the largest type possible:
10311 // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
10312 unsigned MaxPackBits = 16;
10313 if (CurrentEltBits > 16 &&
10314 (PackOpcode == X86ISD::PACKSS || Subtarget.hasSSE41()))
10315 MaxPackBits = 32;
10317 // Repeatedly pack down to the target size.
10318 SDValue Res;
10319 for (unsigned i = 0; i != NumStages; ++i) {
10320 unsigned SrcEltBits = std::min(MaxPackBits, CurrentEltBits);
10321 unsigned NumSrcElts = SizeBits / SrcEltBits;
10322 MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
10323 MVT DstSVT = MVT::getIntegerVT(SrcEltBits / 2);
10324 MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
10325 MVT DstVT = MVT::getVectorVT(DstSVT, NumSrcElts * 2);
10326 Res = DAG.getNode(PackOpcode, DL, DstVT, DAG.getBitcast(SrcVT, V1),
10327 DAG.getBitcast(SrcVT, V2));
10328 V1 = V2 = Res;
10329 CurrentEltBits /= 2;
10331 assert(Res && Res.getValueType() == VT &&
10332 "Failed to lower compaction shuffle");
10333 return Res;
10336 /// Try to emit a bitmask instruction for a shuffle.
10338 /// This handles cases where we can model a blend exactly as a bitmask due to
10339 /// one of the inputs being zeroable.
10340 static SDValue lowerShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
10341 SDValue V2, ArrayRef<int> Mask,
10342 const APInt &Zeroable,
10343 const X86Subtarget &Subtarget,
10344 SelectionDAG &DAG) {
10345 MVT MaskVT = VT;
10346 MVT EltVT = VT.getVectorElementType();
10347 SDValue Zero, AllOnes;
10348 // Use f64 if i64 isn't legal.
10349 if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
10350 EltVT = MVT::f64;
10351 MaskVT = MVT::getVectorVT(EltVT, Mask.size());
10354 MVT LogicVT = VT;
10355 if (EltVT == MVT::f32 || EltVT == MVT::f64) {
10356 Zero = DAG.getConstantFP(0.0, DL, EltVT);
10357 APFloat AllOnesValue =
10358 APFloat::getAllOnesValue(SelectionDAG::EVTToAPFloatSemantics(EltVT));
10359 AllOnes = DAG.getConstantFP(AllOnesValue, DL, EltVT);
10360 LogicVT =
10361 MVT::getVectorVT(EltVT == MVT::f64 ? MVT::i64 : MVT::i32, Mask.size());
10362 } else {
10363 Zero = DAG.getConstant(0, DL, EltVT);
10364 AllOnes = DAG.getAllOnesConstant(DL, EltVT);
10367 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
10368 SDValue V;
10369 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10370 if (Zeroable[i])
10371 continue;
10372 if (Mask[i] % Size != i)
10373 return SDValue(); // Not a blend.
10374 if (!V)
10375 V = Mask[i] < Size ? V1 : V2;
10376 else if (V != (Mask[i] < Size ? V1 : V2))
10377 return SDValue(); // Can only let one input through the mask.
10379 VMaskOps[i] = AllOnes;
10381 if (!V)
10382 return SDValue(); // No non-zeroable elements!
10384 SDValue VMask = DAG.getBuildVector(MaskVT, DL, VMaskOps);
10385 VMask = DAG.getBitcast(LogicVT, VMask);
10386 V = DAG.getBitcast(LogicVT, V);
10387 SDValue And = DAG.getNode(ISD::AND, DL, LogicVT, V, VMask);
10388 return DAG.getBitcast(VT, And);
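// A standalone sketch of the bitmask test above, assuming plain int masks and a
// bool Zeroable vector; the helper name is hypothetical. The shuffle is a
// bitmask when every non-zeroable element stays in place and all survivors come
// from a single input, so the whole shuffle reduces to an AND with a 0/all-ones
// constant per lane.
#include <cassert>
#include <vector>

static bool matchAsBitMask(const std::vector<int> &Mask,
                           const std::vector<bool> &Zeroable,
                           std::vector<bool> &AndMask, int &WhichInput) {
  int Size = (int)Mask.size();
  AndMask.assign(Size, false);
  WhichInput = -1;
  for (int I = 0; I != Size; ++I) {
    if (Mask[I] < 0 || Zeroable[I])
      continue;                                  // lane becomes zero via the AND
    if (Mask[I] % Size != I)
      return false;                              // element moved: not a blend
    int Input = Mask[I] < Size ? 0 : 1;
    if (WhichInput == -1)
      WhichInput = Input;
    else if (WhichInput != Input)
      return false;                              // only one input may survive
    AndMask[I] = true;
  }
  return WhichInput != -1;
}

int main() {
  // v4i32 shuffle <4,z,6,z> of (V1, V2): keep V2 lanes 0 and 2, zero the rest.
  std::vector<int> Mask = {4, 1, 6, 3};
  std::vector<bool> Zeroable = {false, true, false, true};
  std::vector<bool> AndMask;
  int Which;
  assert(matchAsBitMask(Mask, Zeroable, AndMask, Which));
  assert(Which == 1 && AndMask[0] && !AndMask[1] && AndMask[2] && !AndMask[3]);
  return 0;
}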
10391 /// Try to emit a blend instruction for a shuffle using bit math.
10393 /// This is used as a fallback approach when first class blend instructions are
10394 /// unavailable. Currently it is only suitable for integer vectors, but could
10395 /// be generalized for floating point vectors if desirable.
10396 static SDValue lowerShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
10397 SDValue V2, ArrayRef<int> Mask,
10398 SelectionDAG &DAG) {
10399 assert(VT.isInteger() && "Only supports integer vector types!");
10400 MVT EltVT = VT.getVectorElementType();
10401 SDValue Zero = DAG.getConstant(0, DL, EltVT);
10402 SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
10403 SmallVector<SDValue, 16> MaskOps;
10404 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10405 if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
10406 return SDValue(); // Shuffled input!
10407 MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
10410 SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
10411 return getBitSelect(DL, VT, V1, V2, V1Mask, DAG);
10414 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
10415 SDValue PreservedSrc,
10416 const X86Subtarget &Subtarget,
10417 SelectionDAG &DAG);
10419 static bool matchShuffleAsBlend(MVT VT, SDValue V1, SDValue V2,
10420 MutableArrayRef<int> Mask,
10421 const APInt &Zeroable, bool &ForceV1Zero,
10422 bool &ForceV2Zero, uint64_t &BlendMask) {
10423 bool V1IsZeroOrUndef =
10424 V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
10425 bool V2IsZeroOrUndef =
10426 V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());
10428 BlendMask = 0;
10429 ForceV1Zero = false, ForceV2Zero = false;
10430 assert(Mask.size() <= 64 && "Shuffle mask too big for blend mask");
10432 int NumElts = Mask.size();
10433 int NumLanes = VT.getSizeInBits() / 128;
10434 int NumEltsPerLane = NumElts / NumLanes;
10435 assert((NumLanes * NumEltsPerLane) == NumElts && "Value type mismatch");
10437 // For 32/64-bit elements, if we only reference one input (plus any undefs),
10438 // then ensure the blend mask part for that lane just references that input.
10439 bool ForceWholeLaneMasks =
10440 VT.is256BitVector() && VT.getScalarSizeInBits() >= 32;
10442 // Attempt to generate the binary blend mask. If an input is zero then
10443 // we can use any lane.
10444 for (int Lane = 0; Lane != NumLanes; ++Lane) {
10445 // Keep track of the inputs used per lane.
10446 bool LaneV1InUse = false;
10447 bool LaneV2InUse = false;
10448 uint64_t LaneBlendMask = 0;
10449 for (int LaneElt = 0; LaneElt != NumEltsPerLane; ++LaneElt) {
10450 int Elt = (Lane * NumEltsPerLane) + LaneElt;
10451 int M = Mask[Elt];
10452 if (M == SM_SentinelUndef)
10453 continue;
10454 if (M == Elt || (0 <= M && M < NumElts &&
10455 IsElementEquivalent(NumElts, V1, V1, M, Elt))) {
10456 Mask[Elt] = Elt;
10457 LaneV1InUse = true;
10458 continue;
10460 if (M == (Elt + NumElts) ||
10461 (NumElts <= M &&
10462 IsElementEquivalent(NumElts, V2, V2, M - NumElts, Elt))) {
10463 LaneBlendMask |= 1ull << LaneElt;
10464 Mask[Elt] = Elt + NumElts;
10465 LaneV2InUse = true;
10466 continue;
10468 if (Zeroable[Elt]) {
10469 if (V1IsZeroOrUndef) {
10470 ForceV1Zero = true;
10471 Mask[Elt] = Elt;
10472 LaneV1InUse = true;
10473 continue;
10475 if (V2IsZeroOrUndef) {
10476 ForceV2Zero = true;
10477 LaneBlendMask |= 1ull << LaneElt;
10478 Mask[Elt] = Elt + NumElts;
10479 LaneV2InUse = true;
10480 continue;
10483 return false;
10486 // If we only used V2 then splat the lane blend mask to avoid any demanded
10487 // elts from V1 in this lane (the V1 equivalent is implicit with a zero
10488 // blend mask bit).
10489 if (ForceWholeLaneMasks && LaneV2InUse && !LaneV1InUse)
10490 LaneBlendMask = (1ull << NumEltsPerLane) - 1;
10492 BlendMask |= LaneBlendMask << (Lane * NumEltsPerLane);
10494 return true;
10497 static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
10498 int Scale) {
10499 uint64_t ScaledMask = 0;
10500 for (int i = 0; i != Size; ++i)
10501 if (BlendMask & (1ull << i))
10502 ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
10503 return ScaledMask;
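// A quick standalone check of the scaling helper above, restated over plain
// integers with a usage example (the name is hypothetical): widening a per-lane
// blend mask by Scale duplicates each selected bit, e.g. 0b0101 with Scale == 2
// becomes 0b00110011.
#include <cassert>
#include <cstdint>

static uint64_t scaleBlendMask(uint64_t BlendMask, int Size, int Scale) {
  uint64_t Scaled = 0;
  for (int I = 0; I != Size; ++I)
    if (BlendMask & (1ull << I))
      Scaled |= ((1ull << Scale) - 1) << (I * Scale);
  return Scaled;
}

int main() {
  assert(scaleBlendMask(0b0101, 4, 2) == 0b00110011);
  assert(scaleBlendMask(0b10, 2, 4) == 0b11110000);
  return 0;
}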
10506 /// Try to emit a blend instruction for a shuffle.
10508 /// This doesn't do any checks for the availability of instructions for blending
10509 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
10510 /// be matched in the backend with the type given. What it does check for is
10511 /// that the shuffle mask is a blend, or convertible into a blend with zero.
10512 static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
10513 SDValue V2, ArrayRef<int> Original,
10514 const APInt &Zeroable,
10515 const X86Subtarget &Subtarget,
10516 SelectionDAG &DAG) {
10517 uint64_t BlendMask = 0;
10518 bool ForceV1Zero = false, ForceV2Zero = false;
10519 SmallVector<int, 64> Mask(Original);
10520 if (!matchShuffleAsBlend(VT, V1, V2, Mask, Zeroable, ForceV1Zero, ForceV2Zero,
10521 BlendMask))
10522 return SDValue();
10524 // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
10525 if (ForceV1Zero)
10526 V1 = getZeroVector(VT, Subtarget, DAG, DL);
10527 if (ForceV2Zero)
10528 V2 = getZeroVector(VT, Subtarget, DAG, DL);
10530 unsigned NumElts = VT.getVectorNumElements();
10532 switch (VT.SimpleTy) {
10533 case MVT::v4i64:
10534 case MVT::v8i32:
10535 assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
10536 [[fallthrough]];
10537 case MVT::v4f64:
10538 case MVT::v8f32:
10539 assert(Subtarget.hasAVX() && "256-bit float blends require AVX!");
10540 [[fallthrough]];
10541 case MVT::v2f64:
10542 case MVT::v2i64:
10543 case MVT::v4f32:
10544 case MVT::v4i32:
10545 case MVT::v8i16:
10546 assert(Subtarget.hasSSE41() && "128-bit blends require SSE41!");
10547 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
10548 DAG.getTargetConstant(BlendMask, DL, MVT::i8));
10549 case MVT::v16i16: {
10550 assert(Subtarget.hasAVX2() && "v16i16 blends require AVX2!");
10551 SmallVector<int, 8> RepeatedMask;
10552 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
10553 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
10554 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
10555 BlendMask = 0;
10556 for (int i = 0; i < 8; ++i)
10557 if (RepeatedMask[i] >= 8)
10558 BlendMask |= 1ull << i;
10559 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
10560 DAG.getTargetConstant(BlendMask, DL, MVT::i8));
10562 // Use PBLENDW for lower/upper lanes and then blend lanes.
10563 // TODO - we should allow 2 PBLENDW here and leave shuffle combine to
10564 // merge to VSELECT where useful.
10565 uint64_t LoMask = BlendMask & 0xFF;
10566 uint64_t HiMask = (BlendMask >> 8) & 0xFF;
10567 if (LoMask == 0 || LoMask == 255 || HiMask == 0 || HiMask == 255) {
10568 SDValue Lo = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
10569 DAG.getTargetConstant(LoMask, DL, MVT::i8));
10570 SDValue Hi = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
10571 DAG.getTargetConstant(HiMask, DL, MVT::i8));
10572 return DAG.getVectorShuffle(
10573 MVT::v16i16, DL, Lo, Hi,
10574 {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
10576 [[fallthrough]];
10578 case MVT::v32i8:
10579 assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!");
10580 [[fallthrough]];
10581 case MVT::v16i8: {
10582 assert(Subtarget.hasSSE41() && "128-bit byte-blends require SSE41!");
10584 // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
10585 if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
10586 Subtarget, DAG))
10587 return Masked;
10589 if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
10590 MVT IntegerType = MVT::getIntegerVT(std::max<unsigned>(NumElts, 8));
10591 SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
10592 return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
10595 // If we have VPTERNLOG, we can use that as a bit blend.
10596 if (Subtarget.hasVLX())
10597 if (SDValue BitBlend =
10598 lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
10599 return BitBlend;
10601 // Scale the blend by the number of bytes per element.
10602 int Scale = VT.getScalarSizeInBits() / 8;
10604 // This form of blend is always done on bytes. Compute the byte vector
10605 // type.
10606 MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
10608 // x86 allows load folding with blendvb from the 2nd source operand. But
10609 // we are still using LLVM select here (see comment below), so that's V1.
10610 // If V2 can be load-folded and V1 cannot be load-folded, then commute to
10611 // allow that load-folding possibility.
10612 if (!ISD::isNormalLoad(V1.getNode()) && ISD::isNormalLoad(V2.getNode())) {
10613 ShuffleVectorSDNode::commuteMask(Mask);
10614 std::swap(V1, V2);
10617 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
10618 // mix of LLVM's code generator and the x86 backend. We tell the code
10619 // generator that boolean values in the elements of an x86 vector register
10620 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
10621 // mapping a select to operand #1, and 'false' mapping to operand #2. The
10622 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
10623 // of the element (the remaining are ignored) and 0 in that high bit would
10624 // mean operand #1 while 1 in the high bit would mean operand #2. So while
10625 // the LLVM model for boolean values in vector elements gets the relevant
10626 // bit set, it is set backwards and over constrained relative to x86's
10627 // actual model.
10628 SmallVector<SDValue, 32> VSELECTMask;
10629 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10630 for (int j = 0; j < Scale; ++j)
10631 VSELECTMask.push_back(
10632 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
10633 : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
10634 MVT::i8));
10636 V1 = DAG.getBitcast(BlendVT, V1);
10637 V2 = DAG.getBitcast(BlendVT, V2);
10638 return DAG.getBitcast(
10640 DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
10641 V1, V2));
10643 case MVT::v16f32:
10644 case MVT::v8f64:
10645 case MVT::v8i64:
10646 case MVT::v16i32:
10647 case MVT::v32i16:
10648 case MVT::v64i8: {
10649 // Attempt to lower to a bitmask if we can. Only if not optimizing for size.
10650 bool OptForSize = DAG.shouldOptForSize();
10651 if (!OptForSize) {
10652 if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
10653 Subtarget, DAG))
10654 return Masked;
10657 // Otherwise load an immediate into a GPR, cast to k-register, and use a
10658 // masked move.
10659 MVT IntegerType = MVT::getIntegerVT(std::max<unsigned>(NumElts, 8));
10660 SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
10661 return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
10663 default:
10664 llvm_unreachable("Not a supported integer vector type!");
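// A scalar sketch of the (P)BLENDVB selection rule discussed in the comment
// about VSELECT above, assuming plain byte arrays and a hypothetical helper
// name: only the high bit of each mask byte matters, and a set bit picks the
// second operand, which is the opposite convention to an LLVM vselect where
// 'true' picks operand #1.
#include <array>
#include <cassert>
#include <cstdint>

static std::array<uint8_t, 16> pblendvbModel(const std::array<uint8_t, 16> &A,
                                             const std::array<uint8_t, 16> &B,
                                             const std::array<uint8_t, 16> &M) {
  std::array<uint8_t, 16> R{};
  for (unsigned I = 0; I != 16; ++I)
    R[I] = (M[I] & 0x80) ? B[I] : A[I];   // high mask bit selects operand #2
  return R;
}

int main() {
  std::array<uint8_t, 16> A{}, B{}, M{};
  A.fill(1);
  B.fill(2);
  M[0] = 0xFF;            // all bits set: take B in lane 0
  M[1] = 0x7F;            // high bit clear: take A in lane 1, low bits ignored
  std::array<uint8_t, 16> R = pblendvbModel(A, B, M);
  assert(R[0] == 2 && R[1] == 1 && R[2] == 1);
  return 0;
}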
10668 /// Try to lower as a blend of elements from two inputs followed by
10669 /// a single-input permutation.
10671 /// This matches the pattern where we can blend elements from two inputs and
10672 /// then reduce the shuffle to a single-input permutation.
10673 static SDValue lowerShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
10674 SDValue V1, SDValue V2,
10675 ArrayRef<int> Mask,
10676 SelectionDAG &DAG,
10677 bool ImmBlends = false) {
10678 // We build up the blend mask while checking whether a blend is a viable way
10679 // to reduce the shuffle.
10680 SmallVector<int, 32> BlendMask(Mask.size(), -1);
10681 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
10683 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10684 if (Mask[i] < 0)
10685 continue;
10687 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
10689 if (BlendMask[Mask[i] % Size] < 0)
10690 BlendMask[Mask[i] % Size] = Mask[i];
10691 else if (BlendMask[Mask[i] % Size] != Mask[i])
10692 return SDValue(); // Can't blend in the needed input!
10694 PermuteMask[i] = Mask[i] % Size;
10697 // If we're restricted to immediate blends, bail if the blend mask can't be
10698 // widened to i16.
10699 unsigned EltSize = VT.getScalarSizeInBits();
10700 if (ImmBlends && EltSize == 8 && !canWidenShuffleElements(BlendMask))
10701 return SDValue();
10703 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
10704 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
10707 /// Try to lower as an unpack of elements from two inputs followed by
10708 /// a single-input permutation.
10710 /// This matches the pattern where we can unpack elements from two inputs and
10711 /// then reduce the shuffle to a single-input (wider) permutation.
10712 static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
10713 SDValue V1, SDValue V2,
10714 ArrayRef<int> Mask,
10715 SelectionDAG &DAG) {
10716 int NumElts = Mask.size();
10717 int NumLanes = VT.getSizeInBits() / 128;
10718 int NumLaneElts = NumElts / NumLanes;
10719 int NumHalfLaneElts = NumLaneElts / 2;
10721 bool MatchLo = true, MatchHi = true;
10722 SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
10724 // Determine UNPCKL/UNPCKH type and operand order.
10725 for (int Elt = 0; Elt != NumElts; ++Elt) {
10726 int M = Mask[Elt];
10727 if (M < 0)
10728 continue;
10730 // Normalize the mask value depending on whether it's V1 or V2.
10731 int NormM = M;
10732 SDValue &Op = Ops[Elt & 1];
10733 if (M < NumElts && (Op.isUndef() || Op == V1))
10734 Op = V1;
10735 else if (NumElts <= M && (Op.isUndef() || Op == V2)) {
10736 Op = V2;
10737 NormM -= NumElts;
10738 } else
10739 return SDValue();
10741 bool MatchLoAnyLane = false, MatchHiAnyLane = false;
10742 for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
10743 int Lo = Lane, Mid = Lane + NumHalfLaneElts, Hi = Lane + NumLaneElts;
10744 MatchLoAnyLane |= isUndefOrInRange(NormM, Lo, Mid);
10745 MatchHiAnyLane |= isUndefOrInRange(NormM, Mid, Hi);
10746 if (MatchLoAnyLane || MatchHiAnyLane) {
10747 assert((MatchLoAnyLane ^ MatchHiAnyLane) &&
10748 "Failed to match UNPCKLO/UNPCKHI");
10749 break;
10752 MatchLo &= MatchLoAnyLane;
10753 MatchHi &= MatchHiAnyLane;
10754 if (!MatchLo && !MatchHi)
10755 return SDValue();
10757 assert((MatchLo ^ MatchHi) && "Failed to match UNPCKLO/UNPCKHI");
10759 // Element indices have changed after unpacking. Calculate the permute mask
10760 // so that they will be put back to the positions dictated by the
10761 // original shuffle mask indices.
10762 SmallVector<int, 32> PermuteMask(NumElts, -1);
10763 for (int Elt = 0; Elt != NumElts; ++Elt) {
10764 int M = Mask[Elt];
10765 if (M < 0)
10766 continue;
10767 int NormM = M;
10768 if (NumElts <= M)
10769 NormM -= NumElts;
10770 bool IsFirstOp = M < NumElts;
10771 int BaseMaskElt =
10772 NumLaneElts * (NormM / NumLaneElts) + (2 * (NormM % NumHalfLaneElts));
10773 if ((IsFirstOp && V1 == Ops[0]) || (!IsFirstOp && V2 == Ops[0]))
10774 PermuteMask[Elt] = BaseMaskElt;
10775 else if ((IsFirstOp && V1 == Ops[1]) || (!IsFirstOp && V2 == Ops[1]))
10776 PermuteMask[Elt] = BaseMaskElt + 1;
10777 assert(PermuteMask[Elt] != -1 &&
10778 "Input mask element is defined but failed to assign permute mask");
10781 unsigned UnpckOp = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
10782 SDValue Unpck = DAG.getNode(UnpckOp, DL, VT, Ops);
10783 return DAG.getVectorShuffle(VT, DL, Unpck, DAG.getUNDEF(VT), PermuteMask);
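// A standalone sketch of the 128-bit-lane UNPCKL/UNPCKH index mapping used
// above, with plain int vectors and hypothetical names: within each lane,
// source element m of the first operand lands at 2 * (m % HalfLane) and the
// matching element of the second operand lands one slot higher, which is what
// the BaseMaskElt formula reconstructs.
#include <cassert>
#include <vector>

// Interleave the low (or high) halves of each 128-bit lane of A and B.
static std::vector<int> unpackLanes(const std::vector<int> &A,
                                    const std::vector<int> &B,
                                    int NumLaneElts, bool Lo) {
  std::vector<int> R(A.size());
  int Half = NumLaneElts / 2;
  for (int Lane = 0, E = (int)A.size(); Lane < E; Lane += NumLaneElts)
    for (int K = 0; K != Half; ++K) {
      int Src = Lane + K + (Lo ? 0 : Half);
      R[Lane + 2 * K] = A[Src];
      R[Lane + 2 * K + 1] = B[Src];
    }
  return R;
}

int main() {
  // v8i16-style lane: unpacklo(A, B) interleaves the low halves.
  std::vector<int> A = {0, 1, 2, 3, 4, 5, 6, 7};
  std::vector<int> B = {10, 11, 12, 13, 14, 15, 16, 17};
  std::vector<int> Lo = unpackLanes(A, B, 8, /*Lo=*/true);
  assert((Lo == std::vector<int>{0, 10, 1, 11, 2, 12, 3, 13}));
  // Element m of A ends up at index 2 * (m % 4), as in the BaseMaskElt formula.
  assert(Lo[2 * (3 % 4)] == A[3]);
  return 0;
}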
10786 /// Try to lower a shuffle as a permute of the inputs followed by an
10787 /// UNPCK instruction.
10789 /// This specifically targets cases where we end up alternating between
10790 /// the two inputs, and so can permute them into something that feeds a single
10791 /// UNPCK instruction. Note that this routine only targets integer vectors
10792 /// because for floating point vectors we have a generalized SHUFPS lowering
10793 /// strategy that handles everything that doesn't *exactly* match an unpack,
10794 /// making this clever lowering unnecessary.
10795 static SDValue lowerShuffleAsPermuteAndUnpack(const SDLoc &DL, MVT VT,
10796 SDValue V1, SDValue V2,
10797 ArrayRef<int> Mask,
10798 const X86Subtarget &Subtarget,
10799 SelectionDAG &DAG) {
10800 int Size = Mask.size();
10801 assert(Mask.size() >= 2 && "Single element masks are invalid.");
10803 // This routine only supports 128-bit integer dual input vectors.
10804 if (VT.isFloatingPoint() || !VT.is128BitVector() || V2.isUndef())
10805 return SDValue();
10807 int NumLoInputs =
10808 count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
10809 int NumHiInputs =
10810 count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });
10812 bool UnpackLo = NumLoInputs >= NumHiInputs;
10814 auto TryUnpack = [&](int ScalarSize, int Scale) {
10815 SmallVector<int, 16> V1Mask((unsigned)Size, -1);
10816 SmallVector<int, 16> V2Mask((unsigned)Size, -1);
10818 for (int i = 0; i < Size; ++i) {
10819 if (Mask[i] < 0)
10820 continue;
10822 // Each element of the unpack contains Scale elements from this mask.
10823 int UnpackIdx = i / Scale;
10825 // We only handle the case where V1 feeds the first slots of the unpack.
10826 // We rely on canonicalization to ensure this is the case.
10827 if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
10828 return SDValue();
10830 // Set up the mask for this input. The indexing is tricky as we have to
10831 // handle the unpack stride.
10832 SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
10833 VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
10834 Mask[i] % Size;
10837 // If we will have to shuffle both inputs to use the unpack, check whether
10838 // we can just unpack first and shuffle the result. If so, skip this unpack.
10839 if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
10840 !isNoopShuffleMask(V2Mask))
10841 return SDValue();
10843 // Shuffle the inputs into place.
10844 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
10845 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
10847 // Cast the inputs to the type we will use to unpack them.
10848 MVT UnpackVT =
10849 MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
10850 V1 = DAG.getBitcast(UnpackVT, V1);
10851 V2 = DAG.getBitcast(UnpackVT, V2);
10853 // Unpack the inputs and cast the result back to the desired type.
10854 return DAG.getBitcast(
10855 VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
10856 UnpackVT, V1, V2));
10859 // We try each unpack from the largest to the smallest to try and find one
10860 // that fits this mask.
10861 int OrigScalarSize = VT.getScalarSizeInBits();
10862 for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
10863 if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
10864 return Unpack;
10866 // If we're shuffling with a zero vector then we're better off not doing
10867 // VECTOR_SHUFFLE(UNPCK()) as we lose track of those zero elements.
10868 if (ISD::isBuildVectorAllZeros(V1.getNode()) ||
10869 ISD::isBuildVectorAllZeros(V2.getNode()))
10870 return SDValue();
10872 // If none of the unpack-rooted lowerings worked (or were profitable) try an
10873 // initial unpack.
10874 if (NumLoInputs == 0 || NumHiInputs == 0) {
10875 assert((NumLoInputs > 0 || NumHiInputs > 0) &&
10876 "We have to have *some* inputs!");
10877 int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
10879 // FIXME: We could consider the total complexity of the permute of each
10880 // possible unpacking. Or at the least we should consider how many
10881 // half-crossings are created.
10882 // FIXME: We could consider commuting the unpacks.
10884 SmallVector<int, 32> PermMask((unsigned)Size, -1);
10885 for (int i = 0; i < Size; ++i) {
10886 if (Mask[i] < 0)
10887 continue;
10889 assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
10891 PermMask[i] =
10892 2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
10894 return DAG.getVectorShuffle(
10895 VT, DL,
10896 DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL, DL, VT,
10897 V1, V2),
10898 DAG.getUNDEF(VT), PermMask);
10901 return SDValue();
10904 /// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
10905 /// permuting the elements of the result in place.
10906 static SDValue lowerShuffleAsByteRotateAndPermute(
10907 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10908 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
10909 if ((VT.is128BitVector() && !Subtarget.hasSSSE3()) ||
10910 (VT.is256BitVector() && !Subtarget.hasAVX2()) ||
10911 (VT.is512BitVector() && !Subtarget.hasBWI()))
10912 return SDValue();
10914 // We don't currently support lane crossing permutes.
10915 if (is128BitLaneCrossingShuffleMask(VT, Mask))
10916 return SDValue();
10918 int Scale = VT.getScalarSizeInBits() / 8;
10919 int NumLanes = VT.getSizeInBits() / 128;
10920 int NumElts = VT.getVectorNumElements();
10921 int NumEltsPerLane = NumElts / NumLanes;
10923 // Determine range of mask elts.
10924 bool Blend1 = true;
10925 bool Blend2 = true;
10926 std::pair<int, int> Range1 = std::make_pair(INT_MAX, INT_MIN);
10927 std::pair<int, int> Range2 = std::make_pair(INT_MAX, INT_MIN);
10928 for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
10929 for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
10930 int M = Mask[Lane + Elt];
10931 if (M < 0)
10932 continue;
10933 if (M < NumElts) {
10934 Blend1 &= (M == (Lane + Elt));
10935 assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
10936 M = M % NumEltsPerLane;
10937 Range1.first = std::min(Range1.first, M);
10938 Range1.second = std::max(Range1.second, M);
10939 } else {
10940 M -= NumElts;
10941 Blend2 &= (M == (Lane + Elt));
10942 assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
10943 M = M % NumEltsPerLane;
10944 Range2.first = std::min(Range2.first, M);
10945 Range2.second = std::max(Range2.second, M);
10950 // Bail if we don't need both elements.
10951 // TODO - it might be worth doing this for unary shuffles if the permute
10952 // can be widened.
10953 if (!(0 <= Range1.first && Range1.second < NumEltsPerLane) ||
10954 !(0 <= Range2.first && Range2.second < NumEltsPerLane))
10955 return SDValue();
10957 if (VT.getSizeInBits() > 128 && (Blend1 || Blend2))
10958 return SDValue();
10960 // Rotate the 2 ops so we can access both ranges, then permute the result.
10961 auto RotateAndPermute = [&](SDValue Lo, SDValue Hi, int RotAmt, int Ofs) {
10962 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
10963 SDValue Rotate = DAG.getBitcast(
10964 VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, DAG.getBitcast(ByteVT, Hi),
10965 DAG.getBitcast(ByteVT, Lo),
10966 DAG.getTargetConstant(Scale * RotAmt, DL, MVT::i8)));
10967 SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
10968 for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
10969 for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
10970 int M = Mask[Lane + Elt];
10971 if (M < 0)
10972 continue;
10973 if (M < NumElts)
10974 PermMask[Lane + Elt] = Lane + ((M + Ofs - RotAmt) % NumEltsPerLane);
10975 else
10976 PermMask[Lane + Elt] = Lane + ((M - Ofs - RotAmt) % NumEltsPerLane);
10979 return DAG.getVectorShuffle(VT, DL, Rotate, DAG.getUNDEF(VT), PermMask);
10982 // Check if the ranges are small enough to rotate from either direction.
10983 if (Range2.second < Range1.first)
10984 return RotateAndPermute(V1, V2, Range1.first, 0);
10985 if (Range1.second < Range2.first)
10986 return RotateAndPermute(V2, V1, Range2.first, NumElts);
10987 return SDValue();
10990 static bool isBroadcastShuffleMask(ArrayRef<int> Mask) {
10991 return isUndefOrEqual(Mask, 0);
10994 static bool isNoopOrBroadcastShuffleMask(ArrayRef<int> Mask) {
10995 return isNoopShuffleMask(Mask) || isBroadcastShuffleMask(Mask);
10998 /// Check if the Mask consists of the same element repeated multiple times.
10999 static bool isSingleElementRepeatedMask(ArrayRef<int> Mask) {
11000 size_t NumUndefs = 0;
11001 std::optional<int> UniqueElt;
11002 for (int Elt : Mask) {
11003 if (Elt == SM_SentinelUndef) {
11004 NumUndefs++;
11005 continue;
11007 if (UniqueElt.has_value() && UniqueElt.value() != Elt)
11008 return false;
11009 UniqueElt = Elt;
11011 // Make sure the element is repeated enough times by checking the number of
11012 // undefs is small.
11013 return NumUndefs <= Mask.size() / 2 && UniqueElt.has_value();
11016 /// Generic routine to decompose a shuffle and blend into independent
11017 /// blends and permutes.
11019 /// This matches the extremely common pattern for handling combined
11020 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
11021 /// operations. It will try to pick the best arrangement of shuffles and
11022 /// blends. For vXi8/vXi16 shuffles we may use unpack instead of blend.
11023 static SDValue lowerShuffleAsDecomposedShuffleMerge(
11024 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11025 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11026 int NumElts = Mask.size();
11027 int NumLanes = VT.getSizeInBits() / 128;
11028 int NumEltsPerLane = NumElts / NumLanes;
11030 // Shuffle the input elements into the desired positions in V1 and V2 and
11031 // unpack/blend them together.
11032 bool IsAlternating = true;
11033 SmallVector<int, 32> V1Mask(NumElts, -1);
11034 SmallVector<int, 32> V2Mask(NumElts, -1);
11035 SmallVector<int, 32> FinalMask(NumElts, -1);
11036 for (int i = 0; i < NumElts; ++i) {
11037 int M = Mask[i];
11038 if (M >= 0 && M < NumElts) {
11039 V1Mask[i] = M;
11040 FinalMask[i] = i;
11041 IsAlternating &= (i & 1) == 0;
11042 } else if (M >= NumElts) {
11043 V2Mask[i] = M - NumElts;
11044 FinalMask[i] = i + NumElts;
11045 IsAlternating &= (i & 1) == 1;
11049 // If we effectively only demand the 0'th element of \p Input (though not
11050 // necessarily only in the 0'th position), then broadcast said input
11051 // and change \p InputMask to be a no-op (identity) mask.
11052 auto canonicalizeBroadcastableInput = [DL, VT, &Subtarget,
11053 &DAG](SDValue &Input,
11054 MutableArrayRef<int> InputMask) {
11055 unsigned EltSizeInBits = Input.getScalarValueSizeInBits();
11056 if (!Subtarget.hasAVX2() && (!Subtarget.hasAVX() || EltSizeInBits < 32 ||
11057 !X86::mayFoldLoad(Input, Subtarget)))
11058 return;
11059 if (isNoopShuffleMask(InputMask))
11060 return;
11061 assert(isBroadcastShuffleMask(InputMask) &&
11062 "Expected to demand only the 0'th element.");
11063 Input = DAG.getNode(X86ISD::VBROADCAST, DL, VT, Input);
11064 for (auto I : enumerate(InputMask)) {
11065 int &InputMaskElt = I.value();
11066 if (InputMaskElt >= 0)
11067 InputMaskElt = I.index();
11071 // Currently, we may need to produce one shuffle per input, and blend results.
11072 // It is possible that the shuffle for one of the inputs is already a no-op.
11073 // See if we can simplify non-no-op shuffles into broadcasts,
11074 // which we consider to be strictly better than an arbitrary shuffle.
11075 if (isNoopOrBroadcastShuffleMask(V1Mask) &&
11076 isNoopOrBroadcastShuffleMask(V2Mask)) {
11077 canonicalizeBroadcastableInput(V1, V1Mask);
11078 canonicalizeBroadcastableInput(V2, V2Mask);
11081 // Try to lower with the simpler initial blend/unpack/rotate strategies unless
11082 // one of the input shuffles would be a no-op. We prefer to shuffle inputs as
11083 // the shuffle may be able to fold with a load or other benefit. However, when
11084 // we'll have to do 2x as many shuffles in order to achieve this, a 2-input
11085 // pre-shuffle first is a better strategy.
11086 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) {
11087 // Only prefer immediate blends to unpack/rotate.
11088 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11089 DAG, true))
11090 return BlendPerm;
11091 // If either input vector provides only a single element which is repeated
11092 // multiple times, unpacking from both input vectors would generate worse
11093 // code. e.g. for
11094 // t5: v16i8 = vector_shuffle<16,0,16,1,16,2,16,3,16,4,16,5,16,6,16,7> t2, t4
11095 // it is better to process t4 first to create a vector of t4[0], then unpack
11096 // that vector with t2.
11097 if (!isSingleElementRepeatedMask(V1Mask) &&
11098 !isSingleElementRepeatedMask(V2Mask))
11099 if (SDValue UnpackPerm =
11100 lowerShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask, DAG))
11101 return UnpackPerm;
11102 if (SDValue RotatePerm = lowerShuffleAsByteRotateAndPermute(
11103 DL, VT, V1, V2, Mask, Subtarget, DAG))
11104 return RotatePerm;
11105 // Unpack/rotate failed - try again with variable blends.
11106 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11107 DAG))
11108 return BlendPerm;
11109 if (VT.getScalarSizeInBits() >= 32)
11110 if (SDValue PermUnpack = lowerShuffleAsPermuteAndUnpack(
11111 DL, VT, V1, V2, Mask, Subtarget, DAG))
11112 return PermUnpack;
11115 // If the final mask is an alternating blend of vXi8/vXi16, convert to an
11116 // UNPCKL(SHUFFLE, SHUFFLE) pattern.
11117 // TODO: It doesn't have to be alternating - but each lane mustn't have more
11118 // than half the elements coming from each source.
11119 if (IsAlternating && VT.getScalarSizeInBits() < 32) {
11120 V1Mask.assign(NumElts, -1);
11121 V2Mask.assign(NumElts, -1);
11122 FinalMask.assign(NumElts, -1);
11123 for (int i = 0; i != NumElts; i += NumEltsPerLane)
11124 for (int j = 0; j != NumEltsPerLane; ++j) {
11125 int M = Mask[i + j];
11126 if (M >= 0 && M < NumElts) {
11127 V1Mask[i + (j / 2)] = M;
11128 FinalMask[i + j] = i + (j / 2);
11129 } else if (M >= NumElts) {
11130 V2Mask[i + (j / 2)] = M - NumElts;
11131 FinalMask[i + j] = i + (j / 2) + NumElts;
11136 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
11137 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
11138 return DAG.getVectorShuffle(VT, DL, V1, V2, FinalMask);
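// A standalone sketch of the decomposition above, with plain int masks
// (-1 == undef) and hypothetical names: each lane of the two-input mask is
// routed through a single-input pre-shuffle of V1 or V2, and the final mask
// becomes a simple per-lane select between the two pre-shuffled vectors.
#include <cassert>
#include <vector>

struct Decomposed {
  std::vector<int> V1Mask, V2Mask, FinalMask;
};

static Decomposed decompose(const std::vector<int> &Mask) {
  int NumElts = (int)Mask.size();
  Decomposed D{std::vector<int>(NumElts, -1), std::vector<int>(NumElts, -1),
               std::vector<int>(NumElts, -1)};
  for (int I = 0; I != NumElts; ++I) {
    int M = Mask[I];
    if (M >= 0 && M < NumElts) {          // comes from V1
      D.V1Mask[I] = M;
      D.FinalMask[I] = I;
    } else if (M >= NumElts) {            // comes from V2
      D.V2Mask[I] = M - NumElts;
      D.FinalMask[I] = I + NumElts;
    }
  }
  return D;
}

int main() {
  // v4i32 mask <6,1,7,0>: lanes 0/2 read V2, lanes 1/3 read V1.
  Decomposed D = decompose({6, 1, 7, 0});
  assert((D.V1Mask == std::vector<int>{-1, 1, -1, 0}));
  assert((D.V2Mask == std::vector<int>{2, -1, 3, -1}));
  assert((D.FinalMask == std::vector<int>{4, 1, 6, 3}));
  return 0;
}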
11141 static int matchShuffleAsBitRotate(MVT &RotateVT, int EltSizeInBits,
11142 const X86Subtarget &Subtarget,
11143 ArrayRef<int> Mask) {
11144 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11145 assert(EltSizeInBits < 64 && "Can't rotate 64-bit integers");
11147 // AVX512 only has vXi32/vXi64 rotates, so limit the rotation sub group size.
11148 int MinSubElts = Subtarget.hasAVX512() ? std::max(32 / EltSizeInBits, 2) : 2;
11149 int MaxSubElts = 64 / EltSizeInBits;
11150 unsigned RotateAmt, NumSubElts;
11151 if (!ShuffleVectorInst::isBitRotateMask(Mask, EltSizeInBits, MinSubElts,
11152 MaxSubElts, NumSubElts, RotateAmt))
11153 return -1;
11154 unsigned NumElts = Mask.size();
11155 MVT RotateSVT = MVT::getIntegerVT(EltSizeInBits * NumSubElts);
11156 RotateVT = MVT::getVectorVT(RotateSVT, NumElts / NumSubElts);
11157 return RotateAmt;
11160 /// Lower shuffle using X86ISD::VROTLI rotations.
11161 static SDValue lowerShuffleAsBitRotate(const SDLoc &DL, MVT VT, SDValue V1,
11162 ArrayRef<int> Mask,
11163 const X86Subtarget &Subtarget,
11164 SelectionDAG &DAG) {
11165 // Only XOP + AVX512 targets have bit rotation instructions.
11166 // If we at least have SSSE3 (PSHUFB) then we shouldn't attempt to use this.
11167 bool IsLegal =
11168 (VT.is128BitVector() && Subtarget.hasXOP()) || Subtarget.hasAVX512();
11169 if (!IsLegal && Subtarget.hasSSE3())
11170 return SDValue();
11172 MVT RotateVT;
11173 int RotateAmt = matchShuffleAsBitRotate(RotateVT, VT.getScalarSizeInBits(),
11174 Subtarget, Mask);
11175 if (RotateAmt < 0)
11176 return SDValue();
11178 // For pre-SSSE3 targets, if we are shuffling vXi8 elts then ISD::ROTL,
11179 // expanded to OR(SRL,SHL), will be more efficient, but if they can
11180 // widen to vXi16 or more then the existing lowering should be better.
11181 if (!IsLegal) {
11182 if ((RotateAmt % 16) == 0)
11183 return SDValue();
11184 // TODO: Use getTargetVShiftByConstNode.
11185 unsigned ShlAmt = RotateAmt;
11186 unsigned SrlAmt = RotateVT.getScalarSizeInBits() - RotateAmt;
11187 V1 = DAG.getBitcast(RotateVT, V1);
11188 SDValue SHL = DAG.getNode(X86ISD::VSHLI, DL, RotateVT, V1,
11189 DAG.getTargetConstant(ShlAmt, DL, MVT::i8));
11190 SDValue SRL = DAG.getNode(X86ISD::VSRLI, DL, RotateVT, V1,
11191 DAG.getTargetConstant(SrlAmt, DL, MVT::i8));
11192 SDValue Rot = DAG.getNode(ISD::OR, DL, RotateVT, SHL, SRL);
11193 return DAG.getBitcast(VT, Rot);
11196 SDValue Rot =
11197 DAG.getNode(X86ISD::VROTLI, DL, RotateVT, DAG.getBitcast(RotateVT, V1),
11198 DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
11199 return DAG.getBitcast(VT, Rot);
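// A small sketch of why a byte shuffle can be a bit rotate, assuming a
// little-endian host for the memcpy demonstration: rotating an i32 lane left by
// 8 bits permutes its four bytes as <3,0,1,2>, so a v16i8 shuffle repeating
// that pattern per dword can be lowered as a VROTLI/VPROLD by 8. Names are
// hypothetical.
#include <cassert>
#include <cstdint>
#include <cstring>

static uint32_t rotl32(uint32_t X, unsigned R) {
  return (X << R) | (X >> (32 - R));
}

int main() {
  uint32_t X = 0x44332211u;               // little-endian bytes: 11 22 33 44
  uint32_t R = rotl32(X, 8);
  uint8_t Bytes[4];
  std::memcpy(Bytes, &R, sizeof(R));
  // After rotl by 8 the byte order (LSB first) is 44 11 22 33, i.e. the
  // original bytes picked with the shuffle mask <3,0,1,2>.
  assert(Bytes[0] == 0x44 && Bytes[1] == 0x11 && Bytes[2] == 0x22 && Bytes[3] == 0x33);
  return 0;
}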
11202 /// Try to match a vector shuffle as an element rotation.
11204 /// This is used to support PALIGNR for SSSE3 or VALIGND/Q for AVX512.
11205 static int matchShuffleAsElementRotate(SDValue &V1, SDValue &V2,
11206 ArrayRef<int> Mask) {
11207 int NumElts = Mask.size();
11209 // We need to detect various ways of spelling a rotation:
11210 // [11, 12, 13, 14, 15, 0, 1, 2]
11211 // [-1, 12, 13, 14, -1, -1, 1, -1]
11212 // [-1, -1, -1, -1, -1, -1, 1, 2]
11213 // [ 3, 4, 5, 6, 7, 8, 9, 10]
11214 // [-1, 4, 5, 6, -1, -1, 9, -1]
11215 // [-1, 4, 5, 6, -1, -1, -1, -1]
11216 int Rotation = 0;
11217 SDValue Lo, Hi;
11218 for (int i = 0; i < NumElts; ++i) {
11219 int M = Mask[i];
11220 assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
11221 "Unexpected mask index.");
11222 if (M < 0)
11223 continue;
11225 // Determine where a rotated vector would have started.
11226 int StartIdx = i - (M % NumElts);
11227 if (StartIdx == 0)
11228 // The identity rotation isn't interesting, stop.
11229 return -1;
11231 // If we found the tail of a vector the rotation must be the missing
11232 // front. If we found the head of a vector, it must be how much of the
11233 // head.
11234 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
11236 if (Rotation == 0)
11237 Rotation = CandidateRotation;
11238 else if (Rotation != CandidateRotation)
11239 // The rotations don't match, so we can't match this mask.
11240 return -1;
11242 // Compute which value this mask is pointing at.
11243 SDValue MaskV = M < NumElts ? V1 : V2;
11245 // Compute which of the two target values this index should be assigned
11246 // to. This reflects whether the high elements are remaining or the low
11247 // elements are remaining.
11248 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
11250 // Either set up this value if we've not encountered it before, or check
11251 // that it remains consistent.
11252 if (!TargetV)
11253 TargetV = MaskV;
11254 else if (TargetV != MaskV)
11255 // This may be a rotation, but it pulls from the inputs in some
11256 // unsupported interleaving.
11257 return -1;
11260 // Check that we successfully analyzed the mask, and normalize the results.
11261 assert(Rotation != 0 && "Failed to locate a viable rotation!");
11262 assert((Lo || Hi) && "Failed to find a rotated input vector!");
11263 if (!Lo)
11264 Lo = Hi;
11265 else if (!Hi)
11266 Hi = Lo;
11268 V1 = Lo;
11269 V2 = Hi;
11271 return Rotation;
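// The rotation detection above, restated over plain int masks (-1 == undef):
// every defined element I must sit at one consistent distance from where a
// rotate of the two-vector concatenation would have put it. Returns the element
// rotation or -1. The helper name is hypothetical and the per-element source
// (Lo/Hi) bookkeeping is omitted.
#include <cassert>
#include <vector>

static int matchElementRotate(const std::vector<int> &Mask) {
  int NumElts = (int)Mask.size();
  int Rotation = 0;
  for (int I = 0; I != NumElts; ++I) {
    int M = Mask[I];
    if (M < 0)
      continue;
    int StartIdx = I - (M % NumElts);
    if (StartIdx == 0)
      return -1;                           // identity rotation: not interesting
    int Candidate = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
    if (Rotation == 0)
      Rotation = Candidate;
    else if (Rotation != Candidate)
      return -1;                           // inconsistent rotation amounts
  }
  return Rotation == 0 ? -1 : Rotation;
}

int main() {
  // Example masks from the comment above (v8i16, inputs 0..7 and 8..15); all
  // three spell the same rotation by 3 elements.
  assert(matchElementRotate({11, 12, 13, 14, 15, 0, 1, 2}) == 3);
  assert(matchElementRotate({-1, 12, 13, 14, -1, -1, 1, -1}) == 3);
  assert(matchElementRotate({3, 4, 5, 6, 7, 8, 9, 10}) == 3);
  return 0;
}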
11274 /// Try to lower a vector shuffle as a byte rotation.
11276 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
11277 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
11278 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
11279 /// try to generically lower a vector shuffle through such a pattern. It
11280 /// does not check for the profitability of lowering either as PALIGNR or
11281 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
11282 /// This matches shuffle vectors that look like:
11284 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
11286 /// Essentially it concatenates V1 and V2, shifts right by some number of
11287 /// elements, and takes the low elements as the result. Note that while this is
11288 /// specified as a *right shift* because x86 is little-endian, it is a *left
11289 /// rotate* of the vector lanes.
11290 static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
11291 ArrayRef<int> Mask) {
11292 // Don't accept any shuffles with zero elements.
11293 if (isAnyZero(Mask))
11294 return -1;
11296 // PALIGNR works on 128-bit lanes.
11297 SmallVector<int, 16> RepeatedMask;
11298 if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
11299 return -1;
11301 int Rotation = matchShuffleAsElementRotate(V1, V2, RepeatedMask);
11302 if (Rotation <= 0)
11303 return -1;
11305 // PALIGNR rotates bytes, so we need to scale the
11306 // rotation based on how many bytes are in the vector lane.
11307 int NumElts = RepeatedMask.size();
11308 int Scale = 16 / NumElts;
11309 return Rotation * Scale;
11312 static SDValue lowerShuffleAsByteRotate(const SDLoc &DL, MVT VT, SDValue V1,
11313 SDValue V2, ArrayRef<int> Mask,
11314 const X86Subtarget &Subtarget,
11315 SelectionDAG &DAG) {
11316 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11318 SDValue Lo = V1, Hi = V2;
11319 int ByteRotation = matchShuffleAsByteRotate(VT, Lo, Hi, Mask);
11320 if (ByteRotation <= 0)
11321 return SDValue();
11323 // Cast the inputs to an i8 vector of the correct length to match PALIGNR or
11324 // PSLLDQ/PSRLDQ.
11325 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11326 Lo = DAG.getBitcast(ByteVT, Lo);
11327 Hi = DAG.getBitcast(ByteVT, Hi);
11329 // SSSE3 targets can use the palignr instruction.
11330 if (Subtarget.hasSSSE3()) {
11331 assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
11332 "512-bit PALIGNR requires BWI instructions");
11333 return DAG.getBitcast(
11334 VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
11335 DAG.getTargetConstant(ByteRotation, DL, MVT::i8)));
11338 assert(VT.is128BitVector() &&
11339 "Rotate-based lowering only supports 128-bit lowering!");
11340 assert(Mask.size() <= 16 &&
11341 "Can shuffle at most 16 bytes in a 128-bit vector!");
11342 assert(ByteVT == MVT::v16i8 &&
11343 "SSE2 rotate lowering only needed for v16i8!");
11345 // Default SSE2 implementation
11346 int LoByteShift = 16 - ByteRotation;
11347 int HiByteShift = ByteRotation;
11349 SDValue LoShift =
11350 DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
11351 DAG.getTargetConstant(LoByteShift, DL, MVT::i8));
11352 SDValue HiShift =
11353 DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
11354 DAG.getTargetConstant(HiByteShift, DL, MVT::i8));
11355 return DAG.getBitcast(VT,
11356 DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
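// A byte-array sketch of the SSE2 fallback above, assuming little-endian lane
// order and hypothetical names: ORing Lo shifted up by (16 - R) byte lanes with
// Hi shifted down by R byte lanes yields the 32-byte value formed by Hi (low
// half) and Lo (high half) rotated right by R bytes, i.e. the same result a
// PALIGNR by R would produce.
#include <array>
#include <cassert>
#include <cstdint>

using V16 = std::array<uint8_t, 16>;

static V16 pslldqModel(const V16 &A, int N) {   // bytes move to higher lanes
  V16 R{};
  for (int I = N; I != 16; ++I)
    R[I] = A[I - N];
  return R;
}

static V16 psrldqModel(const V16 &A, int N) {   // bytes move to lower lanes
  V16 R{};
  for (int I = 0; I != 16 - N; ++I)
    R[I] = A[I + N];
  return R;
}

int main() {
  V16 Lo, Hi;
  for (int I = 0; I != 16; ++I) {
    Lo[I] = uint8_t(I);        // bytes 0..15
    Hi[I] = uint8_t(16 + I);   // bytes 16..31
  }
  int Rot = 5;                 // rotate the 32-byte concat right by 5 bytes
  V16 LoShift = pslldqModel(Lo, 16 - Rot);
  V16 HiShift = psrldqModel(Hi, Rot);
  V16 Res{};
  for (int I = 0; I != 16; ++I)
    Res[I] = uint8_t(LoShift[I] | HiShift[I]);
  // Lanes 0..10 come from Hi (bytes 21..31), lanes 11..15 from Lo (bytes 0..4).
  assert(Res[0] == 21 && Res[10] == 31 && Res[11] == 0 && Res[15] == 4);
  return 0;
}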
11359 /// Try to lower a vector shuffle as a dword/qword rotation.
11361 /// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
11362 /// rotation of the concatenation of two vectors; this routine will
11363 /// try to generically lower a vector shuffle through such a pattern.
11365 /// Essentially it concatenates V1 and V2, shifts right by some number of
11366 /// elements, and takes the low elements as the result. Note that while this is
11367 /// specified as a *right shift* because x86 is little-endian, it is a *left
11368 /// rotate* of the vector lanes.
11369 static SDValue lowerShuffleAsVALIGN(const SDLoc &DL, MVT VT, SDValue V1,
11370 SDValue V2, ArrayRef<int> Mask,
11371 const APInt &Zeroable,
11372 const X86Subtarget &Subtarget,
11373 SelectionDAG &DAG) {
11374 assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
11375 "Only 32-bit and 64-bit elements are supported!");
11377 // 128/256-bit vectors are only supported with VLX.
11378 assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
11379 && "VLX required for 128/256-bit vectors");
11381 SDValue Lo = V1, Hi = V2;
11382 int Rotation = matchShuffleAsElementRotate(Lo, Hi, Mask);
11383 if (0 < Rotation)
11384 return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
11385 DAG.getTargetConstant(Rotation, DL, MVT::i8));
11387 // See if we can use VALIGN as a cross-lane version of VSHLDQ/VSRLDQ.
11388 // TODO: Pull this out as a matchShuffleAsElementShift helper?
11389 // TODO: We can probably make this more aggressive and use shift-pairs like
11390 // lowerShuffleAsByteShiftMask.
11391 unsigned NumElts = Mask.size();
11392 unsigned ZeroLo = Zeroable.countr_one();
11393 unsigned ZeroHi = Zeroable.countl_one();
11394 assert((ZeroLo + ZeroHi) < NumElts && "Zeroable shuffle detected");
11395 if (!ZeroLo && !ZeroHi)
11396 return SDValue();
11398 if (ZeroLo) {
11399 SDValue Src = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
11400 int Low = Mask[ZeroLo] < (int)NumElts ? 0 : NumElts;
11401 if (isSequentialOrUndefInRange(Mask, ZeroLo, NumElts - ZeroLo, Low))
11402 return DAG.getNode(X86ISD::VALIGN, DL, VT, Src,
11403 getZeroVector(VT, Subtarget, DAG, DL),
11404 DAG.getTargetConstant(NumElts - ZeroLo, DL, MVT::i8));
11407 if (ZeroHi) {
11408 SDValue Src = Mask[0] < (int)NumElts ? V1 : V2;
11409 int Low = Mask[0] < (int)NumElts ? 0 : NumElts;
11410 if (isSequentialOrUndefInRange(Mask, 0, NumElts - ZeroHi, Low + ZeroHi))
11411 return DAG.getNode(X86ISD::VALIGN, DL, VT,
11412 getZeroVector(VT, Subtarget, DAG, DL), Src,
11413 DAG.getTargetConstant(ZeroHi, DL, MVT::i8));
11416 return SDValue();
11419 /// Try to lower a vector shuffle as a byte shift sequence.
11420 static SDValue lowerShuffleAsByteShiftMask(const SDLoc &DL, MVT VT, SDValue V1,
11421 SDValue V2, ArrayRef<int> Mask,
11422 const APInt &Zeroable,
11423 const X86Subtarget &Subtarget,
11424 SelectionDAG &DAG) {
11425 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11426 assert(VT.is128BitVector() && "Only 128-bit vectors supported");
11428 // We need a shuffle that has zeros at one/both ends and a sequential
11429 // shuffle from one source within.
11430 unsigned ZeroLo = Zeroable.countr_one();
11431 unsigned ZeroHi = Zeroable.countl_one();
11432 if (!ZeroLo && !ZeroHi)
11433 return SDValue();
11435 unsigned NumElts = Mask.size();
11436 unsigned Len = NumElts - (ZeroLo + ZeroHi);
11437 if (!isSequentialOrUndefInRange(Mask, ZeroLo, Len, Mask[ZeroLo]))
11438 return SDValue();
11440 unsigned Scale = VT.getScalarSizeInBits() / 8;
11441 ArrayRef<int> StubMask = Mask.slice(ZeroLo, Len);
11442 if (!isUndefOrInRange(StubMask, 0, NumElts) &&
11443 !isUndefOrInRange(StubMask, NumElts, 2 * NumElts))
11444 return SDValue();
11446 SDValue Res = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
11447 Res = DAG.getBitcast(MVT::v16i8, Res);
11449 // Use VSHLDQ/VSRLDQ ops to zero the ends of a vector and leave an
11450 // inner sequential set of elements, possibly offset:
11451 // 01234567 --> zzzzzz01 --> 1zzzzzzz
11452 // 01234567 --> 4567zzzz --> zzzzz456
11453 // 01234567 --> z0123456 --> 3456zzzz --> zz3456zz
11454 if (ZeroLo == 0) {
11455 unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11456 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11457 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11458 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11459 DAG.getTargetConstant(Scale * ZeroHi, DL, MVT::i8));
11460 } else if (ZeroHi == 0) {
11461 unsigned Shift = Mask[ZeroLo] % NumElts;
11462 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11463 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11464 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11465 DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
11466 } else if (!Subtarget.hasSSSE3()) {
11467 // If we don't have PSHUFB then it's worth avoiding an AND constant mask
11468 // by performing 3 byte shifts. Shuffle combining can kick in above that.
11469 // TODO: There may be some cases where VSH{LR}DQ+PAND is still better.
11470 unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11471 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11472 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11473 Shift += Mask[ZeroLo] % NumElts;
11474 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11475 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11476 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11477 DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
11478 } else
11479 return SDValue();
11481 return DAG.getBitcast(VT, Res);
11484 /// Try to lower a vector shuffle as a bit shift (shifts in zeros).
11486 /// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
11487 /// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
11488 /// matches elements from one of the input vectors shuffled to the left or
11489 /// right with zeroable elements 'shifted in'. It handles both the strictly
11490 /// bit-wise element shifts and the byte shift across an entire 128-bit double
11491 /// quad word lane.
11493 /// PSHL : (little-endian) left bit shift.
11494 /// [ zz, 0, zz, 2 ]
11495 /// [ -1, 4, zz, -1 ]
11496 /// PSRL : (little-endian) right bit shift.
11497 /// [ 1, zz, 3, zz]
11498 /// [ -1, -1, 7, zz]
11499 /// PSLLDQ : (little-endian) left byte shift
11500 /// [ zz, 0, 1, 2, 3, 4, 5, 6]
11501 /// [ zz, zz, -1, -1, 2, 3, 4, -1]
11502 /// [ zz, zz, zz, zz, zz, zz, -1, 1]
11503 /// PSRLDQ : (little-endian) right byte shift
11504 /// [ 5, 6, 7, zz, zz, zz, zz, zz]
11505 /// [ -1, 5, 6, 7, zz, zz, zz, zz]
11506 /// [ 1, 2, -1, -1, -1, -1, zz, zz]
11507 static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
11508 unsigned ScalarSizeInBits, ArrayRef<int> Mask,
11509 int MaskOffset, const APInt &Zeroable,
11510 const X86Subtarget &Subtarget) {
11511 int Size = Mask.size();
11512 unsigned SizeInBits = Size * ScalarSizeInBits;
11514 auto CheckZeros = [&](int Shift, int Scale, bool Left) {
11515 for (int i = 0; i < Size; i += Scale)
11516 for (int j = 0; j < Shift; ++j)
11517 if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
11518 return false;
11520 return true;
11523 auto MatchShift = [&](int Shift, int Scale, bool Left) {
11524 for (int i = 0; i != Size; i += Scale) {
11525 unsigned Pos = Left ? i + Shift : i;
11526 unsigned Low = Left ? i : i + Shift;
11527 unsigned Len = Scale - Shift;
11528 if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
11529 return -1;
11532 int ShiftEltBits = ScalarSizeInBits * Scale;
11533 bool ByteShift = ShiftEltBits > 64;
11534 Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
11535 : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
11536 int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
11538 // Normalize the scale for byte shifts to still produce an i64 element
11539 // type.
11540 Scale = ByteShift ? Scale / 2 : Scale;
11542 // We need to round trip through the appropriate type for the shift.
11543 MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
11544 ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
11545 : MVT::getVectorVT(ShiftSVT, Size / Scale);
11546 return (int)ShiftAmt;
11549 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
11550 // keep doubling the size of the integer elements up to that. We can
11551 // then shift the elements of the integer vector by whole multiples of
11552 // their width within the elements of the larger integer vector. Test each
11553 // multiple to see if we can find a match with the moved element indices
11554 // and that the shifted in elements are all zeroable.
11555 unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
11556 for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
11557 for (int Shift = 1; Shift != Scale; ++Shift)
11558 for (bool Left : {true, false})
11559 if (CheckZeros(Shift, Scale, Left)) {
11560 int ShiftAmt = MatchShift(Shift, Scale, Left);
11561 if (0 < ShiftAmt)
11562 return ShiftAmt;
11565 // no match
11566 return -1;
11569 static SDValue lowerShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
11570 SDValue V2, ArrayRef<int> Mask,
11571 const APInt &Zeroable,
11572 const X86Subtarget &Subtarget,
11573 SelectionDAG &DAG, bool BitwiseOnly) {
11574 int Size = Mask.size();
11575 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11577 MVT ShiftVT;
11578 SDValue V = V1;
11579 unsigned Opcode;
11581 // Try to match shuffle against V1 shift.
11582 int ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11583 Mask, 0, Zeroable, Subtarget);
11585 // If V1 failed, try to match shuffle against V2 shift.
11586 if (ShiftAmt < 0) {
11587 ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11588 Mask, Size, Zeroable, Subtarget);
11589 V = V2;
11592 if (ShiftAmt < 0)
11593 return SDValue();
11595 if (BitwiseOnly && (Opcode == X86ISD::VSHLDQ || Opcode == X86ISD::VSRLDQ))
11596 return SDValue();
11598 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
11599 "Illegal integer vector type");
11600 V = DAG.getBitcast(ShiftVT, V);
11601 V = DAG.getNode(Opcode, DL, ShiftVT, V,
11602 DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
11603 return DAG.getBitcast(VT, V);
11606 // EXTRQ: Extract Len elements from lower half of source, starting at Idx.
11607 // Remainder of lower half result is zero and upper half is all undef.
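// e.g. for v8i16, the mask [ 1, 2, zz, zz, -1, -1, -1, -1 ] is matched with
// Len = 2 and Idx = 1 (two elements extracted starting at element 1).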
11608 static bool matchShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
11609 ArrayRef<int> Mask, uint64_t &BitLen,
11610 uint64_t &BitIdx, const APInt &Zeroable) {
11611 int Size = Mask.size();
11612 int HalfSize = Size / 2;
11613 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11614 assert(!Zeroable.isAllOnes() && "Fully zeroable shuffle mask");
11616 // Upper half must be undefined.
11617 if (!isUndefUpperHalf(Mask))
11618 return false;
11620 // Determine the extraction length from the part of the
11621 // lower half that isn't zeroable.
11622 int Len = HalfSize;
11623 for (; Len > 0; --Len)
11624 if (!Zeroable[Len - 1])
11625 break;
11626 assert(Len > 0 && "Zeroable shuffle mask");
11628 // Attempt to match first Len sequential elements from the lower half.
11629 SDValue Src;
11630 int Idx = -1;
11631 for (int i = 0; i != Len; ++i) {
11632 int M = Mask[i];
11633 if (M == SM_SentinelUndef)
11634 continue;
11635 SDValue &V = (M < Size ? V1 : V2);
11636 M = M % Size;
11638 // The extracted elements must start at a valid index and all mask
11639 // elements must be in the lower half.
11640 if (i > M || M >= HalfSize)
11641 return false;
11643 if (Idx < 0 || (Src == V && Idx == (M - i))) {
11644 Src = V;
11645 Idx = M - i;
11646 continue;
11648 return false;
11651 if (!Src || Idx < 0)
11652 return false;
11654 assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
11655 BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
11656 BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
11657 V1 = Src;
11658 return true;
11661 // INSERTQ: Extract lowest Len elements from lower half of second source and
11662 // insert over first source, starting at Idx.
11663 // { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
11664 static bool matchShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
11665 ArrayRef<int> Mask, uint64_t &BitLen,
11666 uint64_t &BitIdx) {
11667 int Size = Mask.size();
11668 int HalfSize = Size / 2;
11669 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11671 // Upper half must be undefined.
11672 if (!isUndefUpperHalf(Mask))
11673 return false;
11675 for (int Idx = 0; Idx != HalfSize; ++Idx) {
11676 SDValue Base;
11678 // Attempt to match first source from mask before insertion point.
11679 if (isUndefInRange(Mask, 0, Idx)) {
11680 /* EMPTY */
11681 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
11682 Base = V1;
11683 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
11684 Base = V2;
11685 } else {
11686 continue;
11689 // Extend the extraction length looking to match both the insertion of
11690 // the second source and the remaining elements of the first.
11691 for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
11692 SDValue Insert;
11693 int Len = Hi - Idx;
11695 // Match insertion.
11696 if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
11697 Insert = V1;
11698 } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
11699 Insert = V2;
11700 } else {
11701 continue;
11704 // Match the remaining elements of the lower half.
11705 if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
11706 /* EMPTY */
11707 } else if ((!Base || (Base == V1)) &&
11708 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
11709 Base = V1;
11710 } else if ((!Base || (Base == V2)) &&
11711 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
11712 Size + Hi)) {
11713 Base = V2;
11714 } else {
11715 continue;
11718 BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
11719 BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
11720 V1 = Base;
11721 V2 = Insert;
11722 return true;
11726 return false;
11729 /// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
11730 static SDValue lowerShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
11731 SDValue V2, ArrayRef<int> Mask,
11732 const APInt &Zeroable, SelectionDAG &DAG) {
11733 uint64_t BitLen, BitIdx;
11734 if (matchShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
11735 return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
11736 DAG.getTargetConstant(BitLen, DL, MVT::i8),
11737 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
11739 if (matchShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
11740 return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
11741 V2 ? V2 : DAG.getUNDEF(VT),
11742 DAG.getTargetConstant(BitLen, DL, MVT::i8),
11743 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
11745 return SDValue();
11748 /// Lower a vector shuffle as a zero or any extension.
11750 /// Given a specific number of elements, element bit width, and extension
11751 /// stride, produce either a zero or any extension based on the available
11752 /// features of the subtarget. The extended elements are consecutive and
11753 /// can start from an offset element index in the input; to avoid excess
11754 /// shuffling, the offset must either be in the bottom lane
11755 /// or at the start of a higher lane. All extended elements must be from
11756 /// the same lane.
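///
/// For example, Scale == 2 with Offset == 0 on a v16i8 input corresponds to
/// the shuffle mask [ 0, zz, 1, zz, 2, zz, ..., 7, zz ], i.e. zero (or any)
/// extending the low eight bytes to 16-bit elements.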
11757 static SDValue lowerShuffleAsSpecificZeroOrAnyExtend(
11758 const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
11759 ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11760 assert(Scale > 1 && "Need a scale to extend.");
11761 int EltBits = VT.getScalarSizeInBits();
11762 int NumElements = VT.getVectorNumElements();
11763 int NumEltsPerLane = 128 / EltBits;
11764 int OffsetLane = Offset / NumEltsPerLane;
11765 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
11766 "Only 8, 16, and 32 bit elements can be extended.");
11767 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
11768 assert(0 <= Offset && "Extension offset must be positive.");
11769 assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
11770 "Extension offset must be in the first lane or start an upper lane.");
11772 // Check that an index is in the same lane as the base offset.
11773 auto SafeOffset = [&](int Idx) {
11774 return OffsetLane == (Idx / NumEltsPerLane);
11777 // Shift along an input so that the offset base moves to the first element.
11778 auto ShuffleOffset = [&](SDValue V) {
11779 if (!Offset)
11780 return V;
11782 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
11783 for (int i = 0; i * Scale < NumElements; ++i) {
11784 int SrcIdx = i + Offset;
11785 ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
11787 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
11790 // Found a valid a/zext mask! Try various lowering strategies based on the
11791 // input type and available ISA extensions.
11792 if (Subtarget.hasSSE41()) {
11793 // Not worth offsetting 128-bit vectors if scale == 2; a pattern using
11794 // PUNPCK will catch this in a later shuffle match.
11795 if (Offset && Scale == 2 && VT.is128BitVector())
11796 return SDValue();
11797 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
11798 NumElements / Scale);
11799 InputV = DAG.getBitcast(VT, InputV);
11800 InputV = ShuffleOffset(InputV);
11801 InputV = getEXTEND_VECTOR_INREG(AnyExt ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND,
11802 DL, ExtVT, InputV, DAG);
11803 return DAG.getBitcast(VT, InputV);
11806 assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");
11807 InputV = DAG.getBitcast(VT, InputV);
11809 // For any extends we can cheat for larger element sizes and use shuffle
11810 // instructions that can fold with a load and/or copy.
11811 if (AnyExt && EltBits == 32) {
11812 int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
11813 -1};
11814 return DAG.getBitcast(
11815 VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
11816 DAG.getBitcast(MVT::v4i32, InputV),
11817 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
11819 if (AnyExt && EltBits == 16 && Scale > 2) {
11820 int PSHUFDMask[4] = {Offset / 2, -1,
11821 SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
11822 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
11823 DAG.getBitcast(MVT::v4i32, InputV),
11824 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
11825 int PSHUFWMask[4] = {1, -1, -1, -1};
11826 unsigned OddEvenOp = (Offset & 1) ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
11827 return DAG.getBitcast(
11828 VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
11829 DAG.getBitcast(MVT::v8i16, InputV),
11830 getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
11833 // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
11834 // to 64-bits.
11835 if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
11836 assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
11837 assert(VT.is128BitVector() && "Unexpected vector width!");
11839 int LoIdx = Offset * EltBits;
11840 SDValue Lo = DAG.getBitcast(
11841 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
11842 DAG.getTargetConstant(EltBits, DL, MVT::i8),
11843 DAG.getTargetConstant(LoIdx, DL, MVT::i8)));
11845 if (isUndefUpperHalf(Mask) || !SafeOffset(Offset + 1))
11846 return DAG.getBitcast(VT, Lo);
11848 int HiIdx = (Offset + 1) * EltBits;
11849 SDValue Hi = DAG.getBitcast(
11850 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
11851 DAG.getTargetConstant(EltBits, DL, MVT::i8),
11852 DAG.getTargetConstant(HiIdx, DL, MVT::i8)));
11853 return DAG.getBitcast(VT,
11854 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
11857 // If this would require more than 2 unpack instructions to expand, use
11858 // pshufb when available. We can only use more than 2 unpack instructions
11859 // when zero extending i8 elements which also makes it easier to use pshufb.
11860 if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
11861 assert(NumElements == 16 && "Unexpected byte vector width!");
11862 SDValue PSHUFBMask[16];
11863 for (int i = 0; i < 16; ++i) {
11864 int Idx = Offset + (i / Scale);
11865 if ((i % Scale == 0 && SafeOffset(Idx))) {
11866 PSHUFBMask[i] = DAG.getConstant(Idx, DL, MVT::i8);
11867 continue;
11869 PSHUFBMask[i] =
11870 AnyExt ? DAG.getUNDEF(MVT::i8) : DAG.getConstant(0x80, DL, MVT::i8);
11872 InputV = DAG.getBitcast(MVT::v16i8, InputV);
11873 return DAG.getBitcast(
11874 VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
11875 DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
11878 // If we are extending from an offset, ensure we start on a boundary that
11879 // we can unpack from.
11880 int AlignToUnpack = Offset % (NumElements / Scale);
11881 if (AlignToUnpack) {
11882 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
11883 for (int i = AlignToUnpack; i < NumElements; ++i)
11884 ShMask[i - AlignToUnpack] = i;
11885 InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
11886 Offset -= AlignToUnpack;
11889 // Otherwise emit a sequence of unpacks.
11890 do {
11891 unsigned UnpackLoHi = X86ISD::UNPCKL;
11892 if (Offset >= (NumElements / 2)) {
11893 UnpackLoHi = X86ISD::UNPCKH;
11894 Offset -= (NumElements / 2);
11897 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
11898 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
11899 : getZeroVector(InputVT, Subtarget, DAG, DL);
11900 InputV = DAG.getBitcast(InputVT, InputV);
11901 InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
11902 Scale /= 2;
11903 EltBits *= 2;
11904 NumElements /= 2;
11905 } while (Scale > 1);
11906 return DAG.getBitcast(VT, InputV);
11909 /// Try to lower a vector shuffle as a zero extension on any microarch.
11911 /// This routine will try to do everything in its power to cleverly lower
11912 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
11913 /// check for the profitability of this lowering; it tries to aggressively
11914 /// match this pattern. It will use all of the micro-architectural details it
11915 /// can to emit an efficient lowering. It handles both blends with all-zero
11916 /// inputs (to explicitly zero-extend) and undef lanes (sometimes undef due to
11917 /// later masking).
11919 /// The reason we have dedicated lowering for zext-style shuffles is that they
11920 /// are both incredibly common and often quite performance sensitive.
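///
/// For example, for v4i32 the mask [ 0, zz, 1, zz ] (where zz denotes a
/// zeroable element) is a zero extension of the two low 32-bit elements to
/// 64 bits.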
11921 static SDValue lowerShuffleAsZeroOrAnyExtend(
11922 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11923 const APInt &Zeroable, const X86Subtarget &Subtarget,
11924 SelectionDAG &DAG) {
11925 int Bits = VT.getSizeInBits();
11926 int NumLanes = Bits / 128;
11927 int NumElements = VT.getVectorNumElements();
11928 int NumEltsPerLane = NumElements / NumLanes;
11929 assert(VT.getScalarSizeInBits() <= 32 &&
11930 "Exceeds 32-bit integer zero extension limit");
11931 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
11933 // Define a helper function to check a particular ext-scale and lower to it if
11934 // valid.
11935 auto Lower = [&](int Scale) -> SDValue {
11936 SDValue InputV;
11937 bool AnyExt = true;
11938 int Offset = 0;
11939 int Matches = 0;
11940 for (int i = 0; i < NumElements; ++i) {
11941 int M = Mask[i];
11942 if (M < 0)
11943 continue; // Valid anywhere but doesn't tell us anything.
11944 if (i % Scale != 0) {
11945 // Each of the extended elements needs to be zeroable.
11946 if (!Zeroable[i])
11947 return SDValue();
11949 // We are no longer in the anyext case.
11950 AnyExt = false;
11951 continue;
11954 // The base elements need to be consecutive indices into the
11955 // same input vector.
11956 SDValue V = M < NumElements ? V1 : V2;
11957 M = M % NumElements;
11958 if (!InputV) {
11959 InputV = V;
11960 Offset = M - (i / Scale);
11961 } else if (InputV != V)
11962 return SDValue(); // Flip-flopping inputs.
11964 // Offset must start in the lowest 128-bit lane or at the start of an
11965 // upper lane.
11966 // FIXME: Is it ever worth allowing a negative base offset?
11967 if (!((0 <= Offset && Offset < NumEltsPerLane) ||
11968 (Offset % NumEltsPerLane) == 0))
11969 return SDValue();
11971 // If we are offsetting, all referenced entries must come from the same
11972 // lane.
11973 if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
11974 return SDValue();
11976 if ((M % NumElements) != (Offset + (i / Scale)))
11977 return SDValue(); // Non-consecutive strided elements.
11978 Matches++;
11981 // If we fail to find an input, we have a zero-shuffle which should always
11982 // have already been handled.
11983 // FIXME: Maybe handle this here in case during blending we end up with one?
11984 if (!InputV)
11985 return SDValue();
11987 // If we are offsetting, don't extend if we only match a single input; we
11988 // can always do better by using a basic PSHUF or PUNPCK.
11989 if (Offset != 0 && Matches < 2)
11990 return SDValue();
11992 return lowerShuffleAsSpecificZeroOrAnyExtend(DL, VT, Scale, Offset, AnyExt,
11993 InputV, Mask, Subtarget, DAG);
11996 // The widest scale possible for extending is to a 64-bit integer.
11997 assert(Bits % 64 == 0 &&
11998 "The number of bits in a vector must be divisible by 64 on x86!");
11999 int NumExtElements = Bits / 64;
12001 // Each iteration, try extending the elements half as much, but into twice as
12002 // many elements.
12003 for (; NumExtElements < NumElements; NumExtElements *= 2) {
12004 assert(NumElements % NumExtElements == 0 &&
12005 "The input vector size must be divisible by the extended size.");
12006 if (SDValue V = Lower(NumElements / NumExtElements))
12007 return V;
12010 // General extends failed, but 128-bit vectors may be able to use MOVQ.
12011 if (Bits != 128)
12012 return SDValue();
12014 // Returns one of the source operands if the shuffle can be reduced to a
12015 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
12016 auto CanZExtLowHalf = [&]() {
12017 for (int i = NumElements / 2; i != NumElements; ++i)
12018 if (!Zeroable[i])
12019 return SDValue();
12020 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
12021 return V1;
12022 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
12023 return V2;
12024 return SDValue();
12027 if (SDValue V = CanZExtLowHalf()) {
12028 V = DAG.getBitcast(MVT::v2i64, V);
12029 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
12030 return DAG.getBitcast(VT, V);
12033 // No viable ext lowering found.
12034 return SDValue();
12037 /// Try to get a scalar value for a specific element of a vector.
12039 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
12040 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
12041 SelectionDAG &DAG) {
12042 MVT VT = V.getSimpleValueType();
12043 MVT EltVT = VT.getVectorElementType();
12044 V = peekThroughBitcasts(V);
12046 // If the bitcasts shift the element size, we can't extract an equivalent
12047 // element from it.
12048 MVT NewVT = V.getSimpleValueType();
12049 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
12050 return SDValue();
12052 if (V.getOpcode() == ISD::BUILD_VECTOR ||
12053 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
12054 // Ensure the scalar operand is the same size as the destination.
12055 // FIXME: Add support for scalar truncation where possible.
12056 SDValue S = V.getOperand(Idx);
12057 if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
12058 return DAG.getBitcast(EltVT, S);
12061 return SDValue();
12064 /// Helper to test for a load that can be folded with x86 shuffles.
12066 /// This is particularly important because the set of instructions varies
12067 /// significantly based on whether the operand is a load or not.
12068 static bool isShuffleFoldableLoad(SDValue V) {
12069 return V->hasOneUse() &&
12070 ISD::isNON_EXTLoad(peekThroughOneUseBitcasts(V).getNode());
12073 template<typename T>
12074 static bool isSoftF16(T VT, const X86Subtarget &Subtarget) {
12075 T EltVT = VT.getScalarType();
12076 return EltVT == MVT::bf16 || (EltVT == MVT::f16 && !Subtarget.hasFP16());
12079 /// Try to lower insertion of a single element into a zero vector.
12081 /// This is a common pattern for which we have especially efficient lowerings
12082 /// across all subtarget feature sets.
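///
/// A typical case is a v4f32 mask such as [ 4, zz, zz, zz ], which inserts
/// the low element of V2 into an otherwise zero vector.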
12083 static SDValue lowerShuffleAsElementInsertion(
12084 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12085 const APInt &Zeroable, const X86Subtarget &Subtarget,
12086 SelectionDAG &DAG) {
12087 MVT ExtVT = VT;
12088 MVT EltVT = VT.getVectorElementType();
12089 unsigned NumElts = VT.getVectorNumElements();
12090 unsigned EltBits = VT.getScalarSizeInBits();
12092 if (isSoftF16(EltVT, Subtarget))
12093 return SDValue();
12095 int V2Index =
12096 find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
12097 Mask.begin();
12098 bool IsV1Constant = getTargetConstantFromNode(V1) != nullptr;
12099 bool IsV1Zeroable = true;
12100 for (int i = 0, Size = Mask.size(); i < Size; ++i)
12101 if (i != V2Index && !Zeroable[i]) {
12102 IsV1Zeroable = false;
12103 break;
12106 // Bail if a non-zero V1 isn't used in place.
12107 if (!IsV1Zeroable) {
12108 SmallVector<int, 8> V1Mask(Mask);
12109 V1Mask[V2Index] = -1;
12110 if (!isNoopShuffleMask(V1Mask))
12111 return SDValue();
12114 // Check for a single input from a SCALAR_TO_VECTOR node.
12115 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
12116 // all the smarts here sunk into that routine. However, the current
12117 // lowering of BUILD_VECTOR makes that nearly impossible until the old
12118 // vector shuffle lowering is dead.
12119 SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
12120 DAG);
12121 if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
12122 // We need to zext the scalar if it is smaller than an i32.
12123 V2S = DAG.getBitcast(EltVT, V2S);
12124 if (EltVT == MVT::i8 || (EltVT == MVT::i16 && !Subtarget.hasFP16())) {
12125 // Using zext to expand a narrow element won't work for non-zero
12126 // insertions. But we can use a masked constant vector if we're
12127 // inserting V2 into the bottom of V1.
12128 if (!IsV1Zeroable && !(IsV1Constant && V2Index == 0))
12129 return SDValue();
12131 // Zero-extend directly to i32.
12132 ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
12133 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
12135 // If we're inserting into a constant, mask off the inserted index
12136 // and OR with the zero-extended scalar.
12137 if (!IsV1Zeroable) {
12138 SmallVector<APInt> Bits(NumElts, APInt::getAllOnes(EltBits));
12139 Bits[V2Index] = APInt::getZero(EltBits);
12140 SDValue BitMask = getConstVector(Bits, VT, DAG, DL);
12141 V1 = DAG.getNode(ISD::AND, DL, VT, V1, BitMask);
12142 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
12143 V2 = DAG.getBitcast(VT, DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2));
12144 return DAG.getNode(ISD::OR, DL, VT, V1, V2);
12147 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
12148 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
12149 EltVT == MVT::i16) {
12150 // Either not inserting from the low element of the input or the input
12151 // element size is too small to use VZEXT_MOVL to clear the high bits.
12152 return SDValue();
12155 if (!IsV1Zeroable) {
12156 // If V1 can't be treated as a zero vector we have fewer options to lower
12157 // this. We can't support integer vectors or non-zero targets cheaply.
12158 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
12159 if (!VT.isFloatingPoint() || V2Index != 0)
12160 return SDValue();
12161 if (!VT.is128BitVector())
12162 return SDValue();
12164 // Otherwise, use MOVSD, MOVSS or MOVSH.
12165 unsigned MovOpc = 0;
12166 if (EltVT == MVT::f16)
12167 MovOpc = X86ISD::MOVSH;
12168 else if (EltVT == MVT::f32)
12169 MovOpc = X86ISD::MOVSS;
12170 else if (EltVT == MVT::f64)
12171 MovOpc = X86ISD::MOVSD;
12172 else
12173 llvm_unreachable("Unsupported floating point element type to handle!");
12174 return DAG.getNode(MovOpc, DL, ExtVT, V1, V2);
12177 // This lowering only works for the low element with floating point vectors.
12178 if (VT.isFloatingPoint() && V2Index != 0)
12179 return SDValue();
12181 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
12182 if (ExtVT != VT)
12183 V2 = DAG.getBitcast(VT, V2);
12185 if (V2Index != 0) {
12186 // If we have 4 or fewer lanes we can cheaply shuffle the element into
12187 // the desired position. Otherwise it is more efficient to do a vector
12188 // shift left. We know that we can do a vector shift left because all
12189 // the inputs are zero.
12190 if (VT.isFloatingPoint() || NumElts <= 4) {
12191 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
12192 V2Shuffle[V2Index] = 0;
12193 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
12194 } else {
12195 V2 = DAG.getBitcast(MVT::v16i8, V2);
12196 V2 = DAG.getNode(
12197 X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
12198 DAG.getTargetConstant(V2Index * EltBits / 8, DL, MVT::i8));
12199 V2 = DAG.getBitcast(VT, V2);
12202 return V2;
12205 /// Try to lower a broadcast of a single, truncated, integer element coming
12206 /// from a scalar_to_vector/build_vector node \p V0 with larger elements.
12208 /// This assumes we have AVX2.
12209 static SDValue lowerShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, SDValue V0,
12210 int BroadcastIdx,
12211 const X86Subtarget &Subtarget,
12212 SelectionDAG &DAG) {
12213 assert(Subtarget.hasAVX2() &&
12214 "We can only lower integer broadcasts with AVX2!");
12216 MVT EltVT = VT.getVectorElementType();
12217 MVT V0VT = V0.getSimpleValueType();
12219 assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
12220 assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");
12222 MVT V0EltVT = V0VT.getVectorElementType();
12223 if (!V0EltVT.isInteger())
12224 return SDValue();
12226 const unsigned EltSize = EltVT.getSizeInBits();
12227 const unsigned V0EltSize = V0EltVT.getSizeInBits();
12229 // This is only a truncation if the original element type is larger.
12230 if (V0EltSize <= EltSize)
12231 return SDValue();
12233 assert(((V0EltSize % EltSize) == 0) &&
12234 "Scalar type sizes must all be powers of 2 on x86!");
12236 const unsigned V0Opc = V0.getOpcode();
12237 const unsigned Scale = V0EltSize / EltSize;
12238 const unsigned V0BroadcastIdx = BroadcastIdx / Scale;
12240 if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
12241 V0Opc != ISD::BUILD_VECTOR)
12242 return SDValue();
12244 SDValue Scalar = V0.getOperand(V0BroadcastIdx);
12246 // If we're extracting non-least-significant bits, shift so we can truncate.
12247 // Hopefully, we can fold away the trunc/srl/load into the broadcast.
12248 // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
12249 // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
12250 if (const int OffsetIdx = BroadcastIdx % Scale)
12251 Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
12252 DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));
12254 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
12255 DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
12258 /// Test whether this can be lowered with a single SHUFPS instruction.
12260 /// This is used to disable more specialized lowerings when the shufps lowering
12261 /// will happen to be efficient.
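///
/// For example, [ 0, 1, 4, 5 ] needs only a single SHUFPS (low half from the
/// first input, high half from the second), while [ 0, 4, 1, 5 ] does not
/// because each half mixes both inputs.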
12262 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
12263 // This routine only handles 128-bit shufps.
12264 assert(Mask.size() == 4 && "Unsupported mask size!");
12265 assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
12266 assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
12267 assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
12268 assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");
12270 // To lower with a single SHUFPS we need to have the low half and high half
12271 // each requiring a single input.
12272 if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
12273 return false;
12274 if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
12275 return false;
12277 return true;
12280 /// Test whether the specified input (0 or 1) is in-place blended by the
12281 /// given mask.
12283 /// This returns true if the elements from a particular input are already in the
12284 /// slot required by the given mask and require no permutation.
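///
/// For example, with a 4-element mask, input 0 is in place for [ 0, 1, 6, 7 ]
/// (its used elements already sit in slots 0 and 1) but not for
/// [ 1, 0, 6, 7 ].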
12285 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
12286 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
12287 int Size = Mask.size();
12288 for (int i = 0; i < Size; ++i)
12289 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
12290 return false;
12292 return true;
12295 /// If we are extracting two 128-bit halves of a vector and shuffling the
12296 /// result, match that to a 256-bit AVX2 vperm* instruction to avoid a
12297 /// multi-shuffle lowering.
12298 static SDValue lowerShuffleOfExtractsAsVperm(const SDLoc &DL, SDValue N0,
12299 SDValue N1, ArrayRef<int> Mask,
12300 SelectionDAG &DAG) {
12301 MVT VT = N0.getSimpleValueType();
12302 assert((VT.is128BitVector() &&
12303 (VT.getScalarSizeInBits() == 32 || VT.getScalarSizeInBits() == 64)) &&
12304 "VPERM* family of shuffles requires 32-bit or 64-bit elements");
12306 // Check that both sources are extracts of the same source vector.
12307 if (N0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12308 N1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12309 N0.getOperand(0) != N1.getOperand(0) ||
12310 !N0.hasOneUse() || !N1.hasOneUse())
12311 return SDValue();
12313 SDValue WideVec = N0.getOperand(0);
12314 MVT WideVT = WideVec.getSimpleValueType();
12315 if (!WideVT.is256BitVector())
12316 return SDValue();
12318 // Match extracts of each half of the wide source vector. Commute the shuffle
12319 // if the extract of the low half is N1.
12320 unsigned NumElts = VT.getVectorNumElements();
12321 SmallVector<int, 4> NewMask(Mask);
12322 const APInt &ExtIndex0 = N0.getConstantOperandAPInt(1);
12323 const APInt &ExtIndex1 = N1.getConstantOperandAPInt(1);
12324 if (ExtIndex1 == 0 && ExtIndex0 == NumElts)
12325 ShuffleVectorSDNode::commuteMask(NewMask);
12326 else if (ExtIndex0 != 0 || ExtIndex1 != NumElts)
12327 return SDValue();
12329 // Final bailout: if the mask is simple, we are better off using an extract
12330 // and a simple narrow shuffle. Prefer extract+unpack(h/l)ps to vpermps
12331 // because that avoids a constant load from memory.
12332 if (NumElts == 4 &&
12333 (isSingleSHUFPSMask(NewMask) || is128BitUnpackShuffleMask(NewMask, DAG)))
12334 return SDValue();
12336 // Extend the shuffle mask with undef elements.
12337 NewMask.append(NumElts, -1);
12339 // shuf (extract X, 0), (extract X, 4), M --> extract (shuf X, undef, M'), 0
12340 SDValue Shuf = DAG.getVectorShuffle(WideVT, DL, WideVec, DAG.getUNDEF(WideVT),
12341 NewMask);
12342 // This is free: ymm -> xmm.
12343 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuf,
12344 DAG.getIntPtrConstant(0, DL));
12347 /// Try to lower broadcast of a single element.
12349 /// For convenience, this code also bundles all of the subtarget feature set
12350 /// filtering. While a little annoying to re-dispatch on type here, there isn't
12351 /// a convenient way to factor it out.
12352 static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
12353 SDValue V2, ArrayRef<int> Mask,
12354 const X86Subtarget &Subtarget,
12355 SelectionDAG &DAG) {
12356 MVT EltVT = VT.getVectorElementType();
12357 if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
12358 (Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
12359 (Subtarget.hasAVX2() && (VT.isInteger() || EltVT == MVT::f16))))
12360 return SDValue();
12362 // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
12363 // we can only broadcast from a register with AVX2.
12364 unsigned NumEltBits = VT.getScalarSizeInBits();
12365 unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
12366 ? X86ISD::MOVDDUP
12367 : X86ISD::VBROADCAST;
12368 bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();
12370 // Check that the mask is a broadcast.
12371 int BroadcastIdx = getSplatIndex(Mask);
12372 if (BroadcastIdx < 0)
12373 return SDValue();
12374 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
12375 "a sorted mask where the broadcast "
12376 "comes from V1.");
12378 // Go up the chain of (vector) values to find a scalar load that we can
12379 // combine with the broadcast.
12380 // TODO: Combine this logic with findEltLoadSrc() used by
12381 // EltsFromConsecutiveLoads().
12382 int BitOffset = BroadcastIdx * NumEltBits;
12383 SDValue V = V1;
12384 for (;;) {
12385 switch (V.getOpcode()) {
12386 case ISD::BITCAST: {
12387 V = V.getOperand(0);
12388 continue;
12390 case ISD::CONCAT_VECTORS: {
12391 int OpBitWidth = V.getOperand(0).getValueSizeInBits();
12392 int OpIdx = BitOffset / OpBitWidth;
12393 V = V.getOperand(OpIdx);
12394 BitOffset %= OpBitWidth;
12395 continue;
12397 case ISD::EXTRACT_SUBVECTOR: {
12398 // The extraction index adds to the existing offset.
12399 unsigned EltBitWidth = V.getScalarValueSizeInBits();
12400 unsigned Idx = V.getConstantOperandVal(1);
12401 unsigned BeginOffset = Idx * EltBitWidth;
12402 BitOffset += BeginOffset;
12403 V = V.getOperand(0);
12404 continue;
12406 case ISD::INSERT_SUBVECTOR: {
12407 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
12408 int EltBitWidth = VOuter.getScalarValueSizeInBits();
12409 int Idx = (int)V.getConstantOperandVal(2);
12410 int NumSubElts = (int)VInner.getSimpleValueType().getVectorNumElements();
12411 int BeginOffset = Idx * EltBitWidth;
12412 int EndOffset = BeginOffset + NumSubElts * EltBitWidth;
12413 if (BeginOffset <= BitOffset && BitOffset < EndOffset) {
12414 BitOffset -= BeginOffset;
12415 V = VInner;
12416 } else {
12417 V = VOuter;
12419 continue;
12422 break;
12424 assert((BitOffset % NumEltBits) == 0 && "Illegal bit-offset");
12425 BroadcastIdx = BitOffset / NumEltBits;
12427 // Do we need to bitcast the source to retrieve the original broadcast index?
12428 bool BitCastSrc = V.getScalarValueSizeInBits() != NumEltBits;
12430 // Check if this is a broadcast of a scalar. We special case lowering
12431 // for scalars so that we can more effectively fold with loads.
12432 // If the original value has a larger element type than the shuffle, the
12433 // broadcast element is in essence truncated. Make that explicit to ease
12434 // folding.
12435 if (BitCastSrc && VT.isInteger())
12436 if (SDValue TruncBroadcast = lowerShuffleAsTruncBroadcast(
12437 DL, VT, V, BroadcastIdx, Subtarget, DAG))
12438 return TruncBroadcast;
12440 // Also check the simpler case, where we can directly reuse the scalar.
12441 if (!BitCastSrc &&
12442 ((V.getOpcode() == ISD::BUILD_VECTOR && V.hasOneUse()) ||
12443 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0))) {
12444 V = V.getOperand(BroadcastIdx);
12446 // If we can't broadcast from a register, check that the input is a load.
12447 if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
12448 return SDValue();
12449 } else if (ISD::isNormalLoad(V.getNode()) &&
12450 cast<LoadSDNode>(V)->isSimple()) {
12451 // We do not check for one-use of the vector load because a broadcast load
12452 // is expected to be a win for code size, register pressure, and possibly
12453 // uops even if the original vector load is not eliminated.
12455 // Reduce the vector load and shuffle to a broadcasted scalar load.
12456 LoadSDNode *Ld = cast<LoadSDNode>(V);
12457 SDValue BaseAddr = Ld->getOperand(1);
12458 MVT SVT = VT.getScalarType();
12459 unsigned Offset = BroadcastIdx * SVT.getStoreSize();
12460 assert((int)(Offset * 8) == BitOffset && "Unexpected bit-offset");
12461 SDValue NewAddr =
12462 DAG.getMemBasePlusOffset(BaseAddr, TypeSize::getFixed(Offset), DL);
12464 // Directly form VBROADCAST_LOAD if we're using VBROADCAST opcode rather
12465 // than MOVDDUP.
12466 // FIXME: Should we add VBROADCAST_LOAD isel patterns for pre-AVX?
12467 if (Opcode == X86ISD::VBROADCAST) {
12468 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
12469 SDValue Ops[] = {Ld->getChain(), NewAddr};
12470 V = DAG.getMemIntrinsicNode(
12471 X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, SVT,
12472 DAG.getMachineFunction().getMachineMemOperand(
12473 Ld->getMemOperand(), Offset, SVT.getStoreSize()));
12474 DAG.makeEquivalentMemoryOrdering(Ld, V);
12475 return DAG.getBitcast(VT, V);
12477 assert(SVT == MVT::f64 && "Unexpected VT!");
12478 V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
12479 DAG.getMachineFunction().getMachineMemOperand(
12480 Ld->getMemOperand(), Offset, SVT.getStoreSize()));
12481 DAG.makeEquivalentMemoryOrdering(Ld, V);
12482 } else if (!BroadcastFromReg) {
12483 // We can't broadcast from a vector register.
12484 return SDValue();
12485 } else if (BitOffset != 0) {
12486 // We can only broadcast from the zero-element of a vector register,
12487 // but it can be advantageous to broadcast from the zero-element of a
12488 // subvector.
12489 if (!VT.is256BitVector() && !VT.is512BitVector())
12490 return SDValue();
12492 // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
12493 if (VT == MVT::v4f64 || VT == MVT::v4i64)
12494 return SDValue();
12496 // Only broadcast the zero-element of a 128-bit subvector.
12497 if ((BitOffset % 128) != 0)
12498 return SDValue();
12500 assert((BitOffset % V.getScalarValueSizeInBits()) == 0 &&
12501 "Unexpected bit-offset");
12502 assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
12503 "Unexpected vector size");
12504 unsigned ExtractIdx = BitOffset / V.getScalarValueSizeInBits();
12505 V = extract128BitVector(V, ExtractIdx, DAG, DL);
12508 // On AVX we can use VBROADCAST directly for scalar sources.
12509 if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector()) {
12510 V = DAG.getBitcast(MVT::f64, V);
12511 if (Subtarget.hasAVX()) {
12512 V = DAG.getNode(X86ISD::VBROADCAST, DL, MVT::v2f64, V);
12513 return DAG.getBitcast(VT, V);
12515 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V);
12518 // If this is a scalar, do the broadcast on this type and bitcast.
12519 if (!V.getValueType().isVector()) {
12520 assert(V.getScalarValueSizeInBits() == NumEltBits &&
12521 "Unexpected scalar size");
12522 MVT BroadcastVT = MVT::getVectorVT(V.getSimpleValueType(),
12523 VT.getVectorNumElements());
12524 return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
12527 // We only support broadcasting from 128-bit vectors to minimize the
12528 // number of patterns we need to deal with in isel. So extract down to
12529 // 128-bits, removing as many bitcasts as possible.
12530 if (V.getValueSizeInBits() > 128)
12531 V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);
12533 // Otherwise cast V to a vector with the same element type as VT, but
12534 // possibly narrower than VT. Then perform the broadcast.
12535 unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
12536 MVT CastVT = MVT::getVectorVT(VT.getVectorElementType(), NumSrcElts);
12537 return DAG.getNode(Opcode, DL, VT, DAG.getBitcast(CastVT, V));
12540 // Check for whether we can use INSERTPS to perform the shuffle. We only use
12541 // INSERTPS when the V1 elements are already in the correct locations
12542 // because otherwise we can just always use two SHUFPS instructions which
12543 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
12544 // perform INSERTPS if a single V1 element is out of place and all V2
12545 // elements are zeroable.
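// e.g. the mask [ 0, 1, 6, 3 ] with no zeroable elements keeps V1 in place
// and inserts element 2 of V2 into lane 2, giving an InsertPS immediate of
// 0xA0 (source index 2, destination index 2, empty zero mask).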
12546 static bool matchShuffleAsInsertPS(SDValue &V1, SDValue &V2,
12547 unsigned &InsertPSMask,
12548 const APInt &Zeroable,
12549 ArrayRef<int> Mask, SelectionDAG &DAG) {
12550 assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
12551 assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
12552 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
12554 // Attempt to match INSERTPS with one element from VA or VB being
12555 // inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
12556 // are updated.
12557 auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
12558 ArrayRef<int> CandidateMask) {
12559 unsigned ZMask = 0;
12560 int VADstIndex = -1;
12561 int VBDstIndex = -1;
12562 bool VAUsedInPlace = false;
12564 for (int i = 0; i < 4; ++i) {
12565 // Synthesize a zero mask from the zeroable elements (includes undefs).
12566 if (Zeroable[i]) {
12567 ZMask |= 1 << i;
12568 continue;
12571 // Flag if we use any VA inputs in place.
12572 if (i == CandidateMask[i]) {
12573 VAUsedInPlace = true;
12574 continue;
12577 // We can only insert a single non-zeroable element.
12578 if (VADstIndex >= 0 || VBDstIndex >= 0)
12579 return false;
12581 if (CandidateMask[i] < 4) {
12582 // VA input out of place for insertion.
12583 VADstIndex = i;
12584 } else {
12585 // VB input for insertion.
12586 VBDstIndex = i;
12590 // Don't bother if we have no (non-zeroable) element for insertion.
12591 if (VADstIndex < 0 && VBDstIndex < 0)
12592 return false;
12594 // Determine element insertion src/dst indices. The src index is from the
12595 // start of the inserted vector, not the start of the concatenated vector.
12596 unsigned VBSrcIndex = 0;
12597 if (VADstIndex >= 0) {
12598 // If we have a VA input out of place, we use VA as the V2 element
12599 // insertion and don't use the original V2 at all.
12600 VBSrcIndex = CandidateMask[VADstIndex];
12601 VBDstIndex = VADstIndex;
12602 VB = VA;
12603 } else {
12604 VBSrcIndex = CandidateMask[VBDstIndex] - 4;
12607 // If no V1 inputs are used in place, then the result is created only from
12608 // the zero mask and the V2 insertion - so remove V1 dependency.
12609 if (!VAUsedInPlace)
12610 VA = DAG.getUNDEF(MVT::v4f32);
12612 // Update V1, V2 and InsertPSMask accordingly.
12613 V1 = VA;
12614 V2 = VB;
12616 // Insert the V2 element into the desired position.
12617 InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
12618 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
12619 return true;
12622 if (matchAsInsertPS(V1, V2, Mask))
12623 return true;
12625 // Commute and try again.
12626 SmallVector<int, 4> CommutedMask(Mask);
12627 ShuffleVectorSDNode::commuteMask(CommutedMask);
12628 if (matchAsInsertPS(V2, V1, CommutedMask))
12629 return true;
12631 return false;
12634 static SDValue lowerShuffleAsInsertPS(const SDLoc &DL, SDValue V1, SDValue V2,
12635 ArrayRef<int> Mask, const APInt &Zeroable,
12636 SelectionDAG &DAG) {
12637 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12638 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12640 // Attempt to match the insertps pattern.
12641 unsigned InsertPSMask = 0;
12642 if (!matchShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
12643 return SDValue();
12645 // Insert the V2 element into the desired position.
12646 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
12647 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
12650 /// Handle lowering of 2-lane 64-bit floating point shuffles.
12652 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
12653 /// support for floating point shuffles but not integer shuffles. These
12654 /// instructions will incur a domain crossing penalty on some chips though so
12655 /// it is better to avoid lowering through this for integer vectors where
12656 /// possible.
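///
/// For a single-input shuffle such as [ 1, 1 ], the SHUFPD/VPERMILPD
/// immediate computed below evaluates to 0b11 (both result elements taken
/// from the high double).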
12657 static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
12658 const APInt &Zeroable, SDValue V1, SDValue V2,
12659 const X86Subtarget &Subtarget,
12660 SelectionDAG &DAG) {
12661 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
12662 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
12663 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
12665 if (V2.isUndef()) {
12666 // Check for being able to broadcast a single element.
12667 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2f64, V1, V2,
12668 Mask, Subtarget, DAG))
12669 return Broadcast;
12671 // Straight shuffle of a single input vector. Simulate this by using the
12672 // single input as both of the "inputs" to this instruction.
12673 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
12675 if (Subtarget.hasAVX()) {
12676 // If we have AVX, we can use VPERMILPD which will allow folding a load
12677 // into the shuffle.
12678 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
12679 DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
12682 return DAG.getNode(
12683 X86ISD::SHUFP, DL, MVT::v2f64,
12684 Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
12685 Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
12686 DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
12688 assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
12689 assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
12690 assert(Mask[0] < 2 && "We sort V1 to be the first input.");
12691 assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
12693 if (Subtarget.hasAVX2())
12694 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
12695 return Extract;
12697 // When loading a scalar and then shuffling it into a vector we can often do
12698 // the insertion cheaply.
12699 if (SDValue Insertion = lowerShuffleAsElementInsertion(
12700 DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
12701 return Insertion;
12702 // Try inverting the insertion since for v2 masks it is easy to do and we
12703 // can't reliably sort the mask one way or the other.
12704 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
12705 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
12706 if (SDValue Insertion = lowerShuffleAsElementInsertion(
12707 DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
12708 return Insertion;
12710 // Try to use one of the special instruction patterns to handle two common
12711 // blend patterns if a zero-blend above didn't work.
12712 if (isShuffleEquivalent(Mask, {0, 3}, V1, V2) ||
12713 isShuffleEquivalent(Mask, {1, 3}, V1, V2))
12714 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
12715 // We can either use a special instruction to load over the low double or
12716 // to move just the low double.
12717 return DAG.getNode(
12718 X86ISD::MOVSD, DL, MVT::v2f64, V2,
12719 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
12721 if (Subtarget.hasSSE41())
12722 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
12723 Zeroable, Subtarget, DAG))
12724 return Blend;
12726 // Use dedicated unpack instructions for masks that match their pattern.
12727 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
12728 return V;
12730 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
12731 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
12732 DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
12735 /// Handle lowering of 2-lane 64-bit integer shuffles.
12737 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
12738 /// the integer unit to minimize domain crossing penalties. However, for blends
12739 /// it falls back to the floating point shuffle operation with appropriate bit
12740 /// casting.
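///
/// For example, the single-input mask [ 1, 0 ] is widened below to the v4i32
/// PSHUFD mask [ 2, 3, 0, 1 ].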
12741 static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
12742 const APInt &Zeroable, SDValue V1, SDValue V2,
12743 const X86Subtarget &Subtarget,
12744 SelectionDAG &DAG) {
12745 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
12746 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
12747 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
12749 if (V2.isUndef()) {
12750 // Check for being able to broadcast a single element.
12751 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2i64, V1, V2,
12752 Mask, Subtarget, DAG))
12753 return Broadcast;
12755 // Straight shuffle of a single input vector. For everything from SSE2
12756 // onward this has a single fast instruction with no scary immediates.
12757 // We have to map the mask as it is actually a v4i32 shuffle instruction.
12758 V1 = DAG.getBitcast(MVT::v4i32, V1);
12759 int WidenedMask[4] = {Mask[0] < 0 ? -1 : (Mask[0] * 2),
12760 Mask[0] < 0 ? -1 : ((Mask[0] * 2) + 1),
12761 Mask[1] < 0 ? -1 : (Mask[1] * 2),
12762 Mask[1] < 0 ? -1 : ((Mask[1] * 2) + 1)};
12763 return DAG.getBitcast(
12764 MVT::v2i64,
12765 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
12766 getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
12768 assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
12769 assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
12770 assert(Mask[0] < 2 && "We sort V1 to be the first input.");
12771 assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
12773 if (Subtarget.hasAVX2())
12774 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
12775 return Extract;
12777 // Try to use shift instructions.
12778 if (SDValue Shift =
12779 lowerShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget,
12780 DAG, /*BitwiseOnly*/ false))
12781 return Shift;
12783 // When loading a scalar and then shuffling it into a vector we can often do
12784 // the insertion cheaply.
12785 if (SDValue Insertion = lowerShuffleAsElementInsertion(
12786 DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
12787 return Insertion;
12788 // Try inverting the insertion since for v2 masks it is easy to do and we
12789 // can't reliably sort the mask one way or the other.
12790 int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
12791 if (SDValue Insertion = lowerShuffleAsElementInsertion(
12792 DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
12793 return Insertion;
12795 // We have different paths for blend lowering, but they all must use the
12796 // *exact* same predicate.
12797 bool IsBlendSupported = Subtarget.hasSSE41();
12798 if (IsBlendSupported)
12799 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
12800 Zeroable, Subtarget, DAG))
12801 return Blend;
12803 // Use dedicated unpack instructions for masks that match their pattern.
12804 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
12805 return V;
12807 // Try to use byte rotation instructions.
12808 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
12809 if (Subtarget.hasSSSE3()) {
12810 if (Subtarget.hasVLX())
12811 if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v2i64, V1, V2, Mask,
12812 Zeroable, Subtarget, DAG))
12813 return Rotate;
12815 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v2i64, V1, V2, Mask,
12816 Subtarget, DAG))
12817 return Rotate;
12820 // If we have direct support for blends, we should lower by decomposing into
12821 // a permute. That will be faster than the domain cross.
12822 if (IsBlendSupported)
12823 return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v2i64, V1, V2, Mask,
12824 Subtarget, DAG);
12826 // We implement this with SHUFPD which is pretty lame because it will likely
12827 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
12828 // However, all the alternatives are still more cycles and newer chips don't
12829 // have this problem. It would be really nice if x86 had better shuffles here.
12830 V1 = DAG.getBitcast(MVT::v2f64, V1);
12831 V2 = DAG.getBitcast(MVT::v2f64, V2);
12832 return DAG.getBitcast(MVT::v2i64,
12833 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
12836 /// Lower a vector shuffle using the SHUFPS instruction.
12838 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
12839 /// It makes no assumptions about whether this is the *best* lowering, it simply
12840 /// uses it.
12841 static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
12842 ArrayRef<int> Mask, SDValue V1,
12843 SDValue V2, SelectionDAG &DAG) {
12844 SDValue LowV = V1, HighV = V2;
12845 SmallVector<int, 4> NewMask(Mask);
12846 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
12848 if (NumV2Elements == 1) {
12849 int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();
12851 // Compute the index adjacent to V2Index and in the same half by toggling
12852 // the low bit.
12853 int V2AdjIndex = V2Index ^ 1;
12855 if (Mask[V2AdjIndex] < 0) {
12856 // Handles all the cases where we have a single V2 element and an undef.
12857 // This will only ever happen in the high lanes because we commute the
12858 // vector otherwise.
12859 if (V2Index < 2)
12860 std::swap(LowV, HighV);
12861 NewMask[V2Index] -= 4;
12862 } else {
12863 // Handle the case where the V2 element ends up adjacent to a V1 element.
12864 // To make this work, blend them together as the first step.
12865 int V1Index = V2AdjIndex;
12866 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
12867 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
12868 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
12870 // Now proceed to reconstruct the final blend as we have the necessary
12871 // high or low half formed.
12872 if (V2Index < 2) {
12873 LowV = V2;
12874 HighV = V1;
12875 } else {
12876 HighV = V2;
12878 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
12879 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
12881 } else if (NumV2Elements == 2) {
12882 if (Mask[0] < 4 && Mask[1] < 4) {
12883 // Handle the easy case where we have V1 in the low lanes and V2 in the
12884 // high lanes.
12885 NewMask[2] -= 4;
12886 NewMask[3] -= 4;
12887 } else if (Mask[2] < 4 && Mask[3] < 4) {
12888 // We also handle the reversed case because this utility may get called
12889 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
12890 // arrange things in the right direction.
12891 NewMask[0] -= 4;
12892 NewMask[1] -= 4;
12893 HighV = V1;
12894 LowV = V2;
12895 } else {
12896 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
12897 // trying to place elements directly, just blend them and set up the final
12898 // shuffle to place them.
12900 // The first two blend mask elements are for V1, the second two are for
12901 // V2.
12902 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
12903 Mask[2] < 4 ? Mask[2] : Mask[3],
12904 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
12905 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
12906 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
12907 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
12909 // Now we do a normal shuffle of V1 by giving V1 as both operands to
12910 // a blend.
12911 LowV = HighV = V1;
12912 NewMask[0] = Mask[0] < 4 ? 0 : 2;
12913 NewMask[1] = Mask[0] < 4 ? 2 : 0;
12914 NewMask[2] = Mask[2] < 4 ? 1 : 3;
12915 NewMask[3] = Mask[2] < 4 ? 3 : 1;
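// Worked example (illustrative; such a mask would normally be matched as an
// unpack earlier, it is only used to show the two-step construction): for
// Mask <0, 4, 1, 5> the BlendMask is <0, 1, 0, 1>, so the first SHUFP builds
// <V1[0], V1[1], V2[0], V2[1]>, and the final NewMask <0, 2, 1, 3> shuffles
// that intermediate into <V1[0], V2[0], V1[1], V2[1]>, the requested result.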
12917 } else if (NumV2Elements == 3) {
12918 // Ideally canonicalizeShuffleMaskWithCommute should have caught this, but
12919 // we can get here via other paths (e.g. repeated mask matching) where we
12920 // don't want to do another round of lowerVECTOR_SHUFFLE.
12921 ShuffleVectorSDNode::commuteMask(NewMask);
12922 return lowerShuffleWithSHUFPS(DL, VT, NewMask, V2, V1, DAG);
12924 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
12925 getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
12928 /// Lower 4-lane 32-bit floating point shuffles.
12930 /// Uses instructions exclusively from the floating point unit to minimize
12931 /// domain crossing penalties, as these are sufficient to implement all v4f32
12932 /// shuffles.
12933 static SDValue lowerV4F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
12934 const APInt &Zeroable, SDValue V1, SDValue V2,
12935 const X86Subtarget &Subtarget,
12936 SelectionDAG &DAG) {
12937 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12938 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12939 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
12941 if (Subtarget.hasSSE41())
12942 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
12943 Zeroable, Subtarget, DAG))
12944 return Blend;
12946 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
12948 if (NumV2Elements == 0) {
12949 // Check for being able to broadcast a single element.
12950 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f32, V1, V2,
12951 Mask, Subtarget, DAG))
12952 return Broadcast;
12954 // Use even/odd duplicate instructions for masks that match their pattern.
12955 if (Subtarget.hasSSE3()) {
12956 if (isShuffleEquivalent(Mask, {0, 0, 2, 2}, V1, V2))
12957 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
12958 if (isShuffleEquivalent(Mask, {1, 1, 3, 3}, V1, V2))
12959 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
12962 if (Subtarget.hasAVX()) {
12963 // If we have AVX, we can use VPERMILPS which will allow folding a load
12964 // into the shuffle.
12965 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
12966 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
12969 // Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
12970 // in SSE1 because otherwise they are widened to v2f64 and never get here.
12971 if (!Subtarget.hasSSE2()) {
12972 if (isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1, V2))
12973 return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
12974 if (isShuffleEquivalent(Mask, {2, 3, 2, 3}, V1, V2))
12975 return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
12978 // Otherwise, use a straight shuffle of a single input vector. We pass the
12979 // input vector to both operands to simulate this with a SHUFPS.
12980 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
12981 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
12984 if (Subtarget.hasSSE2())
12985 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
12986 DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG)) {
12987 ZExt = DAG.getBitcast(MVT::v4f32, ZExt);
12988 return ZExt;
12991 if (Subtarget.hasAVX2())
12992 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
12993 return Extract;
12995 // There are special ways we can lower some single-element blends. However, we
12996 // have custom ways below to lower more complex single-element blends, which we
12997 // defer to if both this and BLENDPS fail to match, so restrict this to
12998 // when the V2 input is targeting element 0 of the mask -- that is the fast
12999 // case here.
13000 if (NumV2Elements == 1 && Mask[0] >= 4)
13001 if (SDValue V = lowerShuffleAsElementInsertion(
13002 DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
13003 return V;
13005 if (Subtarget.hasSSE41()) {
13006 // Use INSERTPS if we can complete the shuffle efficiently.
13007 if (SDValue V = lowerShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
13008 return V;
13010 if (!isSingleSHUFPSMask(Mask))
13011 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, MVT::v4f32, V1,
13012 V2, Mask, DAG))
13013 return BlendPerm;
13016 // Use low/high mov instructions. These are only valid in SSE1 because
13017 // otherwise they are widened to v2f64 and never get here.
13018 if (!Subtarget.hasSSE2()) {
13019 if (isShuffleEquivalent(Mask, {0, 1, 4, 5}, V1, V2))
13020 return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
13021 if (isShuffleEquivalent(Mask, {2, 3, 6, 7}, V1, V2))
13022 return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
13025 // Use dedicated unpack instructions for masks that match their pattern.
13026 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
13027 return V;
13029 // Otherwise fall back to a SHUFPS lowering strategy.
13030 return lowerShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
13033 /// Lower 4-lane i32 vector shuffles.
13035 /// We try to handle these with integer-domain shuffles where we can, but for
13036 /// blends we use the floating point domain blend instructions.
13037 static SDValue lowerV4I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13038 const APInt &Zeroable, SDValue V1, SDValue V2,
13039 const X86Subtarget &Subtarget,
13040 SelectionDAG &DAG) {
13041 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
13042 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
13043 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
13045 // Whenever we can lower this as a zext, that instruction is strictly faster
13046 // than any alternative. It also allows us to fold memory operands into the
13047 // shuffle in many cases.
13048 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2, Mask,
13049 Zeroable, Subtarget, DAG))
13050 return ZExt;
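// Hedged example (not part of the original comments): a v4i32 mask such as
// <0, Z, 1, Z>, where Z marks a known-zero lane, can be lowered as a
// zero-extension of the low two dwords (e.g. PMOVZXDQ on SSE4.1), a single,
// load-foldable instruction.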
13052 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13054 // Try to use shift instructions if fast.
13055 if (Subtarget.preferLowerShuffleAsShift()) {
13056 if (SDValue Shift =
13057 lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask, Zeroable,
13058 Subtarget, DAG, /*BitwiseOnly*/ true))
13059 return Shift;
13060 if (NumV2Elements == 0)
13061 if (SDValue Rotate =
13062 lowerShuffleAsBitRotate(DL, MVT::v4i32, V1, Mask, Subtarget, DAG))
13063 return Rotate;
13066 if (NumV2Elements == 0) {
13067 // Try to use broadcast unless the mask only has one non-undef element.
13068 if (count_if(Mask, [](int M) { return M >= 0 && M < 4; }) > 1) {
13069 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i32, V1, V2,
13070 Mask, Subtarget, DAG))
13071 return Broadcast;
13074 // Straight shuffle of a single input vector. For everything from SSE2
13075 // onward this has a single fast instruction with no scary immediates.
13076 // We coerce the shuffle pattern to be compatible with UNPCK instructions
13077 // but we aren't actually going to use the UNPCK instruction because doing
13078 // so prevents folding a load into this instruction or making a copy.
13079 const int UnpackLoMask[] = {0, 0, 1, 1};
13080 const int UnpackHiMask[] = {2, 2, 3, 3};
13081 if (isShuffleEquivalent(Mask, {0, 0, 1, 1}, V1, V2))
13082 Mask = UnpackLoMask;
13083 else if (isShuffleEquivalent(Mask, {2, 2, 3, 3}, V1, V2))
13084 Mask = UnpackHiMask;
13086 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
13087 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13090 if (Subtarget.hasAVX2())
13091 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13092 return Extract;
13094 // Try to use shift instructions.
13095 if (SDValue Shift =
13096 lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget,
13097 DAG, /*BitwiseOnly*/ false))
13098 return Shift;
13100 // There are special ways we can lower some single-element blends.
13101 if (NumV2Elements == 1)
13102 if (SDValue V = lowerShuffleAsElementInsertion(
13103 DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
13104 return V;
13106 // We have different paths for blend lowering, but they all must use the
13107 // *exact* same predicate.
13108 bool IsBlendSupported = Subtarget.hasSSE41();
13109 if (IsBlendSupported)
13110 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
13111 Zeroable, Subtarget, DAG))
13112 return Blend;
13114 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
13115 Zeroable, Subtarget, DAG))
13116 return Masked;
13118 // Use dedicated unpack instructions for masks that match their pattern.
13119 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
13120 return V;
13122 // Try to use byte rotation instructions.
13123 // It's more profitable for pre-SSSE3 targets to use shuffles/unpacks.
13124 if (Subtarget.hasSSSE3()) {
13125 if (Subtarget.hasVLX())
13126 if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v4i32, V1, V2, Mask,
13127 Zeroable, Subtarget, DAG))
13128 return Rotate;
13130 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i32, V1, V2, Mask,
13131 Subtarget, DAG))
13132 return Rotate;
13135 // Assume that a single SHUFPS is faster than an alternative sequence of
13136 // multiple instructions (even if the CPU has a domain penalty).
13137 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
13138 if (!isSingleSHUFPSMask(Mask)) {
13139 // If we have direct support for blends, we should lower by decomposing into
13140 // a permute. That will be faster than the domain cross.
13141 if (IsBlendSupported)
13142 return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i32, V1, V2, Mask,
13143 Subtarget, DAG);
13145 // Try to lower by permuting the inputs into an unpack instruction.
13146 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1, V2,
13147 Mask, Subtarget, DAG))
13148 return Unpack;
13151 // We implement this with SHUFPS because it can blend from two vectors.
13152 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
13153 // up the inputs, bypassing domain shift penalties that we would incur if we
13154 // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
13155 // relevant.
13156 SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
13157 SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
13158 SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
13159 return DAG.getBitcast(MVT::v4i32, ShufPS);
13162 /// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
13163 /// shuffle lowering, and the most complex part.
13165 /// The lowering strategy is to try to form pairs of input lanes which are
13166 /// targeted at the same half of the final vector, and then use a dword shuffle
13167 /// to place them onto the right half, and finally unpack the paired lanes into
13168 /// their final position.
13170 /// The exact breakdown of how to form these dword pairs and align them on the
13171 /// correct sides is really tricky. See the comments within the function for
13172 /// more of the details.
13174 /// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
13175 /// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
13176 /// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
13177 /// vector, form the analogous 128-bit 8-element Mask.
13178 static SDValue lowerV8I16GeneralSingleInputShuffle(
13179 const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
13180 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
13181 assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
13182 MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
13184 assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
13185 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
13186 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
13188 // Attempt to directly match PSHUFLW or PSHUFHW.
13189 if (isUndefOrInRange(LoMask, 0, 4) &&
13190 isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
13191 return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13192 getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
13194 if (isUndefOrInRange(HiMask, 4, 8) &&
13195 isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
13196 for (int i = 0; i != 4; ++i)
13197 HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
13198 return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13199 getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
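// Illustrative example (not from the original comments): the mask
// <0, 1, 2, 3, 7, 6, 5, 4> has a sequential low half and a high half lying
// entirely in [4, 8), so after rebasing the high half to <3, 2, 1, 0> the
// whole shuffle is a single PSHUFHW.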
13202 SmallVector<int, 4> LoInputs;
13203 copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
13204 array_pod_sort(LoInputs.begin(), LoInputs.end());
13205 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
13206 SmallVector<int, 4> HiInputs;
13207 copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
13208 array_pod_sort(HiInputs.begin(), HiInputs.end());
13209 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
13210 int NumLToL = llvm::lower_bound(LoInputs, 4) - LoInputs.begin();
13211 int NumHToL = LoInputs.size() - NumLToL;
13212 int NumLToH = llvm::lower_bound(HiInputs, 4) - HiInputs.begin();
13213 int NumHToH = HiInputs.size() - NumLToH;
13214 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
13215 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
13216 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
13217 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
13219 // If we are shuffling values from one half, check how many different DWORD
13220 // pairs we need to create. If only 1 or 2 are needed, we can perform this as
13221 // a PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
13222 auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
13223 ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
13224 V = DAG.getNode(ShufWOp, DL, VT, V,
13225 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13226 V = DAG.getBitcast(PSHUFDVT, V);
13227 V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
13228 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
13229 return DAG.getBitcast(VT, V);
13232 if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
13233 int PSHUFDMask[4] = { -1, -1, -1, -1 };
13234 SmallVector<std::pair<int, int>, 4> DWordPairs;
13235 int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);
13237 // Collect the different DWORD pairs.
13238 for (int DWord = 0; DWord != 4; ++DWord) {
13239 int M0 = Mask[2 * DWord + 0];
13240 int M1 = Mask[2 * DWord + 1];
13241 M0 = (M0 >= 0 ? M0 % 4 : M0);
13242 M1 = (M1 >= 0 ? M1 % 4 : M1);
13243 if (M0 < 0 && M1 < 0)
13244 continue;
13246 bool Match = false;
13247 for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
13248 auto &DWordPair = DWordPairs[j];
13249 if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
13250 (M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
13251 DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
13252 DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
13253 PSHUFDMask[DWord] = DOffset + j;
13254 Match = true;
13255 break;
13258 if (!Match) {
13259 PSHUFDMask[DWord] = DOffset + DWordPairs.size();
13260 DWordPairs.push_back(std::make_pair(M0, M1));
13264 if (DWordPairs.size() <= 2) {
13265 DWordPairs.resize(2, std::make_pair(-1, -1));
13266 int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
13267 DWordPairs[1].first, DWordPairs[1].second};
13268 if ((NumHToL + NumHToH) == 0)
13269 return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
13270 if ((NumLToL + NumLToH) == 0)
13271 return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
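// Hedged example (assumed, for illustration): for the low-half-only mask
// <0, 1, 0, 1, 2, 3, 2, 3> the loop above collects just two dword pairs,
// (0, 1) and (2, 3), giving an identity PSHUFHalfMask <0, 1, 2, 3> and a
// PSHUFDMask of <0, 0, 1, 1>, so the shuffle reduces to essentially a single
// PSHUFD.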
13275 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
13276 // such inputs we can swap two of the dwords across the half mark and end up
13277 // with <=2 inputs from each half going to each half. Once there, we can fall through
13278 // to the generic code below. For example:
13280 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13281 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
13283 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
13284 // and an existing 2-into-2 on the other half. In this case we may have to
13285 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
13286 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
13287 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
13288 // because any other situation (including a 3-into-1 or 1-into-3 in the other
13289 // half than the one we target for fixing) will be fixed when we re-enter this
13290 // path. We will also combine away any sequence of PSHUFD instructions that
13291 // result into a single instruction. Here is an example of the tricky case:
13293 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13294 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
13296 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
13298 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
13299 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
13301 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
13302 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
13304 // The result is fine to be handled by the generic logic.
13305 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
13306 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
13307 int AOffset, int BOffset) {
13308 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
13309 "Must call this with A having 3 or 1 inputs from the A half.");
13310 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
13311 "Must call this with B having 1 or 3 inputs from the B half.");
13312 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
13313 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
13315 bool ThreeAInputs = AToAInputs.size() == 3;
13317 // Compute the index of dword with only one word among the three inputs in
13318 // a half by taking the sum of the half with three inputs and subtracting
13319 // the sum of the actual three inputs. The difference is the remaining
13320 // slot.
13321 int ADWord = 0, BDWord = 0;
13322 int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
13323 int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
13324 int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
13325 ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
13326 int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
13327 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
13328 int TripleNonInputIdx =
13329 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
13330 TripleDWord = TripleNonInputIdx / 2;
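// Worked example (illustrative): if the low half contributes the three
// inputs {0, 1, 3} (AOffset == 0), TripleInputSum is 0+1+2+3 = 6 and the
// inputs sum to 4, so TripleNonInputIdx is 2 and TripleDWord is 1, i.e. the
// dword holding words 2 and 3.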
13332 // We use xor with one to compute the adjacent DWord to whichever one the
13333 // OneInput is in.
13334 OneInputDWord = (OneInput / 2) ^ 1;
13336 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
13337 // and BToA inputs. If there is also such a problem with the BToB and AToB
13338 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
13339 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
13340 // is essential that we don't *create* a 3<-1 as then we might oscillate.
13341 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
13342 // Compute how many inputs will be flipped by swapping these DWords. We
13343 // need to balance this to ensure we don't form a 3-1 shuffle in the other
13344 // half.
13346 int NumFlippedAToBInputs = llvm::count(AToBInputs, 2 * ADWord) +
13347 llvm::count(AToBInputs, 2 * ADWord + 1);
13348 int NumFlippedBToBInputs = llvm::count(BToBInputs, 2 * BDWord) +
13349 llvm::count(BToBInputs, 2 * BDWord + 1);
13350 if ((NumFlippedAToBInputs == 1 &&
13351 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
13352 (NumFlippedBToBInputs == 1 &&
13353 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
13354 // We choose whether to fix the A half or B half based on whether that
13355 // half has zero flipped inputs. At zero, we may not be able to fix it
13356 // with that half. We also bias towards fixing the B half because that
13357 // will more commonly be the high half, and we have to bias one way.
13358 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
13359 ArrayRef<int> Inputs) {
13360 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
13361 bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
13362 // Determine whether the free index is in the flipped dword or the
13363 // unflipped dword based on where the pinned index is. We use this bit
13364 // in an xor to conditionally select the adjacent dword.
13365 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
13366 bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13367 if (IsFixIdxInput == IsFixFreeIdxInput)
13368 FixFreeIdx += 1;
13369 IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13370 assert(IsFixIdxInput != IsFixFreeIdxInput &&
13371 "We need to be changing the number of flipped inputs!");
13372 int PSHUFHalfMask[] = {0, 1, 2, 3};
13373 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
13374 V = DAG.getNode(
13375 FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
13376 MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
13377 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13379 for (int &M : Mask)
13380 if (M >= 0 && M == FixIdx)
13381 M = FixFreeIdx;
13382 else if (M >= 0 && M == FixFreeIdx)
13383 M = FixIdx;
13385 if (NumFlippedBToBInputs != 0) {
13386 int BPinnedIdx =
13387 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
13388 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
13389 } else {
13390 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
13391 int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
13392 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
13397 int PSHUFDMask[] = {0, 1, 2, 3};
13398 PSHUFDMask[ADWord] = BDWord;
13399 PSHUFDMask[BDWord] = ADWord;
13400 V = DAG.getBitcast(
13402 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
13403 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
13405 // Adjust the mask to match the new locations of A and B.
13406 for (int &M : Mask)
13407 if (M >= 0 && M/2 == ADWord)
13408 M = 2 * BDWord + M % 2;
13409 else if (M >= 0 && M/2 == BDWord)
13410 M = 2 * ADWord + M % 2;
13412 // Recurse back into this routine to re-compute state now that this isn't
13413 // a 3 and 1 problem.
13414 return lowerV8I16GeneralSingleInputShuffle(DL, VT, V, Mask, Subtarget, DAG);
13416 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
13417 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
13418 if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
13419 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
13421 // At this point there are at most two inputs to the low and high halves from
13422 // each half. That means the inputs can always be grouped into dwords and
13423 // those dwords can then be moved to the correct half with a dword shuffle.
13424 // We use at most one low and one high word shuffle to collect these paired
13425 // inputs into dwords, and finally a dword shuffle to place them.
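// Hedged example (illustration only, not exhaustive): for the mask
// <0, 1, 4, 5, 2, 3, 6, 7> every word pair already forms a dword, so both
// half-word shuffles below end up as no-ops and the whole shuffle reduces to
// a single PSHUFD with the dword mask <0, 2, 1, 3>.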
13426 int PSHUFLMask[4] = {-1, -1, -1, -1};
13427 int PSHUFHMask[4] = {-1, -1, -1, -1};
13428 int PSHUFDMask[4] = {-1, -1, -1, -1};
13430 // First fix the masks for all the inputs that are staying in their
13431 // original halves. This will then dictate the targets of the cross-half
13432 // shuffles.
13433 auto fixInPlaceInputs =
13434 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
13435 MutableArrayRef<int> SourceHalfMask,
13436 MutableArrayRef<int> HalfMask, int HalfOffset) {
13437 if (InPlaceInputs.empty())
13438 return;
13439 if (InPlaceInputs.size() == 1) {
13440 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13441 InPlaceInputs[0] - HalfOffset;
13442 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
13443 return;
13445 if (IncomingInputs.empty()) {
13446 // Just fix all of the in place inputs.
13447 for (int Input : InPlaceInputs) {
13448 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
13449 PSHUFDMask[Input / 2] = Input / 2;
13451 return;
13454 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
13455 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13456 InPlaceInputs[0] - HalfOffset;
13457 // Put the second input next to the first so that they are packed into
13458 // a dword. We find the adjacent index by toggling the low bit.
13459 int AdjIndex = InPlaceInputs[0] ^ 1;
13460 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
13461 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
13462 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
13464 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
13465 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
13467 // Now gather the cross-half inputs and place them into a free dword of
13468 // their target half.
13469 // FIXME: This operation could almost certainly be simplified dramatically to
13470 // look more like the 3-1 fixing operation.
13471 auto moveInputsToRightHalf = [&PSHUFDMask](
13472 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
13473 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
13474 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
13475 int DestOffset) {
13476 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
13477 return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
13479 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
13480 int Word) {
13481 int LowWord = Word & ~1;
13482 int HighWord = Word | 1;
13483 return isWordClobbered(SourceHalfMask, LowWord) ||
13484 isWordClobbered(SourceHalfMask, HighWord);
13487 if (IncomingInputs.empty())
13488 return;
13490 if (ExistingInputs.empty()) {
13491 // Map any dwords with inputs from them into the right half.
13492 for (int Input : IncomingInputs) {
13493 // If the source half mask maps over the inputs, turn those into
13494 // swaps and use the swapped lane.
13495 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
13496 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
13497 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
13498 Input - SourceOffset;
13499 // We have to swap the uses in our half mask in one sweep.
13500 for (int &M : HalfMask)
13501 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
13502 M = Input;
13503 else if (M == Input)
13504 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13505 } else {
13506 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
13507 Input - SourceOffset &&
13508 "Previous placement doesn't match!");
13510 // Note that this correctly re-maps both when we do a swap and when
13511 // we observe the other side of the swap above. We rely on that to
13512 // avoid swapping the members of the input list directly.
13513 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13516 // Map the input's dword into the correct half.
13517 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
13518 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
13519 else
13520 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
13521 Input / 2 &&
13522 "Previous placement doesn't match!");
13525 // And just directly shift any other-half mask elements to the same half,
13526 // as we will have mirrored the dword containing the element into the
13527 // same position within that half.
13528 for (int &M : HalfMask)
13529 if (M >= SourceOffset && M < SourceOffset + 4) {
13530 M = M - SourceOffset + DestOffset;
13531 assert(M >= 0 && "This should never wrap below zero!");
13533 return;
13536 // Ensure we have the input in a viable dword of its current half. This
13537 // is particularly tricky because the original position may be clobbered
13538 // by inputs being moved and *staying* in that half.
13539 if (IncomingInputs.size() == 1) {
13540 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13541 int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
13542 SourceOffset;
13543 SourceHalfMask[InputFixed - SourceOffset] =
13544 IncomingInputs[0] - SourceOffset;
13545 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
13546 InputFixed);
13547 IncomingInputs[0] = InputFixed;
13549 } else if (IncomingInputs.size() == 2) {
13550 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
13551 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13552 // We have two non-adjacent or clobbered inputs we need to extract from
13553 // the source half. To do this, we need to map them into some adjacent
13554 // dword slot in the source mask.
13555 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
13556 IncomingInputs[1] - SourceOffset};
13558 // If there is a free slot in the source half mask adjacent to one of
13559 // the inputs, place the other input in it. We use (Index XOR 1) to
13560 // compute an adjacent index.
13561 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
13562 SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
13563 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
13564 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
13565 InputsFixed[1] = InputsFixed[0] ^ 1;
13566 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
13567 SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
13568 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
13569 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
13570 InputsFixed[0] = InputsFixed[1] ^ 1;
13571 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
13572 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
13573 // The two inputs are in the same DWord but it is clobbered and the
13574 // adjacent DWord isn't used at all. Move both inputs to the free
13575 // slot.
13576 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
13577 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
13578 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
13579 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
13580 } else {
13581 // The only way we hit this point is if there is no clobbering
13582 // (because there are no off-half inputs to this half) and there is no
13583 // free slot adjacent to one of the inputs. In this case, we have to
13584 // swap an input with a non-input.
13585 for (int i = 0; i < 4; ++i)
13586 assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
13587 "We can't handle any clobbers here!");
13588 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
13589 "Cannot have adjacent inputs here!");
13591 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
13592 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
13594 // We also have to update the final source mask in this case because
13595 // it may need to undo the above swap.
13596 for (int &M : FinalSourceHalfMask)
13597 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
13598 M = InputsFixed[1] + SourceOffset;
13599 else if (M == InputsFixed[1] + SourceOffset)
13600 M = (InputsFixed[0] ^ 1) + SourceOffset;
13602 InputsFixed[1] = InputsFixed[0] ^ 1;
13605 // Point everything at the fixed inputs.
13606 for (int &M : HalfMask)
13607 if (M == IncomingInputs[0])
13608 M = InputsFixed[0] + SourceOffset;
13609 else if (M == IncomingInputs[1])
13610 M = InputsFixed[1] + SourceOffset;
13612 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
13613 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
13615 } else {
13616 llvm_unreachable("Unhandled input size!");
13619 // Now hoist the DWord down to the right half.
13620 int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
13621 assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
13622 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
13623 for (int &M : HalfMask)
13624 for (int Input : IncomingInputs)
13625 if (M == Input)
13626 M = FreeDWord * 2 + Input % 2;
13628 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
13629 /*SourceOffset*/ 4, /*DestOffset*/ 0);
13630 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
13631 /*SourceOffset*/ 0, /*DestOffset*/ 4);
13633 // Now enact all the shuffles we've computed to move the inputs into their
13634 // target half.
13635 if (!isNoopShuffleMask(PSHUFLMask))
13636 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13637 getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
13638 if (!isNoopShuffleMask(PSHUFHMask))
13639 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13640 getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
13641 if (!isNoopShuffleMask(PSHUFDMask))
13642 V = DAG.getBitcast(
13644 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
13645 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
13647 // At this point, each half should contain all its inputs, and we can then
13648 // just shuffle them into their final position.
13649 assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
13650 "Failed to lift all the high half inputs to the low mask!");
13651 assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
13652 "Failed to lift all the low half inputs to the high mask!");
13654 // Do a half shuffle for the low mask.
13655 if (!isNoopShuffleMask(LoMask))
13656 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13657 getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
13659 // Do a half shuffle with the high mask after shifting its values down.
13660 for (int &M : HiMask)
13661 if (M >= 0)
13662 M -= 4;
13663 if (!isNoopShuffleMask(HiMask))
13664 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13665 getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
13667 return V;
13670 /// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
13671 /// blend if only one input is used.
13672 static SDValue lowerShuffleAsBlendOfPSHUFBs(
13673 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
13674 const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
13675 assert(!is128BitLaneCrossingShuffleMask(VT, Mask) &&
13676 "Lane crossing shuffle masks not supported");
13678 int NumBytes = VT.getSizeInBits() / 8;
13679 int Size = Mask.size();
13680 int Scale = NumBytes / Size;
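// Byte-index sketch (illustrative): for a v8i16 shuffle, Size is 8 and
// NumBytes is 16, so Scale is 2. A mask element M = 9 (word 1 of V2) expands
// to V2 byte indices 2 and 3 for the two bytes it covers, while the matching
// V1 control bytes are set to 0x80 so PSHUFB zeroes them before the OR blend.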
13682 SmallVector<SDValue, 64> V1Mask(NumBytes, DAG.getUNDEF(MVT::i8));
13683 SmallVector<SDValue, 64> V2Mask(NumBytes, DAG.getUNDEF(MVT::i8));
13684 V1InUse = false;
13685 V2InUse = false;
13687 for (int i = 0; i < NumBytes; ++i) {
13688 int M = Mask[i / Scale];
13689 if (M < 0)
13690 continue;
13692 const int ZeroMask = 0x80;
13693 int V1Idx = M < Size ? M * Scale + i % Scale : ZeroMask;
13694 int V2Idx = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale;
13695 if (Zeroable[i / Scale])
13696 V1Idx = V2Idx = ZeroMask;
13698 V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
13699 V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
13700 V1InUse |= (ZeroMask != V1Idx);
13701 V2InUse |= (ZeroMask != V2Idx);
13704 MVT ShufVT = MVT::getVectorVT(MVT::i8, NumBytes);
13705 if (V1InUse)
13706 V1 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V1),
13707 DAG.getBuildVector(ShufVT, DL, V1Mask));
13708 if (V2InUse)
13709 V2 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V2),
13710 DAG.getBuildVector(ShufVT, DL, V2Mask));
13712 // If we need shuffled inputs from both, blend the two.
13713 SDValue V;
13714 if (V1InUse && V2InUse)
13715 V = DAG.getNode(ISD::OR, DL, ShufVT, V1, V2);
13716 else
13717 V = V1InUse ? V1 : V2;
13719 // Cast the result back to the correct type.
13720 return DAG.getBitcast(VT, V);
13723 /// Generic lowering of 8-lane i16 shuffles.
13725 /// This handles both single-input shuffles and combined shuffle/blends with
13726 /// two inputs. The single input shuffles are immediately delegated to
13727 /// a dedicated lowering routine.
13729 /// The blends are lowered in one of three fundamental ways. If there are few
13730 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
13731 /// of the input is significantly cheaper when lowered as an interleaving of
13732 /// the two inputs, try to interleave them. Otherwise, blend the low and high
13733 /// halves of the inputs separately (making them have relatively few inputs)
13734 /// and then concatenate them.
13735 static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13736 const APInt &Zeroable, SDValue V1, SDValue V2,
13737 const X86Subtarget &Subtarget,
13738 SelectionDAG &DAG) {
13739 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
13740 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
13741 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
13743 // Whenever we can lower this as a zext, that instruction is strictly faster
13744 // than any alternative.
13745 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i16, V1, V2, Mask,
13746 Zeroable, Subtarget, DAG))
13747 return ZExt;
13749 // Try to lower using a truncation.
13750 if (SDValue V = lowerShuffleWithVPMOV(DL, MVT::v8i16, V1, V2, Mask, Zeroable,
13751 Subtarget, DAG))
13752 return V;
13754 int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
13756 if (NumV2Inputs == 0) {
13757 // Try to use shift instructions.
13758 if (SDValue Shift =
13759 lowerShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask, Zeroable,
13760 Subtarget, DAG, /*BitwiseOnly*/ false))
13761 return Shift;
13763 // Check for being able to broadcast a single element.
13764 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i16, V1, V2,
13765 Mask, Subtarget, DAG))
13766 return Broadcast;
13768 // Try to use bit rotation instructions.
13769 if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v8i16, V1, Mask,
13770 Subtarget, DAG))
13771 return Rotate;
13773 // Use dedicated unpack instructions for masks that match their pattern.
13774 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
13775 return V;
13777 // Use dedicated pack instructions for masks that match their pattern.
13778 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
13779 Subtarget))
13780 return V;
13782 // Try to use byte rotation instructions.
13783 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V1, Mask,
13784 Subtarget, DAG))
13785 return Rotate;
13787 // Make a copy of the mask so it can be modified.
13788 SmallVector<int, 8> MutableMask(Mask);
13789 return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v8i16, V1, MutableMask,
13790 Subtarget, DAG);
13793 assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
13794 "All single-input shuffles should be canonicalized to be V1-input "
13795 "shuffles.");
13797 // Try to use shift instructions.
13798 if (SDValue Shift =
13799 lowerShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget,
13800 DAG, /*BitwiseOnly*/ false))
13801 return Shift;
13803 // See if we can use SSE4A Extraction / Insertion.
13804 if (Subtarget.hasSSE4A())
13805 if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
13806 Zeroable, DAG))
13807 return V;
13809 // There are special ways we can lower some single-element blends.
13810 if (NumV2Inputs == 1)
13811 if (SDValue V = lowerShuffleAsElementInsertion(
13812 DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
13813 return V;
13815 // We have different paths for blend lowering, but they all must use the
13816 // *exact* same predicate.
13817 bool IsBlendSupported = Subtarget.hasSSE41();
13818 if (IsBlendSupported)
13819 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
13820 Zeroable, Subtarget, DAG))
13821 return Blend;
13823 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
13824 Zeroable, Subtarget, DAG))
13825 return Masked;
13827 // Use dedicated unpack instructions for masks that match their pattern.
13828 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
13829 return V;
13831 // Use dedicated pack instructions for masks that match their pattern.
13832 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
13833 Subtarget))
13834 return V;
13836 // Try to lower using a truncation.
13837 if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v8i16, V1, V2, Mask, Zeroable,
13838 Subtarget, DAG))
13839 return V;
13841 // Try to use byte rotation instructions.
13842 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V2, Mask,
13843 Subtarget, DAG))
13844 return Rotate;
13846 if (SDValue BitBlend =
13847 lowerShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
13848 return BitBlend;
13850 // Try to use byte shift instructions to mask.
13851 if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v8i16, V1, V2, Mask,
13852 Zeroable, Subtarget, DAG))
13853 return V;
13855 // Attempt to lower using compaction; SSE41 is necessary for PACKUSDW.
13856 int NumEvenDrops = canLowerByDroppingElements(Mask, true, false);
13857 if ((NumEvenDrops == 1 || (NumEvenDrops == 2 && Subtarget.hasSSE41())) &&
13858 !Subtarget.hasVLX()) {
13859 // Check if this is part of a 256-bit vector truncation.
13860 unsigned PackOpc = 0;
13861 if (NumEvenDrops == 2 && Subtarget.hasAVX2() &&
13862 peekThroughBitcasts(V1).getOpcode() == ISD::EXTRACT_SUBVECTOR &&
13863 peekThroughBitcasts(V2).getOpcode() == ISD::EXTRACT_SUBVECTOR) {
13864 SDValue V1V2 = concatSubVectors(V1, V2, DAG, DL);
13865 V1V2 = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1V2,
13866 getZeroVector(MVT::v16i16, Subtarget, DAG, DL),
13867 DAG.getTargetConstant(0xEE, DL, MVT::i8));
13868 V1V2 = DAG.getBitcast(MVT::v8i32, V1V2);
13869 V1 = extract128BitVector(V1V2, 0, DAG, DL);
13870 V2 = extract128BitVector(V1V2, 4, DAG, DL);
13871 PackOpc = X86ISD::PACKUS;
13872 } else if (Subtarget.hasSSE41()) {
13873 SmallVector<SDValue, 4> DWordClearOps(4,
13874 DAG.getConstant(0, DL, MVT::i32));
13875 for (unsigned i = 0; i != 4; i += 1 << (NumEvenDrops - 1))
13876 DWordClearOps[i] = DAG.getConstant(0xFFFF, DL, MVT::i32);
13877 SDValue DWordClearMask =
13878 DAG.getBuildVector(MVT::v4i32, DL, DWordClearOps);
13879 V1 = DAG.getNode(ISD::AND, DL, MVT::v4i32, DAG.getBitcast(MVT::v4i32, V1),
13880 DWordClearMask);
13881 V2 = DAG.getNode(ISD::AND, DL, MVT::v4i32, DAG.getBitcast(MVT::v4i32, V2),
13882 DWordClearMask);
13883 PackOpc = X86ISD::PACKUS;
13884 } else if (!Subtarget.hasSSSE3()) {
13885 SDValue ShAmt = DAG.getTargetConstant(16, DL, MVT::i8);
13886 V1 = DAG.getBitcast(MVT::v4i32, V1);
13887 V2 = DAG.getBitcast(MVT::v4i32, V2);
13888 V1 = DAG.getNode(X86ISD::VSHLI, DL, MVT::v4i32, V1, ShAmt);
13889 V2 = DAG.getNode(X86ISD::VSHLI, DL, MVT::v4i32, V2, ShAmt);
13890 V1 = DAG.getNode(X86ISD::VSRAI, DL, MVT::v4i32, V1, ShAmt);
13891 V2 = DAG.getNode(X86ISD::VSRAI, DL, MVT::v4i32, V2, ShAmt);
13892 PackOpc = X86ISD::PACKSS;
13894 if (PackOpc) {
13895 // Now pack things back together.
13896 SDValue Result = DAG.getNode(PackOpc, DL, MVT::v8i16, V1, V2);
13897 if (NumEvenDrops == 2) {
13898 Result = DAG.getBitcast(MVT::v4i32, Result);
13899 Result = DAG.getNode(PackOpc, DL, MVT::v8i16, Result, Result);
13901 return Result;
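// Worked example (illustrative, assuming the mask reaches this point): for
// the even-compaction mask <0, 2, 4, 6, 8, 10, 12, 14> (NumEvenDrops == 1),
// every dword of V1 and V2 is masked with 0x0000FFFF so only its low word
// survives, and a single PACKUSDW then concatenates the surviving words of
// both inputs in order.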
13905 // When compacting odd (upper) elements, use PACKSS pre-SSE41.
13906 int NumOddDrops = canLowerByDroppingElements(Mask, false, false);
13907 if (NumOddDrops == 1) {
13908 bool HasSSE41 = Subtarget.hasSSE41();
13909 V1 = DAG.getNode(HasSSE41 ? X86ISD::VSRLI : X86ISD::VSRAI, DL, MVT::v4i32,
13910 DAG.getBitcast(MVT::v4i32, V1),
13911 DAG.getTargetConstant(16, DL, MVT::i8));
13912 V2 = DAG.getNode(HasSSE41 ? X86ISD::VSRLI : X86ISD::VSRAI, DL, MVT::v4i32,
13913 DAG.getBitcast(MVT::v4i32, V2),
13914 DAG.getTargetConstant(16, DL, MVT::i8));
13915 return DAG.getNode(HasSSE41 ? X86ISD::PACKUS : X86ISD::PACKSS, DL,
13916 MVT::v8i16, V1, V2);
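// Illustrative case (assumed): for the odd-compaction mask
// <1, 3, 5, 7, 9, 11, 13, 15>, each dword is shifted right by 16 so the odd
// word lands in the low half (a logical shift with SSE4.1, arithmetic
// otherwise), and PACKUSDW / PACKSSDW then packs the two inputs back into
// eight words.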
13919 // Try to lower by permuting the inputs into an unpack instruction.
13920 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1, V2,
13921 Mask, Subtarget, DAG))
13922 return Unpack;
13924 // If we can't directly blend but can use PSHUFB, that will be better as it
13925 // can both shuffle and set up the inefficient blend.
13926 if (!IsBlendSupported && Subtarget.hasSSSE3()) {
13927 bool V1InUse, V2InUse;
13928 return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
13929 Zeroable, DAG, V1InUse, V2InUse);
13932 // We can always bit-blend if we have to, so the fallback strategy is to
13933 // decompose into single-input permutes and blends/unpacks.
13934 return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8i16, V1, V2,
13935 Mask, Subtarget, DAG);
13938 /// Lower 8-lane 16-bit floating point shuffles.
13939 static SDValue lowerV8F16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13940 const APInt &Zeroable, SDValue V1, SDValue V2,
13941 const X86Subtarget &Subtarget,
13942 SelectionDAG &DAG) {
13943 assert(V1.getSimpleValueType() == MVT::v8f16 && "Bad operand type!");
13944 assert(V2.getSimpleValueType() == MVT::v8f16 && "Bad operand type!");
13945 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
13946 int NumV2Elements = count_if(Mask, [](int M) { return M >= 8; });
13948 if (Subtarget.hasFP16()) {
13949 if (NumV2Elements == 0) {
13950 // Check for being able to broadcast a single element.
13951 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f16, V1, V2,
13952 Mask, Subtarget, DAG))
13953 return Broadcast;
13955 if (NumV2Elements == 1 && Mask[0] >= 8)
13956 if (SDValue V = lowerShuffleAsElementInsertion(
13957 DL, MVT::v8f16, V1, V2, Mask, Zeroable, Subtarget, DAG))
13958 return V;
13961 V1 = DAG.getBitcast(MVT::v8i16, V1);
13962 V2 = DAG.getBitcast(MVT::v8i16, V2);
13963 return DAG.getBitcast(MVT::v8f16,
13964 DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, Mask));
13967 // Lowers a unary/binary shuffle as VPERMV/VPERMV3. For non-VLX targets,
13968 // sub-512-bit shuffles are padded to 512 bits for the shuffle and then
13969 // the active subvector is extracted.
13970 static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
13971 ArrayRef<int> Mask, SDValue V1, SDValue V2,
13972 const X86Subtarget &Subtarget,
13973 SelectionDAG &DAG) {
13974 MVT MaskVT = VT.changeTypeToInteger();
13975 SDValue MaskNode;
13976 MVT ShuffleVT = VT;
13977 if (!VT.is512BitVector() && !Subtarget.hasVLX()) {
13978 V1 = widenSubVector(V1, false, Subtarget, DAG, DL, 512);
13979 V2 = widenSubVector(V2, false, Subtarget, DAG, DL, 512);
13980 ShuffleVT = V1.getSimpleValueType();
13982 // Adjust mask to correct indices for the second input.
13983 int NumElts = VT.getVectorNumElements();
13984 unsigned Scale = 512 / VT.getSizeInBits();
13985 SmallVector<int, 32> AdjustedMask(Mask);
13986 for (int &M : AdjustedMask)
13987 if (NumElts <= M)
13988 M += (Scale - 1) * NumElts;
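// Illustrative adjustment (assumed example): for a 256-bit v4f64 shuffle on
// a non-VLX target, NumElts is 4 and Scale is 2, so a mask element of 5
// (element 1 of V2) becomes 5 + 4 = 9, selecting element 1 of the widened V2
// in the 512-bit VPERMV3 index space.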
13989 MaskNode = getConstVector(AdjustedMask, MaskVT, DAG, DL, true);
13990 MaskNode = widenSubVector(MaskNode, false, Subtarget, DAG, DL, 512);
13991 } else {
13992 MaskNode = getConstVector(Mask, MaskVT, DAG, DL, true);
13995 SDValue Result;
13996 if (V2.isUndef())
13997 Result = DAG.getNode(X86ISD::VPERMV, DL, ShuffleVT, MaskNode, V1);
13998 else
13999 Result = DAG.getNode(X86ISD::VPERMV3, DL, ShuffleVT, V1, MaskNode, V2);
14001 if (VT != ShuffleVT)
14002 Result = extractSubVector(Result, 0, DAG, DL, VT.getSizeInBits());
14004 return Result;
14007 /// Generic lowering of v16i8 shuffles.
14009 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
14010 /// detect any complexity reducing interleaving. If that doesn't help, it uses
14011 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
14012 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
14013 /// back together.
14014 static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
14015 const APInt &Zeroable, SDValue V1, SDValue V2,
14016 const X86Subtarget &Subtarget,
14017 SelectionDAG &DAG) {
14018 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14019 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14020 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
14022 // Try to use shift instructions.
14023 if (SDValue Shift =
14024 lowerShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget,
14025 DAG, /*BitwiseOnly*/ false))
14026 return Shift;
14028 // Try to use byte rotation instructions.
14029 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i8, V1, V2, Mask,
14030 Subtarget, DAG))
14031 return Rotate;
14033 // Use dedicated pack instructions for masks that match their pattern.
14034 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG,
14035 Subtarget))
14036 return V;
14038 // Try to use a zext lowering.
14039 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v16i8, V1, V2, Mask,
14040 Zeroable, Subtarget, DAG))
14041 return ZExt;
14043 // Try to lower using a truncation.
14044 if (SDValue V = lowerShuffleWithVPMOV(DL, MVT::v16i8, V1, V2, Mask, Zeroable,
14045 Subtarget, DAG))
14046 return V;
14048 if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v16i8, V1, V2, Mask, Zeroable,
14049 Subtarget, DAG))
14050 return V;
14052 // See if we can use SSE4A Extraction / Insertion.
14053 if (Subtarget.hasSSE4A())
14054 if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
14055 Zeroable, DAG))
14056 return V;
14058 int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
14060 // For single-input shuffles, there are some nicer lowering tricks we can use.
14061 if (NumV2Elements == 0) {
14062 // Check for being able to broadcast a single element.
14063 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i8, V1, V2,
14064 Mask, Subtarget, DAG))
14065 return Broadcast;
14067 // Try to use bit rotation instructions.
14068 if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v16i8, V1, Mask,
14069 Subtarget, DAG))
14070 return Rotate;
14072 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14073 return V;
14075 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
14076 // Notably, this handles splat and partial-splat shuffles more efficiently.
14077 // However, it only makes sense if the pre-duplication shuffle simplifies
14078 // things significantly. Currently, this means we need to be able to
14079 // express the pre-duplication shuffle as an i16 shuffle.
14081 // FIXME: We should check for other patterns which can be widened into an
14082 // i16 shuffle as well.
14083 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
14084 for (int i = 0; i < 16; i += 2)
14085 if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
14086 return false;
14088 return true;
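// Illustrative masks (not from the source): any byte splat such as
// <5, 5, 5, 5, ...> passes this check, as does any mask that keeps each
// adjacent byte pair on a single source byte, e.g. <0, 0, 2, 2, 4, 4, ...>.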
14090 auto tryToWidenViaDuplication = [&]() -> SDValue {
14091 if (!canWidenViaDuplication(Mask))
14092 return SDValue();
14093 SmallVector<int, 4> LoInputs;
14094 copy_if(Mask, std::back_inserter(LoInputs),
14095 [](int M) { return M >= 0 && M < 8; });
14096 array_pod_sort(LoInputs.begin(), LoInputs.end());
14097 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
14098 LoInputs.end());
14099 SmallVector<int, 4> HiInputs;
14100 copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
14101 array_pod_sort(HiInputs.begin(), HiInputs.end());
14102 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
14103 HiInputs.end());
14105 bool TargetLo = LoInputs.size() >= HiInputs.size();
14106 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
14107 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
14109 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
14110 SmallDenseMap<int, int, 8> LaneMap;
14111 for (int I : InPlaceInputs) {
14112 PreDupI16Shuffle[I/2] = I/2;
14113 LaneMap[I] = I;
14115 int j = TargetLo ? 0 : 4, je = j + 4;
14116 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
14117 // Check if j is already a shuffle of this input. This happens when
14118 // there are two adjacent bytes after we move the low one.
14119 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
14120 // If we haven't yet mapped the input, search for a slot into which
14121 // we can map it.
14122 while (j < je && PreDupI16Shuffle[j] >= 0)
14123 ++j;
14125 if (j == je)
14126 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
14127 return SDValue();
14129 // Map this input with the i16 shuffle.
14130 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
14133 // Update the lane map based on the mapping we ended up with.
14134 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
14136 V1 = DAG.getBitcast(
14137 MVT::v16i8,
14138 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14139 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
14141 // Unpack the bytes to form the i16s that will be shuffled into place.
14142 bool EvenInUse = false, OddInUse = false;
14143 for (int i = 0; i < 16; i += 2) {
14144 EvenInUse |= (Mask[i + 0] >= 0);
14145 OddInUse |= (Mask[i + 1] >= 0);
14146 if (EvenInUse && OddInUse)
14147 break;
14149 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
14150 MVT::v16i8, EvenInUse ? V1 : DAG.getUNDEF(MVT::v16i8),
14151 OddInUse ? V1 : DAG.getUNDEF(MVT::v16i8));
14153 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
14154 for (int i = 0; i < 16; ++i)
14155 if (Mask[i] >= 0) {
14156 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
14157 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
14158 if (PostDupI16Shuffle[i / 2] < 0)
14159 PostDupI16Shuffle[i / 2] = MappedMask;
14160 else
14161 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
14162 "Conflicting entries in the original shuffle!");
14164 return DAG.getBitcast(
14165 MVT::v16i8,
14166 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14167 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
14169 if (SDValue V = tryToWidenViaDuplication())
14170 return V;
14173 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
14174 Zeroable, Subtarget, DAG))
14175 return Masked;
14177 // Use dedicated unpack instructions for masks that match their pattern.
14178 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14179 return V;
14181 // Try to use byte shift instructions to mask.
14182 if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v16i8, V1, V2, Mask,
14183 Zeroable, Subtarget, DAG))
14184 return V;
14186 // Check for compaction patterns.
14187 bool IsSingleInput = V2.isUndef();
14188 int NumEvenDrops = canLowerByDroppingElements(Mask, true, IsSingleInput);
14190 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
14191 // with PSHUFB. It is important to do this before we attempt to generate any
14192 // blends but after all of the single-input lowerings. If the single input
14193 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
14194 // want to preserve that and we can DAG combine any longer sequences into
14195 // a PSHUFB in the end. But once we start blending from multiple inputs,
14196 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
14197 // and there are *very* few patterns that would actually be faster than the
14198 // PSHUFB approach because of its ability to zero lanes.
14200 // If the mask is a binary compaction, we can more efficiently perform this
14201 // as a PACKUS(AND(),AND()) - which is quicker than UNPACK(PSHUFB(),PSHUFB()).
14203 // FIXME: The only exceptions to the above are blends which are exact
14204 // interleavings with direct instructions supporting them. We currently don't
14205 // handle those well here.
14206 if (Subtarget.hasSSSE3() && (IsSingleInput || NumEvenDrops != 1)) {
14207 bool V1InUse = false;
14208 bool V2InUse = false;
14210 SDValue PSHUFB = lowerShuffleAsBlendOfPSHUFBs(
14211 DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);
14213 // If both V1 and V2 are in use and we can use a direct blend or an unpack,
14214 // do so. This avoids using them to handle blends-with-zero which is
14215 // important as a single pshufb is significantly faster for that.
14216 if (V1InUse && V2InUse) {
14217 if (Subtarget.hasSSE41())
14218 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
14219 Zeroable, Subtarget, DAG))
14220 return Blend;
14222 // We can use an unpack to do the blending rather than an or in some
14223 // cases. Even though the OR may be (very slightly) more efficient, we
14224 // prefer this lowering because there are common cases where part of
14225 // the complexity of the shuffles goes away when we do the final blend as
14226 // an unpack.
14227 // FIXME: It might be worth trying to detect if the unpack-feeding
14228 // shuffles will both be pshufb, in which case we shouldn't bother with
14229 // this.
14230 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(
14231 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14232 return Unpack;
14234 // AVX512VBMI can lower to VPERMB (non-VLX will pad to v64i8).
14235 if (Subtarget.hasVBMI())
14236 return lowerShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, Subtarget,
14237 DAG);
14239 // If we have XOP we can use one VPPERM instead of multiple PSHUFBs.
14240 if (Subtarget.hasXOP()) {
14241 SDValue MaskNode = getConstVector(Mask, MVT::v16i8, DAG, DL, true);
14242 return DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, V1, V2, MaskNode);
14245 // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
14246 // PALIGNR will be cheaper than the second PSHUFB+OR.
14247 if (SDValue V = lowerShuffleAsByteRotateAndPermute(
14248 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14249 return V;
14252 return PSHUFB;
14255 // There are special ways we can lower some single-element blends.
14256 if (NumV2Elements == 1)
14257 if (SDValue V = lowerShuffleAsElementInsertion(
14258 DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
14259 return V;
14261 if (SDValue Blend = lowerShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
14262 return Blend;
14264 // Check whether a compaction lowering can be done. This handles shuffles
14265 // which take every Nth element for some even N. See the helper function for
14266 // details.
14268 // We special case these as they can be particularly efficiently handled with
14269 // the PACKUSWB instruction on x86 and they show up in common patterns of
14270 // rearranging bytes to truncate wide elements.
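// For instance, the two-input compaction mask <0,2,4,...,30> (every other byte
// of both inputs) gives NumEvenDrops == 1 and is lowered below as (roughly)
// PACKUSWB(AND(V1, 0x00FF), AND(V2, 0x00FF)).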
14271 if (NumEvenDrops) {
14272 // NumEvenDrops is the log2 of the stride between kept elements. Another way
14273 // of thinking about it is that we need to drop the odd elements this many
14274 // times to compact the input down to the result.
14276 // First we need to zero all the dropped bytes.
14277 assert(NumEvenDrops <= 3 &&
14278 "No support for dropping even elements more than 3 times.");
14279 SmallVector<SDValue, 8> WordClearOps(8, DAG.getConstant(0, DL, MVT::i16));
14280 for (unsigned i = 0; i != 8; i += 1 << (NumEvenDrops - 1))
14281 WordClearOps[i] = DAG.getConstant(0xFF, DL, MVT::i16);
14282 SDValue WordClearMask = DAG.getBuildVector(MVT::v8i16, DL, WordClearOps);
14283 V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, DAG.getBitcast(MVT::v8i16, V1),
14284 WordClearMask);
14285 if (!IsSingleInput)
14286 V2 = DAG.getNode(ISD::AND, DL, MVT::v8i16, DAG.getBitcast(MVT::v8i16, V2),
14287 WordClearMask);
14289 // Now pack things back together.
14290 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1,
14291 IsSingleInput ? V1 : V2);
14292 for (int i = 1; i < NumEvenDrops; ++i) {
14293 Result = DAG.getBitcast(MVT::v8i16, Result);
14294 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
14296 return Result;
14299 int NumOddDrops = canLowerByDroppingElements(Mask, false, IsSingleInput);
14300 if (NumOddDrops == 1) {
14301 V1 = DAG.getNode(X86ISD::VSRLI, DL, MVT::v8i16,
14302 DAG.getBitcast(MVT::v8i16, V1),
14303 DAG.getTargetConstant(8, DL, MVT::i8));
14304 if (!IsSingleInput)
14305 V2 = DAG.getNode(X86ISD::VSRLI, DL, MVT::v8i16,
14306 DAG.getBitcast(MVT::v8i16, V2),
14307 DAG.getTargetConstant(8, DL, MVT::i8));
14308 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1,
14309 IsSingleInput ? V1 : V2);
14312 // Handle multi-input cases by blending/unpacking single-input shuffles.
14313 if (NumV2Elements > 0)
14314 return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v16i8, V1, V2, Mask,
14315 Subtarget, DAG);
14317 // The fallback path for single-input shuffles widens this into two v8i16
14318 // vectors with unpacks, shuffles those, and then pulls them back together
14319 // with a pack.
14320 SDValue V = V1;
14322 std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
14323 std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
14324 for (int i = 0; i < 16; ++i)
14325 if (Mask[i] >= 0)
14326 (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
14328 SDValue VLoHalf, VHiHalf;
14329 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
14330 // them out and avoid using UNPCK{L,H} to extract the elements of V as
14331 // i16s.
14332 if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
14333 none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
14334 // Use a mask to drop the high bytes.
14335 VLoHalf = DAG.getBitcast(MVT::v8i16, V);
14336 VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
14337 DAG.getConstant(0x00FF, DL, MVT::v8i16));
14339 // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
14340 VHiHalf = DAG.getUNDEF(MVT::v8i16);
14342 // Squash the masks to point directly into VLoHalf.
14343 for (int &M : LoBlendMask)
14344 if (M >= 0)
14345 M /= 2;
14346 for (int &M : HiBlendMask)
14347 if (M >= 0)
14348 M /= 2;
14349 } else {
14350 // Otherwise just unpack the low half of V into VLoHalf and the high half into
14351 // VHiHalf so that we can blend them as i16s.
14352 SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
14354 VLoHalf = DAG.getBitcast(
14355 MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
14356 VHiHalf = DAG.getBitcast(
14357 MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
14360 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
14361 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
14363 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
14366 /// Dispatching routine to lower various 128-bit x86 vector shuffles.
14368 /// This routine breaks down the specific type of 128-bit shuffle and
14369 /// dispatches to the lowering routines accordingly.
14370 static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
14371 MVT VT, SDValue V1, SDValue V2,
14372 const APInt &Zeroable,
14373 const X86Subtarget &Subtarget,
14374 SelectionDAG &DAG) {
14375 if (VT == MVT::v8bf16) {
14376 V1 = DAG.getBitcast(MVT::v8i16, V1);
14377 V2 = DAG.getBitcast(MVT::v8i16, V2);
14378 return DAG.getBitcast(VT,
14379 DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, Mask));
14382 switch (VT.SimpleTy) {
14383 case MVT::v2i64:
14384 return lowerV2I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14385 case MVT::v2f64:
14386 return lowerV2F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14387 case MVT::v4i32:
14388 return lowerV4I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14389 case MVT::v4f32:
14390 return lowerV4F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14391 case MVT::v8i16:
14392 return lowerV8I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14393 case MVT::v8f16:
14394 return lowerV8F16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14395 case MVT::v16i8:
14396 return lowerV16I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14398 default:
14399 llvm_unreachable("Unimplemented!");
14403 /// Generic routine to split vector shuffle into half-sized shuffles.
14405 /// This routine just extracts two subvectors, shuffles them independently, and
14406 /// then concatenates them back together. This should work effectively with all
14407 /// AVX vector shuffle types.
14408 static SDValue splitAndLowerShuffle(const SDLoc &DL, MVT VT, SDValue V1,
14409 SDValue V2, ArrayRef<int> Mask,
14410 SelectionDAG &DAG, bool SimpleOnly) {
14411 assert(VT.getSizeInBits() >= 256 &&
14412 "Only for 256-bit or wider vector shuffles!");
14413 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
14414 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
14416 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
14417 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
14419 int NumElements = VT.getVectorNumElements();
14420 int SplitNumElements = NumElements / 2;
14421 MVT ScalarVT = VT.getVectorElementType();
14422 MVT SplitVT = MVT::getVectorVT(ScalarVT, SplitNumElements);
14424 // Use splitVector/extractSubVector so that split build-vectors just build two
14425 // narrower build vectors. This helps shuffling with splats and zeros.
14426 auto SplitVector = [&](SDValue V) {
14427 SDValue LoV, HiV;
14428 std::tie(LoV, HiV) = splitVector(peekThroughBitcasts(V), DAG, DL);
14429 return std::make_pair(DAG.getBitcast(SplitVT, LoV),
14430 DAG.getBitcast(SplitVT, HiV));
14433 SDValue LoV1, HiV1, LoV2, HiV2;
14434 std::tie(LoV1, HiV1) = SplitVector(V1);
14435 std::tie(LoV2, HiV2) = SplitVector(V2);
14437 // Now create two 4-way blends of these half-width vectors.
14438 auto GetHalfBlendPiecesReq = [&](const ArrayRef<int> &HalfMask, bool &UseLoV1,
14439 bool &UseHiV1, bool &UseLoV2,
14440 bool &UseHiV2) {
14441 UseLoV1 = UseHiV1 = UseLoV2 = UseHiV2 = false;
14442 for (int i = 0; i < SplitNumElements; ++i) {
14443 int M = HalfMask[i];
14444 if (M >= NumElements) {
14445 if (M >= NumElements + SplitNumElements)
14446 UseHiV2 = true;
14447 else
14448 UseLoV2 = true;
14449 } else if (M >= 0) {
14450 if (M >= SplitNumElements)
14451 UseHiV1 = true;
14452 else
14453 UseLoV1 = true;
14458 auto CheckHalfBlendUsable = [&](const ArrayRef<int> &HalfMask) -> bool {
14459 if (!SimpleOnly)
14460 return true;
14462 bool UseLoV1, UseHiV1, UseLoV2, UseHiV2;
14463 GetHalfBlendPiecesReq(HalfMask, UseLoV1, UseHiV1, UseLoV2, UseHiV2);
14465 return !(UseHiV1 || UseHiV2);
14468 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
14469 SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
14470 SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
14471 SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
14472 for (int i = 0; i < SplitNumElements; ++i) {
14473 int M = HalfMask[i];
14474 if (M >= NumElements) {
14475 V2BlendMask[i] = M - NumElements;
14476 BlendMask[i] = SplitNumElements + i;
14477 } else if (M >= 0) {
14478 V1BlendMask[i] = M;
14479 BlendMask[i] = i;
14483 bool UseLoV1, UseHiV1, UseLoV2, UseHiV2;
14484 GetHalfBlendPiecesReq(HalfMask, UseLoV1, UseHiV1, UseLoV2, UseHiV2);
14486 // Because the lowering happens after all combining takes place, we need to
14487 // manually combine these blend masks as much as possible so that we create
14488 // a minimal number of high-level vector shuffle nodes.
14489 assert((!SimpleOnly || (!UseHiV1 && !UseHiV2)) && "Shuffle isn't simple");
14491 // First try just blending the halves of V1 or V2.
14492 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
14493 return DAG.getUNDEF(SplitVT);
14494 if (!UseLoV2 && !UseHiV2)
14495 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14496 if (!UseLoV1 && !UseHiV1)
14497 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14499 SDValue V1Blend, V2Blend;
14500 if (UseLoV1 && UseHiV1) {
14501 V1Blend = DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14502 } else {
14503 // We only use half of V1 so map the usage down into the final blend mask.
14504 V1Blend = UseLoV1 ? LoV1 : HiV1;
14505 for (int i = 0; i < SplitNumElements; ++i)
14506 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
14507 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
14509 if (UseLoV2 && UseHiV2) {
14510 V2Blend = DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14511 } else {
14512 // We only use half of V2 so map the usage down into the final blend mask.
14513 V2Blend = UseLoV2 ? LoV2 : HiV2;
14514 for (int i = 0; i < SplitNumElements; ++i)
14515 if (BlendMask[i] >= SplitNumElements)
14516 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
14518 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
14521 if (!CheckHalfBlendUsable(LoMask) || !CheckHalfBlendUsable(HiMask))
14522 return SDValue();
14524 SDValue Lo = HalfBlend(LoMask);
14525 SDValue Hi = HalfBlend(HiMask);
14526 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
14529 /// Either split a vector in halves or decompose the shuffles and the
14530 /// blend/unpack.
14532 /// This is provided as a good fallback for many lowerings of non-single-input
14533 /// shuffles with more than one 128-bit lane. In those cases, we want to select
14534 /// between splitting the shuffle into 128-bit components and stitching those
14535 /// back together vs. extracting the single-input shuffles and blending those
14536 /// results.
14537 static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
14538 SDValue V2, ArrayRef<int> Mask,
14539 const X86Subtarget &Subtarget,
14540 SelectionDAG &DAG) {
14541 assert(!V2.isUndef() && "This routine must not be used to lower single-input "
14542 "shuffles as it could then recurse on itself.");
14543 int Size = Mask.size();
14545 // If this can be modeled as a broadcast of two elements followed by a blend,
14546 // prefer that lowering. This is especially important because broadcasts can
14547 // often fold with memory operands.
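// For example, a v8f32 mask such as <0,8,0,8,0,8,0,8> broadcasts element 0 of
// each input and blends the results, which the DoBothBroadcast check below
// detects.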
14548 auto DoBothBroadcast = [&] {
14549 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
14550 for (int M : Mask)
14551 if (M >= Size) {
14552 if (V2BroadcastIdx < 0)
14553 V2BroadcastIdx = M - Size;
14554 else if (M - Size != V2BroadcastIdx)
14555 return false;
14556 } else if (M >= 0) {
14557 if (V1BroadcastIdx < 0)
14558 V1BroadcastIdx = M;
14559 else if (M != V1BroadcastIdx)
14560 return false;
14562 return true;
14564 if (DoBothBroadcast())
14565 return lowerShuffleAsDecomposedShuffleMerge(DL, VT, V1, V2, Mask, Subtarget,
14566 DAG);
14568 // If the inputs all stem from a single 128-bit lane of each input, then we
14569 // split them rather than blending because the split will decompose to
14570 // unusually few instructions.
14571 int LaneCount = VT.getSizeInBits() / 128;
14572 int LaneSize = Size / LaneCount;
14573 SmallBitVector LaneInputs[2];
14574 LaneInputs[0].resize(LaneCount, false);
14575 LaneInputs[1].resize(LaneCount, false);
14576 for (int i = 0; i < Size; ++i)
14577 if (Mask[i] >= 0)
14578 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
14579 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
14580 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG,
14581 /*SimpleOnly*/ false);
14583 // Otherwise, just fall back to decomposed shuffles and a blend/unpack. This
14584 // requires that the decomposed single-input shuffles don't end up here.
14585 return lowerShuffleAsDecomposedShuffleMerge(DL, VT, V1, V2, Mask, Subtarget,
14586 DAG);
14589 // Lower as SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
14590 // TODO: Extend to support v8f32 (+ 512-bit shuffles).
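// For example, for the v4f64 mask <3,7,1,5> the loop below builds
// LHSMask = <u,3,u,1>, RHSMask = <u,7,u,5> and SHUFPMask = 0xF, producing
// SHUFPD(shuffle(V1,V2,<u,3,u,1>), shuffle(V1,V2,<u,7,u,5>), 0xF).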
14591 static SDValue lowerShuffleAsLanePermuteAndSHUFP(const SDLoc &DL, MVT VT,
14592 SDValue V1, SDValue V2,
14593 ArrayRef<int> Mask,
14594 SelectionDAG &DAG) {
14595 assert(VT == MVT::v4f64 && "Only for v4f64 shuffles");
14597 int LHSMask[4] = {-1, -1, -1, -1};
14598 int RHSMask[4] = {-1, -1, -1, -1};
14599 unsigned SHUFPMask = 0;
14601 // As SHUFPD uses a single LHS/RHS element per lane, we can always
14602 // perform the shuffle once the lanes have been shuffled in place.
14603 for (int i = 0; i != 4; ++i) {
14604 int M = Mask[i];
14605 if (M < 0)
14606 continue;
14607 int LaneBase = i & ~1;
14608 auto &LaneMask = (i & 1) ? RHSMask : LHSMask;
14609 LaneMask[LaneBase + (M & 1)] = M;
14610 SHUFPMask |= (M & 1) << i;
14613 SDValue LHS = DAG.getVectorShuffle(VT, DL, V1, V2, LHSMask);
14614 SDValue RHS = DAG.getVectorShuffle(VT, DL, V1, V2, RHSMask);
14615 return DAG.getNode(X86ISD::SHUFP, DL, VT, LHS, RHS,
14616 DAG.getTargetConstant(SHUFPMask, DL, MVT::i8));
14619 /// Lower a vector shuffle crossing multiple 128-bit lanes as
14620 /// a lane permutation followed by a per-lane permutation.
14622 /// This is mainly for cases where we can have non-repeating permutes
14623 /// in each lane.
14625 /// TODO: This is very similar to lowerShuffleAsLanePermuteAndRepeatedMask,
14626 /// we should investigate merging them.
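/// For example, the v8f32 reversal mask <7,6,5,4,3,2,1,0> can be handled as a
/// cross-lane permute <4,5,6,7,0,1,2,3> followed by the in-lane permute
/// <3,2,1,0,7,6,5,4>.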
14627 static SDValue lowerShuffleAsLanePermuteAndPermute(
14628 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14629 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14630 int NumElts = VT.getVectorNumElements();
14631 int NumLanes = VT.getSizeInBits() / 128;
14632 int NumEltsPerLane = NumElts / NumLanes;
14633 bool CanUseSublanes = Subtarget.hasAVX2() && V2.isUndef();
14635 /// Attempts to find a sublane permute with the given size
14636 /// that gets all elements into their target lanes.
14638 /// If successful, returns the cross-lane permute followed by the in-lane
14639 /// permute; otherwise returns an empty SDValue.
14640 auto getSublanePermute = [&](int NumSublanes) -> SDValue {
14641 int NumSublanesPerLane = NumSublanes / NumLanes;
14642 int NumEltsPerSublane = NumElts / NumSublanes;
14644 SmallVector<int, 16> CrossLaneMask;
14645 SmallVector<int, 16> InLaneMask(NumElts, SM_SentinelUndef);
14646 // CrossLaneMask but one entry == one sublane.
14647 SmallVector<int, 16> CrossLaneMaskLarge(NumSublanes, SM_SentinelUndef);
14649 for (int i = 0; i != NumElts; ++i) {
14650 int M = Mask[i];
14651 if (M < 0)
14652 continue;
14654 int SrcSublane = M / NumEltsPerSublane;
14655 int DstLane = i / NumEltsPerLane;
14657 // We only need to get the elements into the right lane, not sublane.
14658 // So search all sublanes that make up the destination lane.
14659 bool Found = false;
14660 int DstSubStart = DstLane * NumSublanesPerLane;
14661 int DstSubEnd = DstSubStart + NumSublanesPerLane;
14662 for (int DstSublane = DstSubStart; DstSublane < DstSubEnd; ++DstSublane) {
14663 if (!isUndefOrEqual(CrossLaneMaskLarge[DstSublane], SrcSublane))
14664 continue;
14666 Found = true;
14667 CrossLaneMaskLarge[DstSublane] = SrcSublane;
14668 int DstSublaneOffset = DstSublane * NumEltsPerSublane;
14669 InLaneMask[i] = DstSublaneOffset + M % NumEltsPerSublane;
14670 break;
14672 if (!Found)
14673 return SDValue();
14676 // Fill CrossLaneMask using CrossLaneMaskLarge.
14677 narrowShuffleMaskElts(NumEltsPerSublane, CrossLaneMaskLarge, CrossLaneMask);
14679 if (!CanUseSublanes) {
14680 // If we're only shuffling a single lowest lane and the rest are identity
14681 // then don't bother.
14682 // TODO - isShuffleMaskInputInPlace could be extended to something like
14683 // this.
14684 int NumIdentityLanes = 0;
14685 bool OnlyShuffleLowestLane = true;
14686 for (int i = 0; i != NumLanes; ++i) {
14687 int LaneOffset = i * NumEltsPerLane;
14688 if (isSequentialOrUndefInRange(InLaneMask, LaneOffset, NumEltsPerLane,
14689 i * NumEltsPerLane))
14690 NumIdentityLanes++;
14691 else if (CrossLaneMask[LaneOffset] != 0)
14692 OnlyShuffleLowestLane = false;
14694 if (OnlyShuffleLowestLane && NumIdentityLanes == (NumLanes - 1))
14695 return SDValue();
14698 // Avoid returning the same shuffle operation. For example,
14699 // t7: v16i16 = vector_shuffle<8,9,10,11,4,5,6,7,0,1,2,3,12,13,14,15> t5,
14700 // undef:v16i16
14701 if (CrossLaneMask == Mask || InLaneMask == Mask)
14702 return SDValue();
14704 SDValue CrossLane = DAG.getVectorShuffle(VT, DL, V1, V2, CrossLaneMask);
14705 return DAG.getVectorShuffle(VT, DL, CrossLane, DAG.getUNDEF(VT),
14706 InLaneMask);
14709 // First attempt a solution with full lanes.
14710 if (SDValue V = getSublanePermute(/*NumSublanes=*/NumLanes))
14711 return V;
14713 // The rest of the solutions use sublanes.
14714 if (!CanUseSublanes)
14715 return SDValue();
14717 // Then attempt a solution with 64-bit sublanes (vpermq).
14718 if (SDValue V = getSublanePermute(/*NumSublanes=*/NumLanes * 2))
14719 return V;
14721 // If that doesn't work and we have fast variable cross-lane shuffle,
14722 // attempt 32-bit sublanes (vpermd).
14723 if (!Subtarget.hasFastVariableCrossLaneShuffle())
14724 return SDValue();
14726 return getSublanePermute(/*NumSublanes=*/NumLanes * 4);
14729 /// Helper to compute the in-lane shuffle mask for a complete shuffle mask.
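/// For example, a v8f32 mask <6,7,0,1,2,3,4,5> with LaneSize 4 becomes
/// <10,11,0,1,14,15,4,5>; indices >= Size (here 8) select from a second
/// operand, which the caller provides as a lane-flipped copy of the input.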
14730 static void computeInLaneShuffleMask(const ArrayRef<int> &Mask, int LaneSize,
14731 SmallVector<int> &InLaneMask) {
14732 int Size = Mask.size();
14733 InLaneMask.assign(Mask.begin(), Mask.end());
14734 for (int i = 0; i < Size; ++i) {
14735 int &M = InLaneMask[i];
14736 if (M < 0)
14737 continue;
14738 if (((M % Size) / LaneSize) != (i / LaneSize))
14739 M = (M % LaneSize) + ((i / LaneSize) * LaneSize) + Size;
14743 /// Lower a vector shuffle crossing multiple 128-bit lanes by shuffling one
14744 /// source with a lane permutation.
14746 /// This lowering strategy results in four instructions in the worst case for a
14747 /// single-input cross lane shuffle which is lower than any other fully general
14748 /// cross-lane shuffle strategy I'm aware of. Special cases for each particular
14749 /// shuffle pattern should be handled prior to trying this lowering.
14750 static SDValue lowerShuffleAsLanePermuteAndShuffle(
14751 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14752 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14753 // FIXME: This should probably be generalized for 512-bit vectors as well.
14754 assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
14755 int Size = Mask.size();
14756 int LaneSize = Size / 2;
14758 // Fold to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
14759 // Only do this if the elements aren't all from the lower lane,
14760 // otherwise we're (probably) better off doing a split.
14761 if (VT == MVT::v4f64 &&
14762 !all_of(Mask, [LaneSize](int M) { return M < LaneSize; }))
14763 return lowerShuffleAsLanePermuteAndSHUFP(DL, VT, V1, V2, Mask, DAG);
14765 // If there are only inputs from one 128-bit lane, splitting will in fact be
14766 // less expensive. The flags track whether the given lane contains an element
14767 // that crosses to another lane.
14768 bool AllLanes;
14769 if (!Subtarget.hasAVX2()) {
14770 bool LaneCrossing[2] = {false, false};
14771 for (int i = 0; i < Size; ++i)
14772 if (Mask[i] >= 0 && ((Mask[i] % Size) / LaneSize) != (i / LaneSize))
14773 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
14774 AllLanes = LaneCrossing[0] && LaneCrossing[1];
14775 } else {
14776 bool LaneUsed[2] = {false, false};
14777 for (int i = 0; i < Size; ++i)
14778 if (Mask[i] >= 0)
14779 LaneUsed[(Mask[i] % Size) / LaneSize] = true;
14780 AllLanes = LaneUsed[0] && LaneUsed[1];
14783 // TODO - we could support shuffling V2 in the Flipped input.
14784 assert(V2.isUndef() &&
14785 "This last part of this routine only works on single input shuffles");
14787 SmallVector<int> InLaneMask;
14788 computeInLaneShuffleMask(Mask, Mask.size() / 2, InLaneMask);
14790 assert(!is128BitLaneCrossingShuffleMask(VT, InLaneMask) &&
14791 "In-lane shuffle mask expected");
14793 // If we're not using both lanes in each lane and the inlane mask is not
14794 // repeating, then we're better off splitting.
14795 if (!AllLanes && !is128BitLaneRepeatedShuffleMask(VT, InLaneMask))
14796 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG,
14797 /*SimpleOnly*/ false);
14799 // Flip the lanes, and shuffle the results which should now be in-lane.
14800 MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
14801 SDValue Flipped = DAG.getBitcast(PVT, V1);
14802 Flipped =
14803 DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT), {2, 3, 0, 1});
14804 Flipped = DAG.getBitcast(VT, Flipped);
14805 return DAG.getVectorShuffle(VT, DL, V1, Flipped, InLaneMask);
14808 /// Handle lowering 2-lane 128-bit shuffles.
14809 static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
14810 SDValue V2, ArrayRef<int> Mask,
14811 const APInt &Zeroable,
14812 const X86Subtarget &Subtarget,
14813 SelectionDAG &DAG) {
14814 if (V2.isUndef()) {
14815 // Attempt to match VBROADCAST*128 subvector broadcast load.
14816 bool SplatLo = isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1);
14817 bool SplatHi = isShuffleEquivalent(Mask, {2, 3, 2, 3}, V1);
14818 if ((SplatLo || SplatHi) && !Subtarget.hasAVX512() && V1.hasOneUse() &&
14819 X86::mayFoldLoad(peekThroughOneUseBitcasts(V1), Subtarget)) {
14820 MVT MemVT = VT.getHalfNumVectorElementsVT();
14821 unsigned Ofs = SplatLo ? 0 : MemVT.getStoreSize();
14822 auto *Ld = cast<LoadSDNode>(peekThroughOneUseBitcasts(V1));
14823 if (SDValue BcstLd = getBROADCAST_LOAD(X86ISD::SUBV_BROADCAST_LOAD, DL,
14824 VT, MemVT, Ld, Ofs, DAG))
14825 return BcstLd;
14828 // With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding.
14829 if (Subtarget.hasAVX2())
14830 return SDValue();
14833 bool V2IsZero = !V2.isUndef() && ISD::isBuildVectorAllZeros(V2.getNode());
14835 SmallVector<int, 4> WidenedMask;
14836 if (!canWidenShuffleElements(Mask, Zeroable, V2IsZero, WidenedMask))
14837 return SDValue();
14839 bool IsLowZero = (Zeroable & 0x3) == 0x3;
14840 bool IsHighZero = (Zeroable & 0xc) == 0xc;
14842 // Try to use an insert into a zero vector.
14843 if (WidenedMask[0] == 0 && IsHighZero) {
14844 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
14845 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
14846 DAG.getIntPtrConstant(0, DL));
14847 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
14848 getZeroVector(VT, Subtarget, DAG, DL), LoV,
14849 DAG.getIntPtrConstant(0, DL));
14852 // TODO: If minimizing size and one of the inputs is a zero vector and the
14853 // zero vector has only one use, we could use a VPERM2X128 to save the
14854 // instruction bytes needed to explicitly generate the zero vector.
14856 // Blends are faster and handle all the non-lane-crossing cases.
14857 if (SDValue Blend = lowerShuffleAsBlend(DL, VT, V1, V2, Mask, Zeroable,
14858 Subtarget, DAG))
14859 return Blend;
14861 // If either input operand is a zero vector, use VPERM2X128 because its mask
14862 // allows us to replace the zero input with an implicit zero.
14863 if (!IsLowZero && !IsHighZero) {
14864 // Check for patterns which can be matched with a single insert of a 128-bit
14865 // subvector.
14866 bool OnlyUsesV1 = isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1, V2);
14867 if (OnlyUsesV1 || isShuffleEquivalent(Mask, {0, 1, 4, 5}, V1, V2)) {
14869 // With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
14870 // this will likely become vinsertf128 which can't fold a 256-bit memop.
14871 if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
14872 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
14873 SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
14874 OnlyUsesV1 ? V1 : V2,
14875 DAG.getIntPtrConstant(0, DL));
14876 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
14877 DAG.getIntPtrConstant(2, DL));
14881 // Try to use SHUF128 if possible.
14882 if (Subtarget.hasVLX()) {
14883 if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) {
14884 unsigned PermMask = ((WidenedMask[0] % 2) << 0) |
14885 ((WidenedMask[1] % 2) << 1);
14886 return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
14887 DAG.getTargetConstant(PermMask, DL, MVT::i8));
14892 // Otherwise form a 128-bit permutation. After accounting for undefs,
14893 // convert the 64-bit shuffle mask selection values into 128-bit
14894 // selection bits by dividing the indexes by 2 and shifting into positions
14895 // defined by a vperm2*128 instruction's immediate control byte.
14897 // The immediate permute control byte looks like this:
14898 // [1:0] - select 128 bits from sources for low half of destination
14899 // [2] - ignore
14900 // [3] - zero low half of destination
14901 // [5:4] - select 128 bits from sources for high half of destination
14902 // [6] - ignore
14903 // [7] - zero high half of destination
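//
// For example, a widened mask <1, 2> (upper half of V1, lower half of V2)
// encodes as (1 << 0) | (2 << 4) = 0x21.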
14905 assert((WidenedMask[0] >= 0 || IsLowZero) &&
14906 (WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");
14908 unsigned PermMask = 0;
14909 PermMask |= IsLowZero ? 0x08 : (WidenedMask[0] << 0);
14910 PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4);
14912 // Check the immediate mask and replace unused sources with undef.
14913 if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00)
14914 V1 = DAG.getUNDEF(VT);
14915 if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20)
14916 V2 = DAG.getUNDEF(VT);
14918 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
14919 DAG.getTargetConstant(PermMask, DL, MVT::i8));
14922 /// Lower a vector shuffle by first fixing the 128-bit lanes and then
14923 /// shuffling each lane.
14925 /// This attempts to create a repeated lane shuffle where each lane uses one
14926 /// or two of the lanes of the inputs. The lanes of the input vectors are
14927 /// shuffled in one or two independent shuffles to get the lanes into the
14928 /// position needed by the final shuffle.
14929 static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
14930 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14931 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
14932 assert(!V2.isUndef() && "This is only useful with multiple inputs.");
14934 if (is128BitLaneRepeatedShuffleMask(VT, Mask))
14935 return SDValue();
14937 int NumElts = Mask.size();
14938 int NumLanes = VT.getSizeInBits() / 128;
14939 int NumLaneElts = 128 / VT.getScalarSizeInBits();
14940 SmallVector<int, 16> RepeatMask(NumLaneElts, -1);
14941 SmallVector<std::array<int, 2>, 2> LaneSrcs(NumLanes, {{-1, -1}});
14943 // First pass will try to fill in the RepeatMask from lanes that need two
14944 // sources.
14945 for (int Lane = 0; Lane != NumLanes; ++Lane) {
14946 int Srcs[2] = {-1, -1};
14947 SmallVector<int, 16> InLaneMask(NumLaneElts, -1);
14948 for (int i = 0; i != NumLaneElts; ++i) {
14949 int M = Mask[(Lane * NumLaneElts) + i];
14950 if (M < 0)
14951 continue;
14952 // Determine which of the possible input lanes (NumLanes from each source)
14953 // this element comes from. Assign that as one of the sources for this
14954 // lane. We can assign up to 2 sources for this lane. If we run out
14955 // of sources we can't do anything.
14956 int LaneSrc = M / NumLaneElts;
14957 int Src;
14958 if (Srcs[0] < 0 || Srcs[0] == LaneSrc)
14959 Src = 0;
14960 else if (Srcs[1] < 0 || Srcs[1] == LaneSrc)
14961 Src = 1;
14962 else
14963 return SDValue();
14965 Srcs[Src] = LaneSrc;
14966 InLaneMask[i] = (M % NumLaneElts) + Src * NumElts;
14969 // If this lane has two sources, see if it fits with the repeat mask so far.
14970 if (Srcs[1] < 0)
14971 continue;
14973 LaneSrcs[Lane][0] = Srcs[0];
14974 LaneSrcs[Lane][1] = Srcs[1];
14976 auto MatchMasks = [](ArrayRef<int> M1, ArrayRef<int> M2) {
14977 assert(M1.size() == M2.size() && "Unexpected mask size");
14978 for (int i = 0, e = M1.size(); i != e; ++i)
14979 if (M1[i] >= 0 && M2[i] >= 0 && M1[i] != M2[i])
14980 return false;
14981 return true;
14984 auto MergeMasks = [](ArrayRef<int> Mask, MutableArrayRef<int> MergedMask) {
14985 assert(Mask.size() == MergedMask.size() && "Unexpected mask size");
14986 for (int i = 0, e = MergedMask.size(); i != e; ++i) {
14987 int M = Mask[i];
14988 if (M < 0)
14989 continue;
14990 assert((MergedMask[i] < 0 || MergedMask[i] == M) &&
14991 "Unexpected mask element");
14992 MergedMask[i] = M;
14996 if (MatchMasks(InLaneMask, RepeatMask)) {
14997 // Merge this lane mask into the final repeat mask.
14998 MergeMasks(InLaneMask, RepeatMask);
14999 continue;
15002 // Didn't find a match. Swap the operands and try again.
15003 std::swap(LaneSrcs[Lane][0], LaneSrcs[Lane][1]);
15004 ShuffleVectorSDNode::commuteMask(InLaneMask);
15006 if (MatchMasks(InLaneMask, RepeatMask)) {
15007 // Merge this lane mask into the final repeat mask.
15008 MergeMasks(InLaneMask, RepeatMask);
15009 continue;
15012 // Couldn't find a match with the operands in either order.
15013 return SDValue();
15016 // Now handle any lanes with only one source.
15017 for (int Lane = 0; Lane != NumLanes; ++Lane) {
15018 // If this lane has already been processed, skip it.
15019 if (LaneSrcs[Lane][0] >= 0)
15020 continue;
15022 for (int i = 0; i != NumLaneElts; ++i) {
15023 int M = Mask[(Lane * NumLaneElts) + i];
15024 if (M < 0)
15025 continue;
15027 // If RepeatMask isn't defined yet we can define it ourselves.
15028 if (RepeatMask[i] < 0)
15029 RepeatMask[i] = M % NumLaneElts;
15031 if (RepeatMask[i] < NumElts) {
15032 if (RepeatMask[i] != M % NumLaneElts)
15033 return SDValue();
15034 LaneSrcs[Lane][0] = M / NumLaneElts;
15035 } else {
15036 if (RepeatMask[i] != ((M % NumLaneElts) + NumElts))
15037 return SDValue();
15038 LaneSrcs[Lane][1] = M / NumLaneElts;
15042 if (LaneSrcs[Lane][0] < 0 && LaneSrcs[Lane][1] < 0)
15043 return SDValue();
15046 SmallVector<int, 16> NewMask(NumElts, -1);
15047 for (int Lane = 0; Lane != NumLanes; ++Lane) {
15048 int Src = LaneSrcs[Lane][0];
15049 for (int i = 0; i != NumLaneElts; ++i) {
15050 int M = -1;
15051 if (Src >= 0)
15052 M = Src * NumLaneElts + i;
15053 NewMask[Lane * NumLaneElts + i] = M;
15056 SDValue NewV1 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
15057 // Ensure we didn't get back the shuffle we started with.
15058 // FIXME: This is a hack to make up for some splat handling code in
15059 // getVectorShuffle.
15060 if (isa<ShuffleVectorSDNode>(NewV1) &&
15061 cast<ShuffleVectorSDNode>(NewV1)->getMask() == Mask)
15062 return SDValue();
15064 for (int Lane = 0; Lane != NumLanes; ++Lane) {
15065 int Src = LaneSrcs[Lane][1];
15066 for (int i = 0; i != NumLaneElts; ++i) {
15067 int M = -1;
15068 if (Src >= 0)
15069 M = Src * NumLaneElts + i;
15070 NewMask[Lane * NumLaneElts + i] = M;
15073 SDValue NewV2 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
15074 // Ensure we didn't get back the shuffle we started with.
15075 // FIXME: This is a hack to make up for some splat handling code in
15076 // getVectorShuffle.
15077 if (isa<ShuffleVectorSDNode>(NewV2) &&
15078 cast<ShuffleVectorSDNode>(NewV2)->getMask() == Mask)
15079 return SDValue();
15081 for (int i = 0; i != NumElts; ++i) {
15082 if (Mask[i] < 0) {
15083 NewMask[i] = -1;
15084 continue;
15086 NewMask[i] = RepeatMask[i % NumLaneElts];
15087 if (NewMask[i] < 0)
15088 continue;
15090 NewMask[i] += (i / NumLaneElts) * NumLaneElts;
15092 return DAG.getVectorShuffle(VT, DL, NewV1, NewV2, NewMask);
15095 /// If the input shuffle mask results in a vector that is undefined in all upper
15096 /// or lower half elements and that mask accesses only 2 halves of the
15097 /// shuffle's operands, return true. A mask of half the width with mask indexes
15098 /// adjusted to access the extracted halves of the original shuffle operands is
15099 /// returned in HalfMask. HalfIdx1 and HalfIdx2 return whether the upper or
15100 /// lower half of each input operand is accessed.
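/// For example, a v8i32 mask <0,1,8,9,u,u,u,u> (upper half undef) produces
/// HalfMask = <0,1,4,5> with HalfIdx1 = 0 (lower half of V1) and
/// HalfIdx2 = 2 (lower half of V2).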
15101 static bool
15102 getHalfShuffleMask(ArrayRef<int> Mask, MutableArrayRef<int> HalfMask,
15103 int &HalfIdx1, int &HalfIdx2) {
15104 assert((Mask.size() == HalfMask.size() * 2) &&
15105 "Expected input mask to be twice as long as output");
15107 // Exactly one half of the result must be undef to allow narrowing.
15108 bool UndefLower = isUndefLowerHalf(Mask);
15109 bool UndefUpper = isUndefUpperHalf(Mask);
15110 if (UndefLower == UndefUpper)
15111 return false;
15113 unsigned HalfNumElts = HalfMask.size();
15114 unsigned MaskIndexOffset = UndefLower ? HalfNumElts : 0;
15115 HalfIdx1 = -1;
15116 HalfIdx2 = -1;
15117 for (unsigned i = 0; i != HalfNumElts; ++i) {
15118 int M = Mask[i + MaskIndexOffset];
15119 if (M < 0) {
15120 HalfMask[i] = M;
15121 continue;
15124 // Determine which of the 4 half vectors this element is from.
15125 // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
15126 int HalfIdx = M / HalfNumElts;
15128 // Determine the element index into its half vector source.
15129 int HalfElt = M % HalfNumElts;
15131 // We can shuffle with up to 2 half vectors, set the new 'half'
15132 // shuffle mask accordingly.
15133 if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
15134 HalfMask[i] = HalfElt;
15135 HalfIdx1 = HalfIdx;
15136 continue;
15138 if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
15139 HalfMask[i] = HalfElt + HalfNumElts;
15140 HalfIdx2 = HalfIdx;
15141 continue;
15144 // Too many half vectors referenced.
15145 return false;
15148 return true;
15151 /// Given the output values from getHalfShuffleMask(), create a half width
15152 /// shuffle of extracted vectors followed by an insert back to full width.
15153 static SDValue getShuffleHalfVectors(const SDLoc &DL, SDValue V1, SDValue V2,
15154 ArrayRef<int> HalfMask, int HalfIdx1,
15155 int HalfIdx2, bool UndefLower,
15156 SelectionDAG &DAG, bool UseConcat = false) {
15157 assert(V1.getValueType() == V2.getValueType() && "Different sized vectors?");
15158 assert(V1.getValueType().isSimple() && "Expecting only simple types");
15160 MVT VT = V1.getSimpleValueType();
15161 MVT HalfVT = VT.getHalfNumVectorElementsVT();
15162 unsigned HalfNumElts = HalfVT.getVectorNumElements();
15164 auto getHalfVector = [&](int HalfIdx) {
15165 if (HalfIdx < 0)
15166 return DAG.getUNDEF(HalfVT);
15167 SDValue V = (HalfIdx < 2 ? V1 : V2);
15168 HalfIdx = (HalfIdx % 2) * HalfNumElts;
15169 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
15170 DAG.getIntPtrConstant(HalfIdx, DL));
15173 // ins undef, (shuf (ext V1, HalfIdx1), (ext V2, HalfIdx2), HalfMask), Offset
15174 SDValue Half1 = getHalfVector(HalfIdx1);
15175 SDValue Half2 = getHalfVector(HalfIdx2);
15176 SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
15177 if (UseConcat) {
15178 SDValue Op0 = V;
15179 SDValue Op1 = DAG.getUNDEF(HalfVT);
15180 if (UndefLower)
15181 std::swap(Op0, Op1);
15182 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Op0, Op1);
15185 unsigned Offset = UndefLower ? HalfNumElts : 0;
15186 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
15187 DAG.getIntPtrConstant(Offset, DL));
15190 /// Lower shuffles where an entire half of a 256 or 512-bit vector is UNDEF.
15191 /// This allows for fast cases such as subvector extraction/insertion
15192 /// or shuffling smaller vector types which can lower more efficiently.
15193 static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
15194 SDValue V2, ArrayRef<int> Mask,
15195 const X86Subtarget &Subtarget,
15196 SelectionDAG &DAG) {
15197 assert((VT.is256BitVector() || VT.is512BitVector()) &&
15198 "Expected 256-bit or 512-bit vector");
15200 bool UndefLower = isUndefLowerHalf(Mask);
15201 if (!UndefLower && !isUndefUpperHalf(Mask))
15202 return SDValue();
15204 assert((!UndefLower || !isUndefUpperHalf(Mask)) &&
15205 "Completely undef shuffle mask should have been simplified already");
15207 // Upper half is undef and lower half is whole upper subvector.
15208 // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
15209 MVT HalfVT = VT.getHalfNumVectorElementsVT();
15210 unsigned HalfNumElts = HalfVT.getVectorNumElements();
15211 if (!UndefLower &&
15212 isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
15213 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
15214 DAG.getIntPtrConstant(HalfNumElts, DL));
15215 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
15216 DAG.getIntPtrConstant(0, DL));
15219 // Lower half is undef and upper half is whole lower subvector.
15220 // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
15221 if (UndefLower &&
15222 isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
15223 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
15224 DAG.getIntPtrConstant(0, DL));
15225 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
15226 DAG.getIntPtrConstant(HalfNumElts, DL));
15229 int HalfIdx1, HalfIdx2;
15230 SmallVector<int, 8> HalfMask(HalfNumElts);
15231 if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2))
15232 return SDValue();
15234 assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
15236 // Only shuffle the halves of the inputs when useful.
15237 unsigned NumLowerHalves =
15238 (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
15239 unsigned NumUpperHalves =
15240 (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
15241 assert(NumLowerHalves + NumUpperHalves <= 2 && "Only 1 or 2 halves allowed");
15243 // Determine the larger pattern of undef/halves, then decide if it's worth
15244 // splitting the shuffle based on subtarget capabilities and types.
15245 unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
15246 if (!UndefLower) {
15247 // XXXXuuuu: no insert is needed.
15248 // Always extract lowers when setting lower - these are all free subreg ops.
15249 if (NumUpperHalves == 0)
15250 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15251 UndefLower, DAG);
15253 if (NumUpperHalves == 1) {
15254 // AVX2 has efficient 32/64-bit element cross-lane shuffles.
15255 if (Subtarget.hasAVX2()) {
15256 // extract128 + vunpckhps/vshufps is better than vblend + vpermps.
15257 if (EltWidth == 32 && NumLowerHalves && HalfVT.is128BitVector() &&
15258 !is128BitUnpackShuffleMask(HalfMask, DAG) &&
15259 (!isSingleSHUFPSMask(HalfMask) ||
15260 Subtarget.hasFastVariableCrossLaneShuffle()))
15261 return SDValue();
15262 // If this is a unary shuffle (assume that the 2nd operand is
15263 // canonicalized to undef), then we can use vpermpd. Otherwise, we
15264 // are better off extracting the upper half of 1 operand and using a
15265 // narrow shuffle.
15266 if (EltWidth == 64 && V2.isUndef())
15267 return SDValue();
15269 // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
15270 if (Subtarget.hasAVX512() && VT.is512BitVector())
15271 return SDValue();
15272 // Extract + narrow shuffle is better than the wide alternative.
15273 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15274 UndefLower, DAG);
15277 // Don't extract both uppers, instead shuffle and then extract.
15278 assert(NumUpperHalves == 2 && "Half vector count went wrong");
15279 return SDValue();
15282 // UndefLower - uuuuXXXX: an insert to high half is required if we split this.
15283 if (NumUpperHalves == 0) {
15284 // AVX2 has efficient 64-bit element cross-lane shuffles.
15285 // TODO: Refine to account for unary shuffle, splat, and other masks?
15286 if (Subtarget.hasAVX2() && EltWidth == 64)
15287 return SDValue();
15288 // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
15289 if (Subtarget.hasAVX512() && VT.is512BitVector())
15290 return SDValue();
15291 // Narrow shuffle + insert is better than the wide alternative.
15292 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15293 UndefLower, DAG);
15296 // NumUpperHalves != 0: don't bother with extract, shuffle, and then insert.
15297 return SDValue();
15300 /// Handle case where shuffle sources are coming from the same 128-bit lane and
15301 /// every lane can be represented as the same repeating mask - allowing us to
15302 /// shuffle the sources with the repeating shuffle and then permute the result
15303 /// to the destination lanes.
15304 static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
15305 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
15306 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
15307 int NumElts = VT.getVectorNumElements();
15308 int NumLanes = VT.getSizeInBits() / 128;
15309 int NumLaneElts = NumElts / NumLanes;
15311 // On AVX2 we may be able to just shuffle the lowest elements and then
15312 // broadcast the result.
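// For example, the v8i32 mask <1,0,1,0,1,0,1,0> is matched as the low-lane
// shuffle <1,0,u,u,u,u,u,u> followed by a broadcast of the resulting 64-bit
// element (BroadcastMask <0,1,0,1,0,1,0,1>).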
15313 if (Subtarget.hasAVX2()) {
15314 for (unsigned BroadcastSize : {16, 32, 64}) {
15315 if (BroadcastSize <= VT.getScalarSizeInBits())
15316 continue;
15317 int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
15319 // Attempt to match a repeating pattern every NumBroadcastElts,
15320 // accounting for UNDEFs but only referencing the lowest 128-bit
15321 // lane of the inputs.
15322 auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
15323 for (int i = 0; i != NumElts; i += NumBroadcastElts)
15324 for (int j = 0; j != NumBroadcastElts; ++j) {
15325 int M = Mask[i + j];
15326 if (M < 0)
15327 continue;
15328 int &R = RepeatMask[j];
15329 if (0 != ((M % NumElts) / NumLaneElts))
15330 return false;
15331 if (0 <= R && R != M)
15332 return false;
15333 R = M;
15335 return true;
15338 SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
15339 if (!FindRepeatingBroadcastMask(RepeatMask))
15340 continue;
15342 // Shuffle the (lowest) repeated elements in place for broadcast.
15343 SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
15345 // Shuffle the actual broadcast.
15346 SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
15347 for (int i = 0; i != NumElts; i += NumBroadcastElts)
15348 for (int j = 0; j != NumBroadcastElts; ++j)
15349 BroadcastMask[i + j] = j;
15351 // Avoid returning the same shuffle operation. For example,
15352 // v8i32 = vector_shuffle<0,1,0,1,0,1,0,1> t5, undef:v8i32
15353 if (BroadcastMask == Mask)
15354 return SDValue();
15356 return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
15357 BroadcastMask);
15361 // Bail if the shuffle mask doesn't cross 128-bit lanes.
15362 if (!is128BitLaneCrossingShuffleMask(VT, Mask))
15363 return SDValue();
15365 // Bail if we already have a repeated lane shuffle mask.
15366 if (is128BitLaneRepeatedShuffleMask(VT, Mask))
15367 return SDValue();
15369 // Helper to look for a repeated mask in each split sublane, checking that
15370 // those sublanes can then be permuted into place.
15371 auto ShuffleSubLanes = [&](int SubLaneScale) {
15372 int NumSubLanes = NumLanes * SubLaneScale;
15373 int NumSubLaneElts = NumLaneElts / SubLaneScale;
15375 // Check that all the sources are coming from the same lane and see if we
15376 // can form a repeating shuffle mask (local to each sub-lane). At the same
15377 // time, determine the source sub-lane for each destination sub-lane.
15378 int TopSrcSubLane = -1;
15379 SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
15380 SmallVector<SmallVector<int, 8>> RepeatedSubLaneMasks(
15381 SubLaneScale,
15382 SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef));
15384 for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
15385 // Extract the sub-lane mask, check that it all comes from the same lane
15386 // and normalize the mask entries to come from the first lane.
15387 int SrcLane = -1;
15388 SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
15389 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15390 int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
15391 if (M < 0)
15392 continue;
15393 int Lane = (M % NumElts) / NumLaneElts;
15394 if ((0 <= SrcLane) && (SrcLane != Lane))
15395 return SDValue();
15396 SrcLane = Lane;
15397 int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
15398 SubLaneMask[Elt] = LocalM;
15401 // Whole sub-lane is UNDEF.
15402 if (SrcLane < 0)
15403 continue;
15405 // Attempt to match against the candidate repeated sub-lane masks.
15406 for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
15407 auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
15408 for (int i = 0; i != NumSubLaneElts; ++i) {
15409 if (M1[i] < 0 || M2[i] < 0)
15410 continue;
15411 if (M1[i] != M2[i])
15412 return false;
15414 return true;
15417 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
15418 if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
15419 continue;
15421 // Merge the sub-lane mask into the matching repeated sub-lane mask.
15422 for (int i = 0; i != NumSubLaneElts; ++i) {
15423 int M = SubLaneMask[i];
15424 if (M < 0)
15425 continue;
15426 assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
15427 "Unexpected mask element");
15428 RepeatedSubLaneMask[i] = M;
15431 // Track the top most source sub-lane - by setting the remaining to
15432 // UNDEF we can greatly simplify shuffle matching.
15433 int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
15434 TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
15435 Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
15436 break;
15439 // Bail if we failed to find a matching repeated sub-lane mask.
15440 if (Dst2SrcSubLanes[DstSubLane] < 0)
15441 return SDValue();
15443 assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
15444 "Unexpected source lane");
15446 // Create a repeating shuffle mask for the entire vector.
15447 SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
15448 for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
15449 int Lane = SubLane / SubLaneScale;
15450 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
15451 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15452 int M = RepeatedSubLaneMask[Elt];
15453 if (M < 0)
15454 continue;
15455 int Idx = (SubLane * NumSubLaneElts) + Elt;
15456 RepeatedMask[Idx] = M + (Lane * NumLaneElts);
15460 // Shuffle each source sub-lane to its destination.
15461 SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
15462 for (int i = 0; i != NumElts; i += NumSubLaneElts) {
15463 int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
15464 if (SrcSubLane < 0)
15465 continue;
15466 for (int j = 0; j != NumSubLaneElts; ++j)
15467 SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
15470 // Avoid returning the same shuffle operation.
15471 // v8i32 = vector_shuffle<0,1,4,5,2,3,6,7> t5, undef:v8i32
15472 if (RepeatedMask == Mask || SubLaneMask == Mask)
15473 return SDValue();
15475 SDValue RepeatedShuffle =
15476 DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
15478 return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
15479 SubLaneMask);
15482 // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
15483 // (with PERMQ/PERMPD). On AVX2/AVX512BW targets, permuting 32-bit sub-lanes,
15484 // even with a variable shuffle, can be worth it for v32i8/v64i8 vectors.
15485 // Otherwise we can only permute whole 128-bit lanes.
15486 int MinSubLaneScale = 1, MaxSubLaneScale = 1;
15487 if (Subtarget.hasAVX2() && VT.is256BitVector()) {
15488 bool OnlyLowestElts = isUndefOrInRange(Mask, 0, NumLaneElts);
15489 MinSubLaneScale = 2;
15490 MaxSubLaneScale =
15491 (!OnlyLowestElts && V2.isUndef() && VT == MVT::v32i8) ? 4 : 2;
15493 if (Subtarget.hasBWI() && VT == MVT::v64i8)
15494 MinSubLaneScale = MaxSubLaneScale = 4;
15496 for (int Scale = MinSubLaneScale; Scale <= MaxSubLaneScale; Scale *= 2)
15497 if (SDValue Shuffle = ShuffleSubLanes(Scale))
15498 return Shuffle;
15500 return SDValue();
15503 static bool matchShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
15504 bool &ForceV1Zero, bool &ForceV2Zero,
15505 unsigned &ShuffleImm, ArrayRef<int> Mask,
15506 const APInt &Zeroable) {
15507 int NumElts = VT.getVectorNumElements();
15508 assert(VT.getScalarSizeInBits() == 64 &&
15509 (NumElts == 2 || NumElts == 4 || NumElts == 8) &&
15510 "Unexpected data type for VSHUFPD");
15511 assert(isUndefOrZeroOrInRange(Mask, 0, 2 * NumElts) &&
15512 "Illegal shuffle mask");
15514 bool ZeroLane[2] = { true, true };
15515 for (int i = 0; i < NumElts; ++i)
15516 ZeroLane[i & 1] &= Zeroable[i];
15518 // Mask for V8F64: 0/1, 8/9, 2/3, 10/11, 4/5, ..
15519 // Mask for V4F64: 0/1, 4/5, 2/3, 6/7, ..
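// For example, the v4f64 mask <0,5,2,7> matches directly with
// ShuffleImm = 0b1010.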
15520 ShuffleImm = 0;
15521 bool ShufpdMask = true;
15522 bool CommutableMask = true;
15523 for (int i = 0; i < NumElts; ++i) {
15524 if (Mask[i] == SM_SentinelUndef || ZeroLane[i & 1])
15525 continue;
15526 if (Mask[i] < 0)
15527 return false;
15528 int Val = (i & 6) + NumElts * (i & 1);
15529 int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
15530 if (Mask[i] < Val || Mask[i] > Val + 1)
15531 ShufpdMask = false;
15532 if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
15533 CommutableMask = false;
15534 ShuffleImm |= (Mask[i] % 2) << i;
15537 if (!ShufpdMask && !CommutableMask)
15538 return false;
15540 if (!ShufpdMask && CommutableMask)
15541 std::swap(V1, V2);
15543 ForceV1Zero = ZeroLane[0];
15544 ForceV2Zero = ZeroLane[1];
15545 return true;
15548 static SDValue lowerShuffleWithSHUFPD(const SDLoc &DL, MVT VT, SDValue V1,
15549 SDValue V2, ArrayRef<int> Mask,
15550 const APInt &Zeroable,
15551 const X86Subtarget &Subtarget,
15552 SelectionDAG &DAG) {
15553 assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) &&
15554 "Unexpected data type for VSHUFPD");
15556 unsigned Immediate = 0;
15557 bool ForceV1Zero = false, ForceV2Zero = false;
15558 if (!matchShuffleWithSHUFPD(VT, V1, V2, ForceV1Zero, ForceV2Zero, Immediate,
15559 Mask, Zeroable))
15560 return SDValue();
15562 // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
15563 if (ForceV1Zero)
15564 V1 = getZeroVector(VT, Subtarget, DAG, DL);
15565 if (ForceV2Zero)
15566 V2 = getZeroVector(VT, Subtarget, DAG, DL);
15568 return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
15569 DAG.getTargetConstant(Immediate, DL, MVT::i8));
15572 // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
15573 // by zeroable elements in the remaining 24 elements. Turn this into two
15574 // vmovqb instructions shuffled together.
15575 static SDValue lowerShuffleAsVTRUNCAndUnpack(const SDLoc &DL, MVT VT,
15576 SDValue V1, SDValue V2,
15577 ArrayRef<int> Mask,
15578 const APInt &Zeroable,
15579 SelectionDAG &DAG) {
15580 assert(VT == MVT::v32i8 && "Unexpected type!");
15582 // The first 8 indices should be every 8th element.
15583 if (!isSequentialOrUndefInRange(Mask, 0, 8, 0, 8))
15584 return SDValue();
15586 // Remaining elements need to be zeroable.
15587 if (Zeroable.countl_one() < (Mask.size() - 8))
15588 return SDValue();
15590 V1 = DAG.getBitcast(MVT::v4i64, V1);
15591 V2 = DAG.getBitcast(MVT::v4i64, V2);
15593 V1 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V1);
15594 V2 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V2);
15596 // The VTRUNCs will put 0s in the upper 12 bytes. Use them to put zeroes in
15597 // the upper bits of the result using an unpckldq.
15598 SDValue Unpack = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2,
15599 { 0, 1, 2, 3, 16, 17, 18, 19,
15600 4, 5, 6, 7, 20, 21, 22, 23 });
15601 // Insert the unpckldq into a zero vector to widen to v32i8.
15602 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v32i8,
15603 DAG.getConstant(0, DL, MVT::v32i8), Unpack,
15604 DAG.getIntPtrConstant(0, DL));
15607 // a = shuffle v1, v2, mask1 ; interleaving lower lanes of v1 and v2
15608 // b = shuffle v1, v2, mask2 ; interleaving higher lanes of v1 and v2
15609 // =>
15610 // ul = unpckl v1, v2
15611 // uh = unpckh v1, v2
15612 // a = vperm ul, uh
15613 // b = vperm ul, uh
15615 // Pattern-match interleave(256b v1, 256b v2) -> 512b v3 and lower it into unpck
15616 // and permute. We cannot directly match v3 because it is split into two
15617 // 256-bit vectors in earlier isel stages. Therefore, this function matches a
15618 // pair of 256-bit shuffles and makes sure the masks are consecutive.
15620 // Once unpck and permute nodes are created, the permute corresponding to this
15621 // shuffle is returned, while the other permute replaces the other half of the
15622 // shuffle in the selection dag.
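// For example (illustrative, VT == v8i32): the first-half mask is
// <0,8,1,9,2,10,3,11> and the second-half mask is <4,12,5,13,6,14,7,15>.
// unpckl/unpckh produce ul = <a0,b0,a1,b1,a4,b4,a5,b5> and
// uh = <a2,b2,a3,b3,a6,b6,a7,b7>, and VPERM2X128 with immediates 0x20/0x31
// stitches the low/high 128-bit halves of ul and uh back into the two
// interleaved results.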
15623 static SDValue lowerShufflePairAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
15624 SDValue V1, SDValue V2,
15625 ArrayRef<int> Mask,
15626 SelectionDAG &DAG) {
15627 if (VT != MVT::v8f32 && VT != MVT::v8i32 && VT != MVT::v16i16 &&
15628 VT != MVT::v32i8)
15629 return SDValue();
15630 // <B0, B1, B0+1, B1+1, ...>
15631 auto IsInterleavingPattern = [&](ArrayRef<int> Mask, unsigned Begin0,
15632 unsigned Begin1) {
15633 size_t Size = Mask.size();
15634 assert(Size % 2 == 0 && "Expected even mask size");
15635 for (unsigned I = 0; I < Size; I += 2) {
15636 if (Mask[I] != (int)(Begin0 + I / 2) ||
15637 Mask[I + 1] != (int)(Begin1 + I / 2))
15638 return false;
15640 return true;
15642 // Check which half of the interleave this shuffle node produces.
15643 int NumElts = VT.getVectorNumElements();
15644 size_t FirstQtr = NumElts / 2;
15645 size_t ThirdQtr = NumElts + NumElts / 2;
15646 bool IsFirstHalf = IsInterleavingPattern(Mask, 0, NumElts);
15647 bool IsSecondHalf = IsInterleavingPattern(Mask, FirstQtr, ThirdQtr);
15648 if (!IsFirstHalf && !IsSecondHalf)
15649 return SDValue();
15651 // Find the intersection between shuffle users of V1 and V2.
15652 SmallVector<SDNode *, 2> Shuffles;
15653 for (SDNode *User : V1->uses())
15654 if (User->getOpcode() == ISD::VECTOR_SHUFFLE && User->getOperand(0) == V1 &&
15655 User->getOperand(1) == V2)
15656 Shuffles.push_back(User);
15657 // Limit to two shuffle users for now.
15658 if (Shuffles.size() != 2)
15659 return SDValue();
15660 // Find out which half of the 512-bit interleave each smaller shuffle produces.
15661 auto *SVN1 = cast<ShuffleVectorSDNode>(Shuffles[0]);
15662 auto *SVN2 = cast<ShuffleVectorSDNode>(Shuffles[1]);
15663 SDNode *FirstHalf;
15664 SDNode *SecondHalf;
15665 if (IsInterleavingPattern(SVN1->getMask(), 0, NumElts) &&
15666 IsInterleavingPattern(SVN2->getMask(), FirstQtr, ThirdQtr)) {
15667 FirstHalf = Shuffles[0];
15668 SecondHalf = Shuffles[1];
15669 } else if (IsInterleavingPattern(SVN1->getMask(), FirstQtr, ThirdQtr) &&
15670 IsInterleavingPattern(SVN2->getMask(), 0, NumElts)) {
15671 FirstHalf = Shuffles[1];
15672 SecondHalf = Shuffles[0];
15673 } else {
15674 return SDValue();
15676 // Lower into unpck and perm. Return the perm of this shuffle and replace
15677 // the other.
15678 SDValue Unpckl = DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
15679 SDValue Unpckh = DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
15680 SDValue Perm1 = DAG.getNode(X86ISD::VPERM2X128, DL, VT, Unpckl, Unpckh,
15681 DAG.getTargetConstant(0x20, DL, MVT::i8));
15682 SDValue Perm2 = DAG.getNode(X86ISD::VPERM2X128, DL, VT, Unpckl, Unpckh,
15683 DAG.getTargetConstant(0x31, DL, MVT::i8));
15684 if (IsFirstHalf) {
15685 DAG.ReplaceAllUsesWith(SecondHalf, &Perm2);
15686 return Perm1;
15688 DAG.ReplaceAllUsesWith(FirstHalf, &Perm1);
15689 return Perm2;
15692 /// Handle lowering of 4-lane 64-bit floating point shuffles.
15694 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
15695 /// isn't available.
15696 static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15697 const APInt &Zeroable, SDValue V1, SDValue V2,
15698 const X86Subtarget &Subtarget,
15699 SelectionDAG &DAG) {
15700 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15701 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15702 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15704 if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4f64, V1, V2, Mask, Zeroable,
15705 Subtarget, DAG))
15706 return V;
15708 if (V2.isUndef()) {
15709 // Check for being able to broadcast a single element.
15710 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f64, V1, V2,
15711 Mask, Subtarget, DAG))
15712 return Broadcast;
15714 // Use low duplicate instructions for masks that match their pattern.
15715 if (isShuffleEquivalent(Mask, {0, 0, 2, 2}, V1, V2))
15716 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
15718 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
15719 // Non-half-crossing single input shuffles can be lowered with an
15720 // interleaved permutation.
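// e.g. the v4f64 mask <1,0,3,2> yields VPERMILPMask = 0b0101, i.e. a VPERMILPD
// that swaps the two elements within each 128-bit lane.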
15721 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
15722 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
15723 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
15724 DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
15727 // With AVX2 we have direct support for this permutation.
15728 if (Subtarget.hasAVX2())
15729 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
15730 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15732 // Try to create an in-lane repeating shuffle mask and then shuffle the
15733 // results into the target lanes.
15734 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15735 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15736 return V;
15738 // Try to permute the lanes and then use a per-lane permute.
15739 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(DL, MVT::v4f64, V1, V2,
15740 Mask, DAG, Subtarget))
15741 return V;
15743 // Otherwise, fall back.
15744 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v4f64, V1, V2, Mask,
15745 DAG, Subtarget);
15748 // Use dedicated unpack instructions for masks that match their pattern.
15749 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
15750 return V;
15752 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
15753 Zeroable, Subtarget, DAG))
15754 return Blend;
15756 // Check if the shuffle happens to exactly fit the SHUFPD pattern.
15757 if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v4f64, V1, V2, Mask,
15758 Zeroable, Subtarget, DAG))
15759 return Op;
15761 bool V1IsInPlace = isShuffleMaskInputInPlace(0, Mask);
15762 bool V2IsInPlace = isShuffleMaskInputInPlace(1, Mask);
15764 // If we have lane crossing shuffles AND they don't all come from the lower
15765 // lane elements, lower to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
15766 // TODO: Handle BUILD_VECTOR sources, which getVectorShuffle currently
15767 // canonicalizes to a blend of splats, which isn't necessary for this combine.
15768 if (is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask) &&
15769 !all_of(Mask, [](int M) { return M < 2 || (4 <= M && M < 6); }) &&
15770 (V1.getOpcode() != ISD::BUILD_VECTOR) &&
15771 (V2.getOpcode() != ISD::BUILD_VECTOR))
15772 return lowerShuffleAsLanePermuteAndSHUFP(DL, MVT::v4f64, V1, V2, Mask, DAG);
15774 // If we have one input in place, then we can permute the other input and
15775 // blend the result.
15776 if (V1IsInPlace || V2IsInPlace)
15777 return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4f64, V1, V2, Mask,
15778 Subtarget, DAG);
15780 // Try to create an in-lane repeating shuffle mask and then shuffle the
15781 // results into the target lanes.
15782 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15783 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15784 return V;
15786 // Try to simplify this by merging 128-bit lanes to enable a lane-based
15787 // shuffle. However, if we have AVX2 and either input is already in place,
15788 // we will be able to shuffle the other input even across lanes in a single
15789 // instruction, so skip this pattern.
15790 if (!(Subtarget.hasAVX2() && (V1IsInPlace || V2IsInPlace)))
15791 if (SDValue V = lowerShuffleAsLanePermuteAndRepeatedMask(
15792 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15793 return V;
15795 // If we have VLX support, we can use VEXPAND.
15796 if (Subtarget.hasVLX())
15797 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask, V1, V2,
15798 DAG, Subtarget))
15799 return V;
15801 // If we have AVX2 then we always want to lower with a blend because at v4 we
15802 // can fully permute the elements.
15803 if (Subtarget.hasAVX2())
15804 return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4f64, V1, V2, Mask,
15805 Subtarget, DAG);
15807 // Otherwise fall back on generic lowering.
15808 return lowerShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask,
15809 Subtarget, DAG);
15812 /// Handle lowering of 4-lane 64-bit integer shuffles.
15814 /// This routine is only called when we have AVX2 and thus a reasonable
15815 /// instruction set for v4i64 shuffling.
15816 static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15817 const APInt &Zeroable, SDValue V1, SDValue V2,
15818 const X86Subtarget &Subtarget,
15819 SelectionDAG &DAG) {
15820 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15821 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15822 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15823 assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
15825 if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
15826 Subtarget, DAG))
15827 return V;
15829 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
15830 Zeroable, Subtarget, DAG))
15831 return Blend;
15833 // Check for being able to broadcast a single element.
15834 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i64, V1, V2, Mask,
15835 Subtarget, DAG))
15836 return Broadcast;
15838 // Try to use shift instructions if fast.
15839 if (Subtarget.preferLowerShuffleAsShift())
15840 if (SDValue Shift =
15841 lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
15842 Subtarget, DAG, /*BitwiseOnly*/ true))
15843 return Shift;
15845 if (V2.isUndef()) {
15846 // When the shuffle is mirrored between the 128-bit lanes of the unit, we
15847 // can use lower latency instructions that will operate on both lanes.
15848 SmallVector<int, 2> RepeatedMask;
15849 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
15850 SmallVector<int, 4> PSHUFDMask;
15851 narrowShuffleMaskElts(2, RepeatedMask, PSHUFDMask);
15852 return DAG.getBitcast(
15853 MVT::v4i64,
15854 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
15855 DAG.getBitcast(MVT::v8i32, V1),
15856 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
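// e.g. the v4i64 mask <1,0,3,2> repeats <1,0> in each 128-bit lane; splitting
// each 64-bit element into two 32-bit elements gives the v8i32 PSHUFD mask
// <2,3,0,1>, i.e. immediate 0x4E.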
15859 // AVX2 provides a direct instruction for permuting a single input across
15860 // lanes.
15861 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
15862 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15865 // Try to use shift instructions.
15866 if (SDValue Shift =
15867 lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, Zeroable, Subtarget,
15868 DAG, /*BitwiseOnly*/ false))
15869 return Shift;
15871 // If we have VLX support, we can use VALIGN or VEXPAND.
15872 if (Subtarget.hasVLX()) {
15873 if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v4i64, V1, V2, Mask,
15874 Zeroable, Subtarget, DAG))
15875 return Rotate;
15877 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, V1, V2,
15878 DAG, Subtarget))
15879 return V;
15882 // Try to use PALIGNR.
15883 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i64, V1, V2, Mask,
15884 Subtarget, DAG))
15885 return Rotate;
15887 // Use dedicated unpack instructions for masks that match their pattern.
15888 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
15889 return V;
15891 bool V1IsInPlace = isShuffleMaskInputInPlace(0, Mask);
15892 bool V2IsInPlace = isShuffleMaskInputInPlace(1, Mask);
15894 // If we have one input in place, then we can permute the other input and
15895 // blend the result.
15896 if (V1IsInPlace || V2IsInPlace)
15897 return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i64, V1, V2, Mask,
15898 Subtarget, DAG);
15900 // Try to create an in-lane repeating shuffle mask and then shuffle the
15901 // results into the target lanes.
15902 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15903 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
15904 return V;
15906 // Try to lower to PERMQ(BLENDD(V1,V2)).
15907 if (SDValue V =
15908 lowerShuffleAsBlendAndPermute(DL, MVT::v4i64, V1, V2, Mask, DAG))
15909 return V;
15911 // Try to simplify this by merging 128-bit lanes to enable a lane-based
15912 // shuffle. However, if we have AVX2 and either input is already in place,
15913 // we will be able to shuffle the other input even across lanes in a single
15914 // instruction, so skip this pattern.
15915 if (!V1IsInPlace && !V2IsInPlace)
15916 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
15917 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
15918 return Result;
15920 // Otherwise fall back on generic blend lowering.
15921 return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i64, V1, V2, Mask,
15922 Subtarget, DAG);
15925 /// Handle lowering of 8-lane 32-bit floating point shuffles.
15927 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
15928 /// isn't available.
15929 static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15930 const APInt &Zeroable, SDValue V1, SDValue V2,
15931 const X86Subtarget &Subtarget,
15932 SelectionDAG &DAG) {
15933 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
15934 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
15935 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
15937 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
15938 Zeroable, Subtarget, DAG))
15939 return Blend;
15941 // Check for being able to broadcast a single element.
15942 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f32, V1, V2, Mask,
15943 Subtarget, DAG))
15944 return Broadcast;
15946 if (!Subtarget.hasAVX2()) {
15947 SmallVector<int> InLaneMask;
15948 computeInLaneShuffleMask(Mask, Mask.size() / 2, InLaneMask);
15950 if (!is128BitLaneRepeatedShuffleMask(MVT::v8f32, InLaneMask))
15951 if (SDValue R = splitAndLowerShuffle(DL, MVT::v8f32, V1, V2, Mask, DAG,
15952 /*SimpleOnly*/ true))
15953 return R;
15955 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
15956 Zeroable, Subtarget, DAG))
15957 return DAG.getBitcast(MVT::v8f32, ZExt);
15959 // If the shuffle mask is repeated in each 128-bit lane, we have many more
15960 // options to efficiently lower the shuffle.
15961 SmallVector<int, 4> RepeatedMask;
15962 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
15963 assert(RepeatedMask.size() == 4 &&
15964 "Repeated masks must be half the mask width!");
15966 // Use even/odd duplicate instructions for masks that match their pattern.
15967 if (isShuffleEquivalent(RepeatedMask, {0, 0, 2, 2}, V1, V2))
15968 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
15969 if (isShuffleEquivalent(RepeatedMask, {1, 1, 3, 3}, V1, V2))
15970 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
15972 if (V2.isUndef())
15973 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
15974 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
15976 // Use dedicated unpack instructions for masks that match their pattern.
15977 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
15978 return V;
15980 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
15981 // have already handled any direct blends.
15982 return lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
15985 // Try to create an in-lane repeating shuffle mask and then shuffle the
15986 // results into the target lanes.
15987 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15988 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
15989 return V;
15991 // If we have a single input shuffle with different shuffle patterns in the
15992 // two 128-bit lanes use the variable mask to VPERMILPS.
15993 if (V2.isUndef()) {
15994 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask)) {
15995 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
15996 return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
15998 if (Subtarget.hasAVX2()) {
15999 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
16000 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);
16002 // Otherwise, fall back.
16003 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v8f32, V1, V2, Mask,
16004 DAG, Subtarget);
16007 // Try to simplify this by merging 128-bit lanes to enable a lane-based
16008 // shuffle.
16009 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16010 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
16011 return Result;
16013 // If we have VLX support, we can use VEXPAND.
16014 if (Subtarget.hasVLX())
16015 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask, V1, V2,
16016 DAG, Subtarget))
16017 return V;
16019 // Try to match an interleave of two v8f32s and lower them as unpck and
16020 // permutes using ymms. This needs to go before we try to split the vectors.
16022 // TODO: Expand this to AVX1. Currently v8i32 is cast to v8f32 and hits
16023 // this path inadvertently.
16024 if (Subtarget.hasAVX2() && !Subtarget.hasAVX512())
16025 if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v8f32, V1, V2,
16026 Mask, DAG))
16027 return V;
16029 // For non-AVX512, if the mask is an in-lane unpack of 16-bit elements, try to
16030 // split, since after splitting we get more efficient code using vpunpcklwd and
16031 // vpunpckhwd than with vblend.
16032 if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32, DAG))
16033 return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, Subtarget,
16034 DAG);
16036 // If we have AVX2 then we always want to lower with a blend because at v8 we
16037 // can fully permute the elements.
16038 if (Subtarget.hasAVX2())
16039 return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8f32, V1, V2, Mask,
16040 Subtarget, DAG);
16042 // Otherwise fall back on generic lowering.
16043 return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
16044 Subtarget, DAG);
16047 /// Handle lowering of 8-lane 32-bit integer shuffles.
16049 /// This routine is only called when we have AVX2 and thus a reasonable
16050 /// instruction set for v8i32 shuffling.
16051 static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16052 const APInt &Zeroable, SDValue V1, SDValue V2,
16053 const X86Subtarget &Subtarget,
16054 SelectionDAG &DAG) {
16055 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
16056 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
16057 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16058 assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
16060 int NumV2Elements = count_if(Mask, [](int M) { return M >= 8; });
16062 // Whenever we can lower this as a zext, that instruction is strictly faster
16063 // than any alternative. It also allows us to fold memory operands into the
16064 // shuffle in many cases.
16065 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
16066 Zeroable, Subtarget, DAG))
16067 return ZExt;
16069 // Try to match an interleave of two v8i32s and lower them as unpck and
16070 // permutes using ymms. This needs to go before we try to split the vectors.
16071 if (!Subtarget.hasAVX512())
16072 if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v8i32, V1, V2,
16073 Mask, DAG))
16074 return V;
16076 // For non-AVX512, if the mask is an in-lane unpack of 16-bit elements, try to
16077 // split, since after splitting we get more efficient code than vblend by using
16078 // vpunpcklwd and vpunpckhwd.
16079 if (isUnpackWdShuffleMask(Mask, MVT::v8i32, DAG) && !V2.isUndef() &&
16080 !Subtarget.hasAVX512())
16081 return lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask, Subtarget,
16082 DAG);
16084 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
16085 Zeroable, Subtarget, DAG))
16086 return Blend;
16088 // Check for being able to broadcast a single element.
16089 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i32, V1, V2, Mask,
16090 Subtarget, DAG))
16091 return Broadcast;
16093 // Try to use shift instructions if fast.
16094 if (Subtarget.preferLowerShuffleAsShift()) {
16095 if (SDValue Shift =
16096 lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask, Zeroable,
16097 Subtarget, DAG, /*BitwiseOnly*/ true))
16098 return Shift;
16099 if (NumV2Elements == 0)
16100 if (SDValue Rotate =
16101 lowerShuffleAsBitRotate(DL, MVT::v8i32, V1, Mask, Subtarget, DAG))
16102 return Rotate;
16105 // If the shuffle mask is repeated in each 128-bit lane we can use more
16106 // efficient instructions that mirror the shuffles across the two 128-bit
16107 // lanes.
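// e.g. the single-input v8i32 mask <3,2,1,0,7,6,5,4> repeats <3,2,1,0> in both
// lanes and maps to a single PSHUFD with immediate 0x1B.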
16108 SmallVector<int, 4> RepeatedMask;
16109 bool Is128BitLaneRepeatedShuffle =
16110 is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
16111 if (Is128BitLaneRepeatedShuffle) {
16112 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16113 if (V2.isUndef())
16114 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
16115 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16117 // Use dedicated unpack instructions for masks that match their pattern.
16118 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
16119 return V;
16122 // Try to use shift instructions.
16123 if (SDValue Shift =
16124 lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask, Zeroable, Subtarget,
16125 DAG, /*BitwiseOnly*/ false))
16126 return Shift;
16128 if (!Subtarget.preferLowerShuffleAsShift() && NumV2Elements == 0)
16129 if (SDValue Rotate =
16130 lowerShuffleAsBitRotate(DL, MVT::v8i32, V1, Mask, Subtarget, DAG))
16131 return Rotate;
16133 // If we have VLX support, we can use VALIGN or EXPAND.
16134 if (Subtarget.hasVLX()) {
16135 if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v8i32, V1, V2, Mask,
16136 Zeroable, Subtarget, DAG))
16137 return Rotate;
16139 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask, V1, V2,
16140 DAG, Subtarget))
16141 return V;
16144 // Try to use byte rotation instructions.
16145 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i32, V1, V2, Mask,
16146 Subtarget, DAG))
16147 return Rotate;
16149 // Try to create an in-lane repeating shuffle mask and then shuffle the
16150 // results into the target lanes.
16151 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16152 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
16153 return V;
16155 if (V2.isUndef()) {
16156 // Try to produce a fixed cross-128-bit lane permute followed by unpack
16157 // because that should be faster than the variable permute alternatives.
16158 if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v8i32, Mask, V1, V2, DAG))
16159 return V;
16161 // If the shuffle patterns aren't repeated but it's a single input, directly
16162 // generate a cross-lane VPERMD instruction.
16163 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
16164 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
16167 // Assume that a single SHUFPS is faster than an alternative sequence of
16168 // multiple instructions (even if the CPU has a domain penalty).
16169 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
16170 if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
16171 SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
16172 SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
16173 SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
16174 CastV1, CastV2, DAG);
16175 return DAG.getBitcast(MVT::v8i32, ShufPS);
16178 // Try to simplify this by merging 128-bit lanes to enable a lane-based
16179 // shuffle.
16180 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16181 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
16182 return Result;
16184 // Otherwise fall back on generic blend lowering.
16185 return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8i32, V1, V2, Mask,
16186 Subtarget, DAG);
16189 /// Handle lowering of 16-lane 16-bit integer shuffles.
16191 /// This routine is only called when we have AVX2 and thus a reasonable
16192 /// instruction set for v16i16 shuffling.
16193 static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16194 const APInt &Zeroable, SDValue V1, SDValue V2,
16195 const X86Subtarget &Subtarget,
16196 SelectionDAG &DAG) {
16197 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
16198 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
16199 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16200 assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
16202 // Whenever we can lower this as a zext, that instruction is strictly faster
16203 // than any alternative. It also allows us to fold memory operands into the
16204 // shuffle in many cases.
16205 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16206 DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16207 return ZExt;
16209 // Check for being able to broadcast a single element.
16210 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i16, V1, V2, Mask,
16211 Subtarget, DAG))
16212 return Broadcast;
16214 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
16215 Zeroable, Subtarget, DAG))
16216 return Blend;
16218 // Use dedicated unpack instructions for masks that match their pattern.
16219 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
16220 return V;
16222 // Use dedicated pack instructions for masks that match their pattern.
16223 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG,
16224 Subtarget))
16225 return V;
16227 // Try to lower using a truncation.
16228 if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v16i16, V1, V2, Mask, Zeroable,
16229 Subtarget, DAG))
16230 return V;
16232 // Try to use shift instructions.
16233 if (SDValue Shift =
16234 lowerShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask, Zeroable,
16235 Subtarget, DAG, /*BitwiseOnly*/ false))
16236 return Shift;
16238 // Try to use byte rotation instructions.
16239 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i16, V1, V2, Mask,
16240 Subtarget, DAG))
16241 return Rotate;
16243 // Try to create an in-lane repeating shuffle mask and then shuffle the
16244 // results into the target lanes.
16245 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16246 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
16247 return V;
16249 if (V2.isUndef()) {
16250 // Try to use bit rotation instructions.
16251 if (SDValue Rotate =
16252 lowerShuffleAsBitRotate(DL, MVT::v16i16, V1, Mask, Subtarget, DAG))
16253 return Rotate;
16255 // Try to produce a fixed cross-128-bit lane permute followed by unpack
16256 // because that should be faster than the variable permute alternatives.
16257 if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v16i16, Mask, V1, V2, DAG))
16258 return V;
16260 // There are no generalized cross-lane shuffle operations available on i16
16261 // element types.
16262 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask)) {
16263 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16264 DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
16265 return V;
16267 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v16i16, V1, V2, Mask,
16268 DAG, Subtarget);
16271 SmallVector<int, 8> RepeatedMask;
16272 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
16273 // As this is a single-input shuffle, the repeated mask should be
16274 // a strictly valid v8i16 mask that we can pass through to the v8i16
16275 // lowering to handle even the v16 case.
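// e.g. the mask <3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12> repeats
// <3,2,1,0,7,6,5,4> in both 128-bit lanes.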
16276 return lowerV8I16GeneralSingleInputShuffle(
16277 DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
16281 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1, V2,
16282 Zeroable, Subtarget, DAG))
16283 return PSHUFB;
16285 // AVX512BW can lower to VPERMW (non-VLX will pad to v32i16).
16286 if (Subtarget.hasBWI())
16287 return lowerShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, Subtarget, DAG);
16289 // Try to simplify this by merging 128-bit lanes to enable a lane-based
16290 // shuffle.
16291 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16292 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
16293 return Result;
16295 // Try to permute the lanes and then use a per-lane permute.
16296 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16297 DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
16298 return V;
16300 // Try to match an interleave of two v16i16s and lower them as unpck and
16301 // permutes using ymms.
16302 if (!Subtarget.hasAVX512())
16303 if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v16i16, V1, V2,
16304 Mask, DAG))
16305 return V;
16307 // Otherwise fall back on generic lowering.
16308 return lowerShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask,
16309 Subtarget, DAG);
16312 /// Handle lowering of 32-lane 8-bit integer shuffles.
16314 /// This routine is only called when we have AVX2 and thus a reasonable
16315 /// instruction set for v32i8 shuffling.
16316 static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16317 const APInt &Zeroable, SDValue V1, SDValue V2,
16318 const X86Subtarget &Subtarget,
16319 SelectionDAG &DAG) {
16320 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
16321 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
16322 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
16323 assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");
16325 // Whenever we can lower this as a zext, that instruction is strictly faster
16326 // than any alternative. It also allows us to fold memory operands into the
16327 // shuffle in many cases.
16328 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2, Mask,
16329 Zeroable, Subtarget, DAG))
16330 return ZExt;
16332 // Check for being able to broadcast a single element.
16333 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v32i8, V1, V2, Mask,
16334 Subtarget, DAG))
16335 return Broadcast;
16337 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
16338 Zeroable, Subtarget, DAG))
16339 return Blend;
16341 // Use dedicated unpack instructions for masks that match their pattern.
16342 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
16343 return V;
16345 // Use dedicated pack instructions for masks that match their pattern.
16346 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG,
16347 Subtarget))
16348 return V;
16350 // Try to lower using a truncation.
16351 if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v32i8, V1, V2, Mask, Zeroable,
16352 Subtarget, DAG))
16353 return V;
16355 // Try to use shift instructions.
16356 if (SDValue Shift =
16357 lowerShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask, Zeroable, Subtarget,
16358 DAG, /*BitwiseOnly*/ false))
16359 return Shift;
16361 // Try to use byte rotation instructions.
16362 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i8, V1, V2, Mask,
16363 Subtarget, DAG))
16364 return Rotate;
16366 // Try to use bit rotation instructions.
16367 if (V2.isUndef())
16368 if (SDValue Rotate =
16369 lowerShuffleAsBitRotate(DL, MVT::v32i8, V1, Mask, Subtarget, DAG))
16370 return Rotate;
16372 // Try to create an in-lane repeating shuffle mask and then shuffle the
16373 // results into the target lanes.
16374 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16375 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
16376 return V;
16378 // There are no generalized cross-lane shuffle operations available on i8
16379 // element types.
16380 if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
16381 // Try to produce a fixed cross-128-bit lane permute followed by unpack
16382 // because that should be faster than the variable permute alternatives.
16383 if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v32i8, Mask, V1, V2, DAG))
16384 return V;
16386 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16387 DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
16388 return V;
16390 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v32i8, V1, V2, Mask,
16391 DAG, Subtarget);
16394 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1, V2,
16395 Zeroable, Subtarget, DAG))
16396 return PSHUFB;
16398 // AVX512VBMI can lower to VPERMB (non-VLX will pad to v64i8).
16399 if (Subtarget.hasVBMI())
16400 return lowerShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, Subtarget, DAG);
16402 // Try to simplify this by merging 128-bit lanes to enable a lane-based
16403 // shuffle.
16404 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16405 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
16406 return Result;
16408 // Try to permute the lanes and then use a per-lane permute.
16409 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16410 DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
16411 return V;
16413 // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
16414 // by zeroable elements in the remaining 24 elements. Turn this into two
16415 // vmovqb instructions shuffled together.
16416 if (Subtarget.hasVLX())
16417 if (SDValue V = lowerShuffleAsVTRUNCAndUnpack(DL, MVT::v32i8, V1, V2,
16418 Mask, Zeroable, DAG))
16419 return V;
16421 // Try to match an interleave of two v32i8s and lower them as unpck and
16422 // permutes using ymms.
16423 if (!Subtarget.hasAVX512())
16424 if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v32i8, V1, V2,
16425 Mask, DAG))
16426 return V;
16428 // Otherwise fall back on generic lowering.
16429 return lowerShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask,
16430 Subtarget, DAG);
16433 /// High-level routine to lower various 256-bit x86 vector shuffles.
16435 /// This routine either breaks down the specific type of a 256-bit x86 vector
16436 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
16437 /// together based on the available instructions.
16438 static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
16439 SDValue V1, SDValue V2, const APInt &Zeroable,
16440 const X86Subtarget &Subtarget,
16441 SelectionDAG &DAG) {
16442 // If we have a single input to the zero element, insert that into V1 if we
16443 // can do so cheaply.
16444 int NumElts = VT.getVectorNumElements();
16445 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
16447 if (NumV2Elements == 1 && Mask[0] >= NumElts)
16448 if (SDValue Insertion = lowerShuffleAsElementInsertion(
16449 DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
16450 return Insertion;
16452 // Handle special cases where the lower or upper half is UNDEF.
16453 if (SDValue V =
16454 lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
16455 return V;
16457 // There is a really nice hard cut-over between AVX1 and AVX2 that means we
16458 // can check for those subtargets here and avoid much of the subtarget
16459 // querying in the per-vector-type lowering routines. With AVX1 we have
16460 // essentially *zero* ability to manipulate a 256-bit vector with integer
16461 // types. Since we'll use floating point types there eventually, just
16462 // immediately cast everything to a float and operate entirely in that domain.
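// For example, without AVX2 a v4i64 shuffle is rewritten below as the same
// shuffle of v4f64 bitcasts of its operands; sub-32-bit element types instead
// try bit masks/blends and otherwise split into two 128-bit shuffles.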
16463 if (VT.isInteger() && !Subtarget.hasAVX2()) {
16464 int ElementBits = VT.getScalarSizeInBits();
16465 if (ElementBits < 32) {
16466 // No floating point type available; if we can't use the bit operations
16467 // for masking/blending then decompose into 128-bit vectors.
16468 if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
16469 Subtarget, DAG))
16470 return V;
16471 if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
16472 return V;
16473 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG, /*SimpleOnly*/ false);
16476 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
16477 VT.getVectorNumElements());
16478 V1 = DAG.getBitcast(FpVT, V1);
16479 V2 = DAG.getBitcast(FpVT, V2);
16480 return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
16483 if (VT == MVT::v16f16 || VT == MVT::v16bf16) {
16484 V1 = DAG.getBitcast(MVT::v16i16, V1);
16485 V2 = DAG.getBitcast(MVT::v16i16, V2);
16486 return DAG.getBitcast(VT,
16487 DAG.getVectorShuffle(MVT::v16i16, DL, V1, V2, Mask));
16490 switch (VT.SimpleTy) {
16491 case MVT::v4f64:
16492 return lowerV4F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16493 case MVT::v4i64:
16494 return lowerV4I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16495 case MVT::v8f32:
16496 return lowerV8F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16497 case MVT::v8i32:
16498 return lowerV8I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16499 case MVT::v16i16:
16500 return lowerV16I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16501 case MVT::v32i8:
16502 return lowerV32I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16504 default:
16505 llvm_unreachable("Not a valid 256-bit x86 vector type!");
16509 /// Try to lower a vector shuffle as a series of 128-bit shuffles.
16510 static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
16511 const APInt &Zeroable, SDValue V1, SDValue V2,
16512 const X86Subtarget &Subtarget,
16513 SelectionDAG &DAG) {
16514 assert(VT.getScalarSizeInBits() == 64 &&
16515 "Unexpected element type size for 128bit shuffle.");
16517 // Handling a 256-bit vector requires VLX, and most probably the function
16518 // lowerV2X128VectorShuffle() is a better solution for it.
16519 assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");
16521 // TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
16522 SmallVector<int, 4> Widened128Mask;
16523 if (!canWidenShuffleElements(Mask, Widened128Mask))
16524 return SDValue();
16525 assert(Widened128Mask.size() == 4 && "Shuffle widening mismatch");
16527 // Try to use an insert into a zero vector.
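// e.g. (illustrative, v8i64) a result of <0,1,2,3,zero,zero,zero,zero> has
// Zeroable == 0xf0, so V1's low 256 bits are inserted into a zero vector; if
// elements 2 and 3 are also zeroable, only the low 128 bits are kept.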
16528 if (Widened128Mask[0] == 0 && (Zeroable & 0xf0) == 0xf0 &&
16529 (Widened128Mask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) {
16530 unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
16531 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
16532 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
16533 DAG.getIntPtrConstant(0, DL));
16534 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
16535 getZeroVector(VT, Subtarget, DAG, DL), LoV,
16536 DAG.getIntPtrConstant(0, DL));
16539 // Check for patterns which can be matched with a single insert of a 256-bit
16540 // subvector.
16541 bool OnlyUsesV1 = isShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3}, V1, V2);
16542 if (OnlyUsesV1 ||
16543 isShuffleEquivalent(Mask, {0, 1, 2, 3, 8, 9, 10, 11}, V1, V2)) {
16544 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
16545 SDValue SubVec =
16546 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, OnlyUsesV1 ? V1 : V2,
16547 DAG.getIntPtrConstant(0, DL));
16548 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
16549 DAG.getIntPtrConstant(4, DL));
16552 // See if this is an insertion of the lower 128-bits of V2 into V1.
16553 bool IsInsert = true;
16554 int V2Index = -1;
16555 for (int i = 0; i < 4; ++i) {
16556 assert(Widened128Mask[i] >= -1 && "Illegal shuffle sentinel value");
16557 if (Widened128Mask[i] < 0)
16558 continue;
16560 // Make sure all V1 subvectors are in place.
16561 if (Widened128Mask[i] < 4) {
16562 if (Widened128Mask[i] != i) {
16563 IsInsert = false;
16564 break;
16566 } else {
16567 // Make sure we only have a single V2 index and it's the lowest 128 bits.
16568 if (V2Index >= 0 || Widened128Mask[i] != 4) {
16569 IsInsert = false;
16570 break;
16572 V2Index = i;
16575 if (IsInsert && V2Index >= 0) {
16576 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
16577 SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
16578 DAG.getIntPtrConstant(0, DL));
16579 return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
16582 // See if we can widen to a 256-bit lane shuffle; we're going to lose 128-lane
16583 // UNDEF info by lowering to X86ISD::SHUF128 anyway, so by widening where
16584 // possible we at least ensure the lanes stay sequential to help later
16585 // combines.
16586 SmallVector<int, 2> Widened256Mask;
16587 if (canWidenShuffleElements(Widened128Mask, Widened256Mask)) {
16588 Widened128Mask.clear();
16589 narrowShuffleMaskElts(2, Widened256Mask, Widened128Mask);
16592 // Try to lower to vshuf64x2/vshuf32x4.
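// e.g. (illustrative, v8i64) Widened128Mask <1,0,5,4> swaps the 128-bit halves
// within each source, so each 256-bit half comes from a single op (V1 then V2),
// giving PermMask <1,0,1,0> and immediate 0x11.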
16593 SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
16594 int PermMask[4] = {-1, -1, -1, -1};
16595 // Ensure elements came from the same Op.
16596 for (int i = 0; i < 4; ++i) {
16597 assert(Widened128Mask[i] >= -1 && "Illegal shuffle sentinel value");
16598 if (Widened128Mask[i] < 0)
16599 continue;
16601 SDValue Op = Widened128Mask[i] >= 4 ? V2 : V1;
16602 unsigned OpIndex = i / 2;
16603 if (Ops[OpIndex].isUndef())
16604 Ops[OpIndex] = Op;
16605 else if (Ops[OpIndex] != Op)
16606 return SDValue();
16608 PermMask[i] = Widened128Mask[i] % 4;
16611 return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
16612 getV4X86ShuffleImm8ForMask(PermMask, DL, DAG));
16615 /// Handle lowering of 8-lane 64-bit floating point shuffles.
16616 static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16617 const APInt &Zeroable, SDValue V1, SDValue V2,
16618 const X86Subtarget &Subtarget,
16619 SelectionDAG &DAG) {
16620 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
16621 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
16622 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16624 if (V2.isUndef()) {
16625 // Use low duplicate instructions for masks that match their pattern.
16626 if (isShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6}, V1, V2))
16627 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
16629 if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
16630 // Non-half-crossing single input shuffles can be lowered with an
16631 // interleaved permutation.
16632 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
16633 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
16634 ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
16635 ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
16636 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
16637 DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
16640 SmallVector<int, 4> RepeatedMask;
16641 if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
16642 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
16643 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16646 if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8f64, Mask, Zeroable, V1,
16647 V2, Subtarget, DAG))
16648 return Shuf128;
16650 if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
16651 return Unpck;
16653 // Check if the shuffle happens to exactly fit the SHUFPD pattern.
16654 if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v8f64, V1, V2, Mask,
16655 Zeroable, Subtarget, DAG))
16656 return Op;
16658 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1, V2,
16659 DAG, Subtarget))
16660 return V;
16662 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
16663 Zeroable, Subtarget, DAG))
16664 return Blend;
16666 return lowerShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, Subtarget, DAG);
16669 /// Handle lowering of 16-lane 32-bit floating point shuffles.
16670 static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16671 const APInt &Zeroable, SDValue V1, SDValue V2,
16672 const X86Subtarget &Subtarget,
16673 SelectionDAG &DAG) {
16674 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16675 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16676 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16678 // If the shuffle mask is repeated in each 128-bit lane, we have many more
16679 // options to efficiently lower the shuffle.
16680 SmallVector<int, 4> RepeatedMask;
16681 if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
16682 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16684 // Use even/odd duplicate instructions for masks that match their pattern.
16685 if (isShuffleEquivalent(RepeatedMask, {0, 0, 2, 2}, V1, V2))
16686 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
16687 if (isShuffleEquivalent(RepeatedMask, {1, 1, 3, 3}, V1, V2))
16688 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
16690 if (V2.isUndef())
16691 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
16692 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16694 // Use dedicated unpack instructions for masks that match their pattern.
16695 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
16696 return V;
16698 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
16699 Zeroable, Subtarget, DAG))
16700 return Blend;
16702 // Otherwise, fall back to a SHUFPS sequence.
16703 return lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
16706 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
16707 Zeroable, Subtarget, DAG))
16708 return Blend;
16710 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16711 DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
16712 return DAG.getBitcast(MVT::v16f32, ZExt);
16714 // Try to create an in-lane repeating shuffle mask and then shuffle the
16715 // results into the target lanes.
16716 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16717 DL, MVT::v16f32, V1, V2, Mask, Subtarget, DAG))
16718 return V;
16720 // If we have a single input shuffle with different shuffle patterns in the
16721 // 128-bit lanes and don't lane cross, use variable mask VPERMILPS.
16722 if (V2.isUndef() &&
16723 !is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
16724 SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
16725 return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
16728 // If we have AVX512F support, we can use VEXPAND.
16729 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
16730 V1, V2, DAG, Subtarget))
16731 return V;
16733 return lowerShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, Subtarget, DAG);
16736 /// Handle lowering of 8-lane 64-bit integer shuffles.
16737 static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16738 const APInt &Zeroable, SDValue V1, SDValue V2,
16739 const X86Subtarget &Subtarget,
16740 SelectionDAG &DAG) {
16741 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16742 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16743 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16745 // Try to use shift instructions if fast.
16746 if (Subtarget.preferLowerShuffleAsShift())
16747 if (SDValue Shift =
16748 lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask, Zeroable,
16749 Subtarget, DAG, /*BitwiseOnly*/ true))
16750 return Shift;
16752 if (V2.isUndef()) {
16753 // When the shuffle is mirrored between the 128-bit lanes of the unit, we
16754 // can use lower latency instructions that will operate on all four
16755 // 128-bit lanes.
16756 SmallVector<int, 2> Repeated128Mask;
16757 if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
16758 SmallVector<int, 4> PSHUFDMask;
16759 narrowShuffleMaskElts(2, Repeated128Mask, PSHUFDMask);
16760 return DAG.getBitcast(
16761 MVT::v8i64,
16762 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
16763 DAG.getBitcast(MVT::v16i32, V1),
16764 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
16767 SmallVector<int, 4> Repeated256Mask;
16768 if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
16769 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
16770 getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
16773 if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8i64, Mask, Zeroable, V1,
16774 V2, Subtarget, DAG))
16775 return Shuf128;
16777 // Try to use shift instructions.
16778 if (SDValue Shift =
16779 lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask, Zeroable, Subtarget,
16780 DAG, /*BitwiseOnly*/ false))
16781 return Shift;
16783 // Try to use VALIGN.
16784 if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v8i64, V1, V2, Mask,
16785 Zeroable, Subtarget, DAG))
16786 return Rotate;
16788 // Try to use PALIGNR.
16789 if (Subtarget.hasBWI())
16790 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i64, V1, V2, Mask,
16791 Subtarget, DAG))
16792 return Rotate;
16794 if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
16795 return Unpck;
16797 // If we have AVX512F support, we can use VEXPAND.
16798 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1, V2,
16799 DAG, Subtarget))
16800 return V;
16802 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
16803 Zeroable, Subtarget, DAG))
16804 return Blend;
16806 return lowerShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, Subtarget, DAG);
16809 /// Handle lowering of 16-lane 32-bit integer shuffles.
16810 static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16811 const APInt &Zeroable, SDValue V1, SDValue V2,
16812 const X86Subtarget &Subtarget,
16813 SelectionDAG &DAG) {
16814 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16815 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16816 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16818 int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
16820 // Whenever we can lower this as a zext, that instruction is strictly faster
16821 // than any alternative. It also allows us to fold memory operands into the
16822 // shuffle in many cases.
16823 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16824 DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
16825 return ZExt;
16827 // Try to use shift instructions if fast.
16828 if (Subtarget.preferLowerShuffleAsShift()) {
16829 if (SDValue Shift =
16830 lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask, Zeroable,
16831 Subtarget, DAG, /*BitwiseOnly*/ true))
16832 return Shift;
16833 if (NumV2Elements == 0)
16834 if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v16i32, V1, Mask,
16835 Subtarget, DAG))
16836 return Rotate;
16839 // If the shuffle mask is repeated in each 128-bit lane we can use more
16840 // efficient instructions that mirror the shuffles across the four 128-bit
16841 // lanes.
16842 SmallVector<int, 4> RepeatedMask;
16843 bool Is128BitLaneRepeatedShuffle =
16844 is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
16845 if (Is128BitLaneRepeatedShuffle) {
16846 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16847 if (V2.isUndef())
16848 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
16849 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16851 // Use dedicated unpack instructions for masks that match their pattern.
16852 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
16853 return V;
16856 // Try to use shift instructions.
16857 if (SDValue Shift =
16858 lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask, Zeroable,
16859 Subtarget, DAG, /*BitwiseOnly*/ false))
16860 return Shift;
16862 if (!Subtarget.preferLowerShuffleAsShift() && NumV2Elements != 0)
16863 if (SDValue Rotate =
16864 lowerShuffleAsBitRotate(DL, MVT::v16i32, V1, Mask, Subtarget, DAG))
16865 return Rotate;
16867 // Try to use VALIGN.
16868 if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v16i32, V1, V2, Mask,
16869 Zeroable, Subtarget, DAG))
16870 return Rotate;
16872 // Try to use byte rotation instructions.
16873 if (Subtarget.hasBWI())
16874 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i32, V1, V2, Mask,
16875 Subtarget, DAG))
16876 return Rotate;
16878 // Assume that a single SHUFPS is faster than using a permv shuffle.
16879 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
16880 if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
16881 SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
16882 SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
16883 SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
16884 CastV1, CastV2, DAG);
16885 return DAG.getBitcast(MVT::v16i32, ShufPS);
16888 // Try to create an in-lane repeating shuffle mask and then shuffle the
16889 // results into the target lanes.
16890 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16891 DL, MVT::v16i32, V1, V2, Mask, Subtarget, DAG))
16892 return V;
16894 // If we have AVX512F support, we can use VEXPAND.
16895 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask, V1, V2,
16896 DAG, Subtarget))
16897 return V;
16899 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
16900 Zeroable, Subtarget, DAG))
16901 return Blend;
16903 return lowerShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, Subtarget, DAG);
16906 /// Handle lowering of 32-lane 16-bit integer shuffles.
16907 static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16908 const APInt &Zeroable, SDValue V1, SDValue V2,
16909 const X86Subtarget &Subtarget,
16910 SelectionDAG &DAG) {
16911 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16912 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16913 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
16914 assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
16916 // Whenever we can lower this as a zext, that instruction is strictly faster
16917 // than any alternative. It also allows us to fold memory operands into the
16918 // shuffle in many cases.
16919 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16920 DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16921 return ZExt;
16923 // Use dedicated unpack instructions for masks that match their pattern.
16924 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
16925 return V;
16927 // Use dedicated pack instructions for masks that match their pattern.
16928 if (SDValue V =
16929 lowerShuffleWithPACK(DL, MVT::v32i16, Mask, V1, V2, DAG, Subtarget))
16930 return V;
16932 // Try to use shift instructions.
16933 if (SDValue Shift =
16934 lowerShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask, Zeroable,
16935 Subtarget, DAG, /*BitwiseOnly*/ false))
16936 return Shift;
16938 // Try to use byte rotation instructions.
16939 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i16, V1, V2, Mask,
16940 Subtarget, DAG))
16941 return Rotate;
16943 if (V2.isUndef()) {
16944 // Try to use bit rotation instructions.
16945 if (SDValue Rotate =
16946 lowerShuffleAsBitRotate(DL, MVT::v32i16, V1, Mask, Subtarget, DAG))
16947 return Rotate;
16949 SmallVector<int, 8> RepeatedMask;
16950 if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
16951 // As this is a single-input shuffle, the repeated mask should be
16952 // a strictly valid v8i16 mask that we can pass through to the v8i16
16953 // lowering to handle even the v32 case.
16954 return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v32i16, V1,
16955 RepeatedMask, Subtarget, DAG);
16959 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
16960 Zeroable, Subtarget, DAG))
16961 return Blend;
16963 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i16, Mask, V1, V2,
16964 Zeroable, Subtarget, DAG))
16965 return PSHUFB;
16967 return lowerShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, Subtarget, DAG);
16970 /// Handle lowering of 64-lane 8-bit integer shuffles.
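///
/// Illustrative note: only AVX512VBMI provides a full cross-lane byte permute
/// (VPERMB/VPERMT2B); without it, shuffles that no cheaper pattern below can
/// match are split into two 256-bit shuffles at the end of this routine.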
16971 static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16972 const APInt &Zeroable, SDValue V1, SDValue V2,
16973 const X86Subtarget &Subtarget,
16974 SelectionDAG &DAG) {
16975 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16976 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16977 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
16978 assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
16980 // Whenever we can lower this as a zext, that instruction is strictly faster
16981 // than any alternative. It also allows us to fold memory operands into the
16982 // shuffle in many cases.
16983 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16984 DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
16985 return ZExt;
16987 // Use dedicated unpack instructions for masks that match their pattern.
16988 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
16989 return V;
16991 // Use dedicated pack instructions for masks that match their pattern.
16992 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v64i8, Mask, V1, V2, DAG,
16993 Subtarget))
16994 return V;
16996 // Try to use shift instructions.
16997 if (SDValue Shift =
16998 lowerShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget,
16999 DAG, /*BitwiseOnly*/ false))
17000 return Shift;
17002 // Try to use byte rotation instructions.
17003 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v64i8, V1, V2, Mask,
17004 Subtarget, DAG))
17005 return Rotate;
17007 // Try to use bit rotation instructions.
17008 if (V2.isUndef())
17009 if (SDValue Rotate =
17010 lowerShuffleAsBitRotate(DL, MVT::v64i8, V1, Mask, Subtarget, DAG))
17011 return Rotate;
17013 // Lower as AND if possible.
17014 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v64i8, V1, V2, Mask,
17015 Zeroable, Subtarget, DAG))
17016 return Masked;
17018 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1, V2,
17019 Zeroable, Subtarget, DAG))
17020 return PSHUFB;
17022 // Try to create an in-lane repeating shuffle mask and then shuffle the
17023 // results into the target lanes.
17024 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
17025 DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
17026 return V;
17028 if (SDValue Result = lowerShuffleAsLanePermuteAndPermute(
17029 DL, MVT::v64i8, V1, V2, Mask, DAG, Subtarget))
17030 return Result;
17032 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
17033 Zeroable, Subtarget, DAG))
17034 return Blend;
17036 if (!is128BitLaneCrossingShuffleMask(MVT::v64i8, Mask)) {
17037 // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
17038 // PALIGNR will be cheaper than the second PSHUFB+OR.
17039 if (SDValue V = lowerShuffleAsByteRotateAndPermute(DL, MVT::v64i8, V1, V2,
17040 Mask, Subtarget, DAG))
17041 return V;
17043 // If we can't directly blend but can use PSHUFB, that will be better as it
17044 // can both shuffle and set up the inefficient blend.
17045 bool V1InUse, V2InUse;
17046 return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v64i8, V1, V2, Mask, Zeroable,
17047 DAG, V1InUse, V2InUse);
17050 // Try to simplify this by merging 128-bit lanes to enable a lane-based
17051 // shuffle.
17052 if (!V2.isUndef())
17053 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
17054 DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
17055 return Result;
17057 // VBMI can use VPERMV/VPERMV3 byte shuffles.
17058 if (Subtarget.hasVBMI())
17059 return lowerShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, Subtarget, DAG);
17061 return splitAndLowerShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG, /*SimpleOnly*/ false);
17064 /// High-level routine to lower various 512-bit x86 vector shuffles.
17066 /// This routine either breaks down the specific type of a 512-bit x86 vector
17067 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
17068 /// together based on the available instructions.
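///
/// For example (illustrative): on a target without BWI, a v64i8 shuffle never
/// reaches lowerV64I8Shuffle; unless one of the generic paths below matches
/// first, it is split into two 256-bit shuffles by the !hasBWI() block.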
17069 static SDValue lower512BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
17070 MVT VT, SDValue V1, SDValue V2,
17071 const APInt &Zeroable,
17072 const X86Subtarget &Subtarget,
17073 SelectionDAG &DAG) {
17074 assert(Subtarget.hasAVX512() &&
17075 "Cannot lower 512-bit vectors w/ basic ISA!");
17077 // If we have a single input to the zero element, insert that into V1 if we
17078 // can do so cheaply.
17079 int NumElts = Mask.size();
17080 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
17082 if (NumV2Elements == 1 && Mask[0] >= NumElts)
17083 if (SDValue Insertion = lowerShuffleAsElementInsertion(
17084 DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
17085 return Insertion;
17087 // Handle special cases where the lower or upper half is UNDEF.
17088 if (SDValue V =
17089 lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
17090 return V;
17092 // Check for being able to broadcast a single element.
17093 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, Mask,
17094 Subtarget, DAG))
17095 return Broadcast;
17097 if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI()) {
17098 // Try using bit ops for masking and blending before falling back to
17099 // splitting.
17100 if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
17101 Subtarget, DAG))
17102 return V;
17103 if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
17104 return V;
17106 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG, /*SimpleOnly*/ false);
17109 if (VT == MVT::v32f16 || VT == MVT::v32bf16) {
17110 if (!Subtarget.hasBWI())
17111 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG,
17112 /*SimpleOnly*/ false);
17114 V1 = DAG.getBitcast(MVT::v32i16, V1);
17115 V2 = DAG.getBitcast(MVT::v32i16, V2);
17116 return DAG.getBitcast(VT,
17117 DAG.getVectorShuffle(MVT::v32i16, DL, V1, V2, Mask));
17120 // Dispatch to each element type for lowering. If we don't have support for
17121 // specific element type shuffles at 512 bits, immediately split them and
17122 // lower them. Each lowering routine of a given type is allowed to assume that
17123 // the requisite ISA extensions for that element type are available.
17124 switch (VT.SimpleTy) {
17125 case MVT::v8f64:
17126 return lowerV8F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17127 case MVT::v16f32:
17128 return lowerV16F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17129 case MVT::v8i64:
17130 return lowerV8I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17131 case MVT::v16i32:
17132 return lowerV16I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17133 case MVT::v32i16:
17134 return lowerV32I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17135 case MVT::v64i8:
17136 return lowerV64I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17138 default:
17139 llvm_unreachable("Not a valid 512-bit x86 vector type!");
17143 static SDValue lower1BitShuffleAsKSHIFTR(const SDLoc &DL, ArrayRef<int> Mask,
17144 MVT VT, SDValue V1, SDValue V2,
17145 const X86Subtarget &Subtarget,
17146 SelectionDAG &DAG) {
17147 // Shuffle should be unary.
17148 if (!V2.isUndef())
17149 return SDValue();
17151 int ShiftAmt = -1;
17152 int NumElts = Mask.size();
17153 for (int i = 0; i != NumElts; ++i) {
17154 int M = Mask[i];
17155 assert((M == SM_SentinelUndef || (0 <= M && M < NumElts)) &&
17156 "Unexpected mask index.");
17157 if (M < 0)
17158 continue;
17160 // The first non-undef element determines our shift amount.
17161 if (ShiftAmt < 0) {
17162 ShiftAmt = M - i;
17163 // Need to be shifting right.
17164 if (ShiftAmt <= 0)
17165 return SDValue();
17167 // All non-undef elements must shift by the same amount.
17168 if (ShiftAmt != M - i)
17169 return SDValue();
17171 assert(ShiftAmt >= 0 && "All undef?");
17173 // Great, we found a shift right.
17174 SDValue Res = widenMaskVector(V1, false, Subtarget, DAG, DL);
17175 Res = DAG.getNode(X86ISD::KSHIFTR, DL, Res.getValueType(), Res,
17176 DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
17177 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
17178 DAG.getIntPtrConstant(0, DL));
17181 // Determine if this shuffle can be implemented with a KSHIFT instruction.
17182 // Returns the shift amount if possible or -1 if not. This is a simplified
17183 // version of matchShuffleAsShift.
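//
// For example (illustrative, assuming MaskOffset == 0): a v4i1 mask of
// <2, 3, Z, Z>, where the top two elements are zeroable, matches KSHIFTR with
// a shift amount of 2.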
17184 static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
17185 int MaskOffset, const APInt &Zeroable) {
17186 int Size = Mask.size();
17188 auto CheckZeros = [&](int Shift, bool Left) {
17189 for (int j = 0; j < Shift; ++j)
17190 if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
17191 return false;
17193 return true;
17196 auto MatchShift = [&](int Shift, bool Left) {
17197 unsigned Pos = Left ? Shift : 0;
17198 unsigned Low = Left ? 0 : Shift;
17199 unsigned Len = Size - Shift;
17200 return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
17203 for (int Shift = 1; Shift != Size; ++Shift)
17204 for (bool Left : {true, false})
17205 if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
17206 Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
17207 return Shift;
17210 return -1;
17214 // Lower vXi1 vector shuffles.
17215 // There is no dedicated instruction on AVX-512 that shuffles the masks.
17216 // The only way to shuffle bits is to sign-extend the mask vector to a SIMD
17217 // vector, shuffle it, and then truncate it back.
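//
// For example (illustrative): a v8i1 shuffle is sign-extended to v8i64 (or
// v8i32 with VLX), shuffled there, and converted back to a mask, unless one of
// the special cases below (subvector padding, KSHIFT, shuffling the SETCC
// operands) applies first.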
17218 static SDValue lower1BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
17219 MVT VT, SDValue V1, SDValue V2,
17220 const APInt &Zeroable,
17221 const X86Subtarget &Subtarget,
17222 SelectionDAG &DAG) {
17223 assert(Subtarget.hasAVX512() &&
17224 "Cannot lower 512-bit vectors w/o basic ISA!");
17226 int NumElts = Mask.size();
17227 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
17229 // Try to recognize shuffles that are just padding a subvector with zeros.
17230 int SubvecElts = 0;
17231 int Src = -1;
17232 for (int i = 0; i != NumElts; ++i) {
17233 if (Mask[i] >= 0) {
17234 // Grab the source from the first valid mask element. All subsequent
17235 // elements need to use this same source.
17236 if (Src < 0)
17237 Src = Mask[i] / NumElts;
17238 if (Src != (Mask[i] / NumElts) || (Mask[i] % NumElts) != i)
17239 break;
17242 ++SubvecElts;
17244 assert(SubvecElts != NumElts && "Identity shuffle?");
17246 // Clip to a power of 2.
17247 SubvecElts = llvm::bit_floor<uint32_t>(SubvecElts);
17249 // Make sure the number of zeroable bits at the top at least covers the bits
17250 // not covered by the subvector.
17251 if ((int)Zeroable.countl_one() >= (NumElts - SubvecElts)) {
17252 assert(Src >= 0 && "Expected a source!");
17253 MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
17254 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
17255 Src == 0 ? V1 : V2,
17256 DAG.getIntPtrConstant(0, DL));
17257 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
17258 DAG.getConstant(0, DL, VT),
17259 Extract, DAG.getIntPtrConstant(0, DL));
17262 // Try a simple shift right with undef elements. Later we'll try with zeros.
17263 if (SDValue Shift = lower1BitShuffleAsKSHIFTR(DL, Mask, VT, V1, V2, Subtarget,
17264 DAG))
17265 return Shift;
17267 // Try to match KSHIFTs.
17268 unsigned Offset = 0;
17269 for (SDValue V : { V1, V2 }) {
17270 unsigned Opcode;
17271 int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
17272 if (ShiftAmt >= 0) {
17273 SDValue Res = widenMaskVector(V, false, Subtarget, DAG, DL);
17274 MVT WideVT = Res.getSimpleValueType();
17275 // Widened right shifts need two shifts to ensure we shift in zeroes.
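// For example (illustrative): a v4i1 shuffle widened to v16i1 with an original
// shift amount of 1 becomes KSHIFTL by 12 followed by KSHIFTR by 13.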
17276 if (Opcode == X86ISD::KSHIFTR && WideVT != VT) {
17277 int WideElts = WideVT.getVectorNumElements();
17278 // Shift left to put the original vector in the MSBs of the new size.
17279 Res = DAG.getNode(X86ISD::KSHIFTL, DL, WideVT, Res,
17280 DAG.getTargetConstant(WideElts - NumElts, DL, MVT::i8));
17281 // Increase the shift amount to account for the left shift.
17282 ShiftAmt += WideElts - NumElts;
17285 Res = DAG.getNode(Opcode, DL, WideVT, Res,
17286 DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
17287 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
17288 DAG.getIntPtrConstant(0, DL));
17290 Offset += NumElts; // Increment for next iteration.
17293 // If we're performing a unary shuffle on a SETCC result, try to shuffle the
17294 // ops instead.
17295 // TODO: What other unary shuffles would benefit from this?
17296 if (NumV2Elements == 0 && V1.getOpcode() == ISD::SETCC && V1->hasOneUse()) {
17297 SDValue Op0 = V1.getOperand(0);
17298 SDValue Op1 = V1.getOperand(1);
17299 ISD::CondCode CC = cast<CondCodeSDNode>(V1.getOperand(2))->get();
17300 EVT OpVT = Op0.getValueType();
17301 if (OpVT.getScalarSizeInBits() >= 32 || isBroadcastShuffleMask(Mask))
17302 return DAG.getSetCC(
17303 DL, VT, DAG.getVectorShuffle(OpVT, DL, Op0, DAG.getUNDEF(OpVT), Mask),
17304 DAG.getVectorShuffle(OpVT, DL, Op1, DAG.getUNDEF(OpVT), Mask), CC);
17307 MVT ExtVT;
17308 switch (VT.SimpleTy) {
17309 default:
17310 llvm_unreachable("Expected a vector of i1 elements");
17311 case MVT::v2i1:
17312 ExtVT = MVT::v2i64;
17313 break;
17314 case MVT::v4i1:
17315 ExtVT = MVT::v4i32;
17316 break;
17317 case MVT::v8i1:
17318 // Take a 512-bit type since there are more shuffle options on KNL. If we
17319 // have VLX, use a 256-bit shuffle.
17320 ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
17321 break;
17322 case MVT::v16i1:
17323 // Take 512-bit type, unless we are avoiding 512-bit types and have the
17324 // 256-bit operation available.
17325 ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
17326 break;
17327 case MVT::v32i1:
17328 // Take 512-bit type, unless we are avoiding 512-bit types and have the
17329 // 256-bit operation available.
17330 assert(Subtarget.hasBWI() && "Expected AVX512BW support");
17331 ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
17332 break;
17333 case MVT::v64i1:
17334 // Fall back to scalarization. FIXME: We can do better if the shuffle
17335 // can be partitioned cleanly.
17336 if (!Subtarget.useBWIRegs())
17337 return SDValue();
17338 ExtVT = MVT::v64i8;
17339 break;
17342 V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
17343 V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
17345 SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
17346 // Since i1 was sign-extended, we can use X86ISD::CVT2MASK.
17347 int NumElems = VT.getVectorNumElements();
17348 if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
17349 (Subtarget.hasDQI() && (NumElems < 32)))
17350 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
17351 Shuffle, ISD::SETGT);
17353 return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
17356 /// Helper function that returns true if the shuffle mask should be
17357 /// commuted to improve canonicalization.
17358 static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
17359 int NumElements = Mask.size();
17361 int NumV1Elements = 0, NumV2Elements = 0;
17362 for (int M : Mask)
17363 if (M < 0)
17364 continue;
17365 else if (M < NumElements)
17366 ++NumV1Elements;
17367 else
17368 ++NumV2Elements;
17370 // Commute the shuffle as needed such that more elements come from V1 than
17371 // V2. This allows us to match the shuffle pattern strictly on how many
17372 // elements come from V1 without handling the symmetric cases.
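// For example (illustrative): for the 4-element mask <4,5,6,3>, three indices
// come from V2 and only one from V1, so we return true and the caller commutes
// the operands, rewriting the mask to <0,1,2,7>.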
17373 if (NumV2Elements > NumV1Elements)
17374 return true;
17376 assert(NumV1Elements > 0 && "No V1 indices");
17378 if (NumV2Elements == 0)
17379 return false;
17381 // When the number of V1 and V2 elements are the same, try to minimize the
17382 // number of uses of V2 in the low half of the vector. When that is tied,
17383 // ensure that the sum of indices for V1 is equal to or lower than the sum
17384 // of indices for V2. When those are equal, try to ensure that the number of odd
17385 // indices for V1 is lower than the number of odd indices for V2.
17386 if (NumV1Elements == NumV2Elements) {
17387 int LowV1Elements = 0, LowV2Elements = 0;
17388 for (int M : Mask.slice(0, NumElements / 2))
17389 if (M >= NumElements)
17390 ++LowV2Elements;
17391 else if (M >= 0)
17392 ++LowV1Elements;
17393 if (LowV2Elements > LowV1Elements)
17394 return true;
17395 if (LowV2Elements == LowV1Elements) {
17396 int SumV1Indices = 0, SumV2Indices = 0;
17397 for (int i = 0, Size = Mask.size(); i < Size; ++i)
17398 if (Mask[i] >= NumElements)
17399 SumV2Indices += i;
17400 else if (Mask[i] >= 0)
17401 SumV1Indices += i;
17402 if (SumV2Indices < SumV1Indices)
17403 return true;
17404 if (SumV2Indices == SumV1Indices) {
17405 int NumV1OddIndices = 0, NumV2OddIndices = 0;
17406 for (int i = 0, Size = Mask.size(); i < Size; ++i)
17407 if (Mask[i] >= NumElements)
17408 NumV2OddIndices += i % 2;
17409 else if (Mask[i] >= 0)
17410 NumV1OddIndices += i % 2;
17411 if (NumV2OddIndices < NumV1OddIndices)
17412 return true;
17417 return false;
17420 static bool canCombineAsMaskOperation(SDValue V,
17421 const X86Subtarget &Subtarget) {
17422 if (!Subtarget.hasAVX512())
17423 return false;
17425 if (!V.getValueType().isSimple())
17426 return false;
17428 MVT VT = V.getSimpleValueType().getScalarType();
17429 if ((VT == MVT::i16 || VT == MVT::i8) && !Subtarget.hasBWI())
17430 return false;
17432 // If vec width < 512, widen i8/i16 even with BWI as blendd/blendps/blendpd
17433 // are preferable to blendw/blendvb/masked-mov.
17434 if ((VT == MVT::i16 || VT == MVT::i8) &&
17435 V.getSimpleValueType().getSizeInBits() < 512)
17436 return false;
17438 auto HasMaskOperation = [&](SDValue V) {
17439 // TODO: Currently we only check a limited set of opcodes. We could probably
17440 // extend it to all binary operations by checking TLI.isBinOp().
17441 switch (V->getOpcode()) {
17442 default:
17443 return false;
17444 case ISD::ADD:
17445 case ISD::SUB:
17446 case ISD::AND:
17447 case ISD::XOR:
17448 case ISD::OR:
17449 case ISD::SMAX:
17450 case ISD::SMIN:
17451 case ISD::UMAX:
17452 case ISD::UMIN:
17453 case ISD::ABS:
17454 case ISD::SHL:
17455 case ISD::SRL:
17456 case ISD::SRA:
17457 case ISD::MUL:
17458 break;
17460 if (!V->hasOneUse())
17461 return false;
17463 return true;
17466 if (HasMaskOperation(V))
17467 return true;
17469 return false;
17472 // Forward declaration.
17473 static SDValue canonicalizeShuffleMaskWithHorizOp(
17474 MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask,
17475 unsigned RootSizeInBits, const SDLoc &DL, SelectionDAG &DAG,
17476 const X86Subtarget &Subtarget);
17478 /// Top-level lowering for x86 vector shuffles.
17480 /// This handles decomposition, canonicalization, and lowering of all x86
17481 /// vector shuffles. Most of the specific lowering strategies are encapsulated
17482 /// above in helper routines. The canonicalization attempts to widen shuffles
17483 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
17484 /// s.t. only one of the two inputs needs to be tested, etc.
17485 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
17486 SelectionDAG &DAG) {
17487 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
17488 ArrayRef<int> OrigMask = SVOp->getMask();
17489 SDValue V1 = Op.getOperand(0);
17490 SDValue V2 = Op.getOperand(1);
17491 MVT VT = Op.getSimpleValueType();
17492 int NumElements = VT.getVectorNumElements();
17493 SDLoc DL(Op);
17494 bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
17496 assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
17497 "Can't lower MMX shuffles");
17499 bool V1IsUndef = V1.isUndef();
17500 bool V2IsUndef = V2.isUndef();
17501 if (V1IsUndef && V2IsUndef)
17502 return DAG.getUNDEF(VT);
17504 // When we create a shuffle node we put the UNDEF node as the second operand,
17505 // but in some cases the first operand may be transformed to UNDEF.
17506 // In this case we should just commute the node.
17507 if (V1IsUndef)
17508 return DAG.getCommutedVectorShuffle(*SVOp);
17510 // Check for non-undef masks pointing at an undef vector and make the masks
17511 // undef as well. This makes it easier to match the shuffle based solely on
17512 // the mask.
17513 if (V2IsUndef &&
17514 any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
17515 SmallVector<int, 8> NewMask(OrigMask);
17516 for (int &M : NewMask)
17517 if (M >= NumElements)
17518 M = -1;
17519 return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
17522 // Check for illegal shuffle mask element index values.
17523 int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
17524 (void)MaskUpperLimit;
17525 assert(llvm::all_of(OrigMask,
17526 [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
17527 "Out of bounds shuffle index");
17529 // We actually see shuffles that are entirely re-arrangements of a set of
17530 // zero inputs. This mostly happens while decomposing complex shuffles into
17531 // simple ones. Directly lower these as a buildvector of zeros.
17532 APInt KnownUndef, KnownZero;
17533 computeZeroableShuffleElements(OrigMask, V1, V2, KnownUndef, KnownZero);
17535 APInt Zeroable = KnownUndef | KnownZero;
17536 if (Zeroable.isAllOnes())
17537 return getZeroVector(VT, Subtarget, DAG, DL);
17539 bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());
17541 // Try to collapse shuffles into using a vector type with fewer elements but
17542 // wider element types. We cap this to not form integers or floating point
17543 // elements wider than 64 bits. It does not seem beneficial to form i128
17544 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
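// For example (illustrative): a v8i32 mask <0,1,4,5,2,3,6,7> widens to the
// v4i64 mask <0,2,1,3>.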
17545 SmallVector<int, 16> WidenedMask;
17546 if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
17547 !canCombineAsMaskOperation(V1, Subtarget) &&
17548 !canCombineAsMaskOperation(V2, Subtarget) &&
17549 canWidenShuffleElements(OrigMask, Zeroable, V2IsZero, WidenedMask)) {
17550 // Shuffle mask widening should not interfere with a broadcast opportunity
17551 // by obfuscating the operands with bitcasts.
17552 // TODO: Avoid lowering directly from this top-level function: make this
17553 // a query (canLowerAsBroadcast) and defer lowering to the type-based calls.
17554 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, OrigMask,
17555 Subtarget, DAG))
17556 return Broadcast;
17558 MVT NewEltVT = VT.isFloatingPoint()
17559 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
17560 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
17561 int NewNumElts = NumElements / 2;
17562 MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
17563 // Make sure that the new vector type is legal. For example, v2f64 isn't
17564 // legal on SSE1.
17565 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
17566 if (V2IsZero) {
17567 // Modify the new Mask to take all zeros from the all-zero vector.
17568 // Choose indices that are blend-friendly.
17569 bool UsedZeroVector = false;
17570 assert(is_contained(WidenedMask, SM_SentinelZero) &&
17571 "V2's non-undef elements are used?!");
17572 for (int i = 0; i != NewNumElts; ++i)
17573 if (WidenedMask[i] == SM_SentinelZero) {
17574 WidenedMask[i] = i + NewNumElts;
17575 UsedZeroVector = true;
17577 // Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
17578 // some elements to be undef.
17579 if (UsedZeroVector)
17580 V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
17582 V1 = DAG.getBitcast(NewVT, V1);
17583 V2 = DAG.getBitcast(NewVT, V2);
17584 return DAG.getBitcast(
17585 VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
17589 SmallVector<SDValue> Ops = {V1, V2};
17590 SmallVector<int> Mask(OrigMask);
17592 // Canonicalize the shuffle with any horizontal ops inputs.
17593 // NOTE: This may update Ops and Mask.
17594 if (SDValue HOp = canonicalizeShuffleMaskWithHorizOp(
17595 Ops, Mask, VT.getSizeInBits(), DL, DAG, Subtarget))
17596 return DAG.getBitcast(VT, HOp);
17598 V1 = DAG.getBitcast(VT, Ops[0]);
17599 V2 = DAG.getBitcast(VT, Ops[1]);
17600 assert(NumElements == (int)Mask.size() &&
17601 "canonicalizeShuffleMaskWithHorizOp "
17602 "shouldn't alter the shuffle mask size");
17604 // Commute the shuffle if it will improve canonicalization.
17605 if (canonicalizeShuffleMaskWithCommute(Mask)) {
17606 ShuffleVectorSDNode::commuteMask(Mask);
17607 std::swap(V1, V2);
17610 // For each vector width, delegate to a specialized lowering routine.
17611 if (VT.is128BitVector())
17612 return lower128BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17614 if (VT.is256BitVector())
17615 return lower256BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17617 if (VT.is512BitVector())
17618 return lower512BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17620 if (Is1BitVector)
17621 return lower1BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17623 llvm_unreachable("Unimplemented!");
17626 /// Try to lower a VSELECT instruction to a vector shuffle.
17627 static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
17628 const X86Subtarget &Subtarget,
17629 SelectionDAG &DAG) {
17630 SDValue Cond = Op.getOperand(0);
17631 SDValue LHS = Op.getOperand(1);
17632 SDValue RHS = Op.getOperand(2);
17633 MVT VT = Op.getSimpleValueType();
17635 // Only non-legal VSELECTs reach this lowering; convert those into generic
17636 // shuffles and re-use the shuffle lowering path for blends.
17637 if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
17638 SmallVector<int, 32> Mask;
17639 if (createShuffleMaskFromVSELECT(Mask, Cond))
17640 return DAG.getVectorShuffle(VT, SDLoc(Op), LHS, RHS, Mask);
17643 return SDValue();
17646 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
17647 SDValue Cond = Op.getOperand(0);
17648 SDValue LHS = Op.getOperand(1);
17649 SDValue RHS = Op.getOperand(2);
17651 SDLoc dl(Op);
17652 MVT VT = Op.getSimpleValueType();
17653 if (isSoftF16(VT, Subtarget)) {
17654 MVT NVT = VT.changeVectorElementTypeToInteger();
17655 return DAG.getBitcast(VT, DAG.getNode(ISD::VSELECT, dl, NVT, Cond,
17656 DAG.getBitcast(NVT, LHS),
17657 DAG.getBitcast(NVT, RHS)));
17660 // A vselect where all conditions and data are constants can be optimized into
17661 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
17662 if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
17663 ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
17664 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
17665 return SDValue();
17667 // Try to lower this to a blend-style vector shuffle. This can handle all
17668 // constant condition cases.
17669 if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
17670 return BlendOp;
17672 // If this VSELECT has a vector of i1 as a mask, it will be directly matched
17673 // with patterns on the mask registers on AVX-512.
17674 MVT CondVT = Cond.getSimpleValueType();
17675 unsigned CondEltSize = Cond.getScalarValueSizeInBits();
17676 if (CondEltSize == 1)
17677 return Op;
17679 // Variable blends are only legal from SSE4.1 onward.
17680 if (!Subtarget.hasSSE41())
17681 return SDValue();
17683 unsigned EltSize = VT.getScalarSizeInBits();
17684 unsigned NumElts = VT.getVectorNumElements();
17686 // Expand v32i16/v64i8 without BWI.
17687 if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
17688 return SDValue();
17690 // If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
17691 // into an i1 condition so that we can use the mask-based 512-bit blend
17692 // instructions.
17693 if (VT.getSizeInBits() == 512) {
17694 // Build a mask by testing the condition against zero.
17695 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
17696 SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
17697 DAG.getConstant(0, dl, CondVT),
17698 ISD::SETNE);
17699 // Now return a new VSELECT using the mask.
17700 return DAG.getSelect(dl, VT, Mask, LHS, RHS);
17703 // SEXT/TRUNC cases where the mask doesn't match the destination size.
17704 if (CondEltSize != EltSize) {
17705 // If we don't have a sign splat, rely on the expansion.
17706 if (CondEltSize != DAG.ComputeNumSignBits(Cond))
17707 return SDValue();
17709 MVT NewCondSVT = MVT::getIntegerVT(EltSize);
17710 MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
17711 Cond = DAG.getSExtOrTrunc(Cond, dl, NewCondVT);
17712 return DAG.getNode(ISD::VSELECT, dl, VT, Cond, LHS, RHS);
17715 // Only some types will be legal on some subtargets. If we can emit a legal
17716 // VSELECT-matching blend, return Op, but if we need to expand, return
17717 // a null value.
17718 switch (VT.SimpleTy) {
17719 default:
17720 // Most of the vector types have blends past SSE4.1.
17721 return Op;
17723 case MVT::v32i8:
17724 // The byte blends for AVX vectors were introduced only in AVX2.
17725 if (Subtarget.hasAVX2())
17726 return Op;
17728 return SDValue();
17730 case MVT::v8i16:
17731 case MVT::v16i16: {
17732 // Bitcast everything to the vXi8 type and use a vXi8 vselect.
17733 MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
17734 Cond = DAG.getBitcast(CastVT, Cond);
17735 LHS = DAG.getBitcast(CastVT, LHS);
17736 RHS = DAG.getBitcast(CastVT, RHS);
17737 SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
17738 return DAG.getBitcast(VT, Select);
17743 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
17744 MVT VT = Op.getSimpleValueType();
17745 SDValue Vec = Op.getOperand(0);
17746 SDValue Idx = Op.getOperand(1);
17747 assert(isa<ConstantSDNode>(Idx) && "Constant index expected");
17748 SDLoc dl(Op);
17750 if (!Vec.getSimpleValueType().is128BitVector())
17751 return SDValue();
17753 if (VT.getSizeInBits() == 8) {
17754 // If IdxVal is 0, it's cheaper to do a move instead of a pextrb, unless
17755 // we're going to zero extend the register or fold the store.
17756 if (llvm::isNullConstant(Idx) && !X86::mayFoldIntoZeroExtend(Op) &&
17757 !X86::mayFoldIntoStore(Op))
17758 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
17759 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17760 DAG.getBitcast(MVT::v4i32, Vec), Idx));
17762 unsigned IdxVal = Idx->getAsZExtVal();
17763 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, Vec,
17764 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
17765 return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
17768 if (VT == MVT::f32) {
17769 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
17770 // the result back to an FR32 register. It's only worth matching if the
17771 // result has a single use which is a store or a bitcast to i32. And in
17772 // the case of a store, it's not worth it if the index is a constant 0,
17773 // because a MOVSSmr can be used instead, which is smaller and faster.
17774 if (!Op.hasOneUse())
17775 return SDValue();
17776 SDNode *User = *Op.getNode()->use_begin();
17777 if ((User->getOpcode() != ISD::STORE || isNullConstant(Idx)) &&
17778 (User->getOpcode() != ISD::BITCAST ||
17779 User->getValueType(0) != MVT::i32))
17780 return SDValue();
17781 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17782 DAG.getBitcast(MVT::v4i32, Vec), Idx);
17783 return DAG.getBitcast(MVT::f32, Extract);
17786 if (VT == MVT::i32 || VT == MVT::i64)
17787 return Op;
17789 return SDValue();
17792 /// Extract one bit from a mask vector, like v16i1 or v8i1.
17793 /// AVX-512 feature.
17794 static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
17795 const X86Subtarget &Subtarget) {
17796 SDValue Vec = Op.getOperand(0);
17797 SDLoc dl(Vec);
17798 MVT VecVT = Vec.getSimpleValueType();
17799 SDValue Idx = Op.getOperand(1);
17800 auto* IdxC = dyn_cast<ConstantSDNode>(Idx);
17801 MVT EltVT = Op.getSimpleValueType();
17803 assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
17804 "Unexpected vector type in ExtractBitFromMaskVector");
17806 // A variable index can't be handled in mask registers,
17807 // so extend the vector to VR512/VR128.
17808 if (!IdxC) {
17809 unsigned NumElts = VecVT.getVectorNumElements();
17810 // Extending v8i1/v16i1 to 512 bits gets better performance on KNL
17811 // than extending to 128/256 bits.
17812 if (NumElts == 1) {
17813 Vec = widenMaskVector(Vec, false, Subtarget, DAG, dl);
17814 MVT IntVT = MVT::getIntegerVT(Vec.getValueType().getVectorNumElements());
17815 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, DAG.getBitcast(IntVT, Vec));
17817 MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
17818 MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
17819 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
17820 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
17821 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
17824 unsigned IdxVal = IdxC->getZExtValue();
17825 if (IdxVal == 0) // the operation is legal
17826 return Op;
17828 // Extend to natively supported kshift.
17829 Vec = widenMaskVector(Vec, false, Subtarget, DAG, dl);
17831 // Use kshiftr instruction to move to the lower element.
17832 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, Vec.getSimpleValueType(), Vec,
17833 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
17835 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
17836 DAG.getIntPtrConstant(0, dl));
17839 // Helper to find all the extracted elements from a vector.
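// For example (illustrative): if a v8i16 node is only used by
// EXTRACT_VECTOR_ELT with constant indices 1 and 3, this returns an APInt with
// just bits 1 and 3 set; any other kind of use conservatively demands all
// elements.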
17840 static APInt getExtractedDemandedElts(SDNode *N) {
17841 MVT VT = N->getSimpleValueType(0);
17842 unsigned NumElts = VT.getVectorNumElements();
17843 APInt DemandedElts = APInt::getZero(NumElts);
17844 for (SDNode *User : N->uses()) {
17845 switch (User->getOpcode()) {
17846 case X86ISD::PEXTRB:
17847 case X86ISD::PEXTRW:
17848 case ISD::EXTRACT_VECTOR_ELT:
17849 if (!isa<ConstantSDNode>(User->getOperand(1))) {
17850 DemandedElts.setAllBits();
17851 return DemandedElts;
17853 DemandedElts.setBit(User->getConstantOperandVal(1));
17854 break;
17855 case ISD::BITCAST: {
17856 if (!User->getValueType(0).isSimple() ||
17857 !User->getValueType(0).isVector()) {
17858 DemandedElts.setAllBits();
17859 return DemandedElts;
17861 APInt DemandedSrcElts = getExtractedDemandedElts(User);
17862 DemandedElts |= APIntOps::ScaleBitMask(DemandedSrcElts, NumElts);
17863 break;
17865 default:
17866 DemandedElts.setAllBits();
17867 return DemandedElts;
17870 return DemandedElts;
17873 SDValue
17874 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
17875 SelectionDAG &DAG) const {
17876 SDLoc dl(Op);
17877 SDValue Vec = Op.getOperand(0);
17878 MVT VecVT = Vec.getSimpleValueType();
17879 SDValue Idx = Op.getOperand(1);
17880 auto* IdxC = dyn_cast<ConstantSDNode>(Idx);
17882 if (VecVT.getVectorElementType() == MVT::i1)
17883 return ExtractBitFromMaskVector(Op, DAG, Subtarget);
17885 if (!IdxC) {
17886 // It's more profitable to go through memory (1 cycle throughput)
17887 // than to use a VMOVD + VPERMV/PSHUFB sequence (2/3 cycles throughput).
17888 // The IACA tool was used to get the performance estimate
17889 // (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
17891 // example : extractelement <16 x i8> %a, i32 %i
17893 // Block Throughput: 3.00 Cycles
17894 // Throughput Bottleneck: Port5
17896 // | Num Of | Ports pressure in cycles | |
17897 // | Uops | 0 - DV | 5 | 6 | 7 | |
17898 // ---------------------------------------------
17899 // | 1 | | 1.0 | | | CP | vmovd xmm1, edi
17900 // | 1 | | 1.0 | | | CP | vpshufb xmm0, xmm0, xmm1
17901 // | 2 | 1.0 | 1.0 | | | CP | vpextrb eax, xmm0, 0x0
17902 // Total Num Of Uops: 4
17905 // Block Throughput: 1.00 Cycles
17906 // Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
17908 // | | Ports pressure in cycles | |
17909 // |Uops| 1 | 2 - D |3 - D | 4 | 5 | |
17910 // ---------------------------------------------------------
17911 // |2^ | | 0.5 | 0.5 |1.0| |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
17912 // |1 |0.5| | | |0.5| | lea rax, ptr [rsp-0x18]
17913 // |1 | |0.5, 0.5|0.5, 0.5| | |CP| mov al, byte ptr [rdi+rax*1]
17914 // Total Num Of Uops: 4
17916 return SDValue();
17919 unsigned IdxVal = IdxC->getZExtValue();
17921 // If this is a 256-bit vector result, first extract the 128-bit vector and
17922 // then extract the element from the 128-bit vector.
17923 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
17924 // Get the 128-bit vector.
17925 Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
17926 MVT EltVT = VecVT.getVectorElementType();
17928 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
17929 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
17931 // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
17932 // this can be done with a mask.
17933 IdxVal &= ElemsPerChunk - 1;
17934 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
17935 DAG.getIntPtrConstant(IdxVal, dl));
17938 assert(VecVT.is128BitVector() && "Unexpected vector length");
17940 MVT VT = Op.getSimpleValueType();
17942 if (VT == MVT::i16) {
17943 // If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
17944 // we're going to zero extend the register or fold the store (SSE41 only).
17945 if (IdxVal == 0 && !X86::mayFoldIntoZeroExtend(Op) &&
17946 !(Subtarget.hasSSE41() && X86::mayFoldIntoStore(Op))) {
17947 if (Subtarget.hasFP16())
17948 return Op;
17950 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
17951 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17952 DAG.getBitcast(MVT::v4i32, Vec), Idx));
17955 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32, Vec,
17956 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
17957 return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
17960 if (Subtarget.hasSSE41())
17961 if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
17962 return Res;
17964 // Only extract a single element from a v16i8 source - determine the common
17965 // DWORD/WORD that all extractions share, and extract the sub-byte.
17966 // TODO: Add QWORD MOVQ extraction?
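// For example (illustrative): if only element 5 of a v16i8 source is
// extracted, the code below extracts i16 element 2 (bytes 4..5) and shifts
// right by 8 bits to isolate the upper byte.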
17967 if (VT == MVT::i8) {
17968 APInt DemandedElts = getExtractedDemandedElts(Vec.getNode());
17969 assert(DemandedElts.getBitWidth() == 16 && "Vector width mismatch");
17971 // Extract either the lowest i32 or any i16, and extract the sub-byte.
17972 int DWordIdx = IdxVal / 4;
17973 if (DWordIdx == 0 && DemandedElts == (DemandedElts & 15)) {
17974 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17975 DAG.getBitcast(MVT::v4i32, Vec),
17976 DAG.getIntPtrConstant(DWordIdx, dl));
17977 int ShiftVal = (IdxVal % 4) * 8;
17978 if (ShiftVal != 0)
17979 Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
17980 DAG.getConstant(ShiftVal, dl, MVT::i8));
17981 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
17984 int WordIdx = IdxVal / 2;
17985 if (DemandedElts == (DemandedElts & (3 << (WordIdx * 2)))) {
17986 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
17987 DAG.getBitcast(MVT::v8i16, Vec),
17988 DAG.getIntPtrConstant(WordIdx, dl));
17989 int ShiftVal = (IdxVal % 2) * 8;
17990 if (ShiftVal != 0)
17991 Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
17992 DAG.getConstant(ShiftVal, dl, MVT::i8));
17993 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
17997 if (VT == MVT::f16 || VT.getSizeInBits() == 32) {
17998 if (IdxVal == 0)
17999 return Op;
18001 // Shuffle the element to the lowest element, then movss or movsh.
18002 SmallVector<int, 8> Mask(VecVT.getVectorNumElements(), -1);
18003 Mask[0] = static_cast<int>(IdxVal);
18004 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
18005 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
18006 DAG.getIntPtrConstant(0, dl));
18009 if (VT.getSizeInBits() == 64) {
18010 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
18011 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
18012 // to match extract_elt for f64.
18013 if (IdxVal == 0)
18014 return Op;
18016 // UNPCKHPD the element to the lowest double word, then movsd.
18017 // Note if the lower 64 bits of the result of the UNPCKHPD are then stored
18018 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
18019 int Mask[2] = { 1, -1 };
18020 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
18021 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
18022 DAG.getIntPtrConstant(0, dl));
18025 return SDValue();
18028 /// Insert one bit into a mask vector, like v16i1 or v8i1.
18029 /// AVX-512 feature.
18030 static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
18031 const X86Subtarget &Subtarget) {
18032 SDLoc dl(Op);
18033 SDValue Vec = Op.getOperand(0);
18034 SDValue Elt = Op.getOperand(1);
18035 SDValue Idx = Op.getOperand(2);
18036 MVT VecVT = Vec.getSimpleValueType();
18038 if (!isa<ConstantSDNode>(Idx)) {
18039 // Non-constant index. Extend the source and destination,
18040 // insert the element, and then truncate the result.
18041 unsigned NumElts = VecVT.getVectorNumElements();
18042 MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
18043 MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
18044 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
18045 DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
18046 DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt), Idx);
18047 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
18050 // Copy into a k-register, extract to v1i1 and insert_subvector.
18051 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);
18052 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec, Idx);
18055 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
18056 SelectionDAG &DAG) const {
18057 MVT VT = Op.getSimpleValueType();
18058 MVT EltVT = VT.getVectorElementType();
18059 unsigned NumElts = VT.getVectorNumElements();
18060 unsigned EltSizeInBits = EltVT.getScalarSizeInBits();
18062 if (EltVT == MVT::i1)
18063 return InsertBitToMaskVector(Op, DAG, Subtarget);
18065 SDLoc dl(Op);
18066 SDValue N0 = Op.getOperand(0);
18067 SDValue N1 = Op.getOperand(1);
18068 SDValue N2 = Op.getOperand(2);
18069 auto *N2C = dyn_cast<ConstantSDNode>(N2);
18071 if (EltVT == MVT::bf16) {
18072 MVT IVT = VT.changeVectorElementTypeToInteger();
18073 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, IVT,
18074 DAG.getBitcast(IVT, N0),
18075 DAG.getBitcast(MVT::i16, N1), N2);
18076 return DAG.getBitcast(VT, Res);
18079 if (!N2C) {
18080 // Variable insertion indices: usually we're better off spilling to the stack,
18081 // but AVX512 can use a variable compare+select by comparing against all
18082 // possible vector indices, and FP insertion has less gpr->simd traffic.
18083 if (!(Subtarget.hasBWI() ||
18084 (Subtarget.hasAVX512() && EltSizeInBits >= 32) ||
18085 (Subtarget.hasSSE41() && (EltVT == MVT::f32 || EltVT == MVT::f64))))
18086 return SDValue();
18088 MVT IdxSVT = MVT::getIntegerVT(EltSizeInBits);
18089 MVT IdxVT = MVT::getVectorVT(IdxSVT, NumElts);
18090 if (!isTypeLegal(IdxSVT) || !isTypeLegal(IdxVT))
18091 return SDValue();
18093 SDValue IdxExt = DAG.getZExtOrTrunc(N2, dl, IdxSVT);
18094 SDValue IdxSplat = DAG.getSplatBuildVector(IdxVT, dl, IdxExt);
18095 SDValue EltSplat = DAG.getSplatBuildVector(VT, dl, N1);
18097 SmallVector<SDValue, 16> RawIndices;
18098 for (unsigned I = 0; I != NumElts; ++I)
18099 RawIndices.push_back(DAG.getConstant(I, dl, IdxSVT));
18100 SDValue Indices = DAG.getBuildVector(IdxVT, dl, RawIndices);
18102 // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0.
18103 return DAG.getSelectCC(dl, IdxSplat, Indices, EltSplat, N0,
18104 ISD::CondCode::SETEQ);
18107 if (N2C->getAPIntValue().uge(NumElts))
18108 return SDValue();
18109 uint64_t IdxVal = N2C->getZExtValue();
18111 bool IsZeroElt = X86::isZeroNode(N1);
18112 bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);
18114 if (IsZeroElt || IsAllOnesElt) {
18115 // Lower insertion of v16i8/v32i8/v16i16 -1 elts as an 'OR' blend.
18116 // We don't deal with i8 0 since it appears to be handled elsewhere.
18117 if (IsAllOnesElt &&
18118 ((VT == MVT::v16i8 && !Subtarget.hasSSE41()) ||
18119 ((VT == MVT::v32i8 || VT == MVT::v16i16) && !Subtarget.hasInt256()))) {
18120 SDValue ZeroCst = DAG.getConstant(0, dl, VT.getScalarType());
18121 SDValue OnesCst = DAG.getAllOnesConstant(dl, VT.getScalarType());
18122 SmallVector<SDValue, 8> CstVectorElts(NumElts, ZeroCst);
18123 CstVectorElts[IdxVal] = OnesCst;
18124 SDValue CstVector = DAG.getBuildVector(VT, dl, CstVectorElts);
18125 return DAG.getNode(ISD::OR, dl, VT, N0, CstVector);
18127 // See if we can do this more efficiently with a blend shuffle with a
18128 // rematerializable vector.
18129 if (Subtarget.hasSSE41() &&
18130 (EltSizeInBits >= 16 || (IsZeroElt && !VT.is128BitVector()))) {
18131 SmallVector<int, 8> BlendMask;
18132 for (unsigned i = 0; i != NumElts; ++i)
18133 BlendMask.push_back(i == IdxVal ? i + NumElts : i);
18134 SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
18135 : getOnesVector(VT, DAG, dl);
18136 return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
18140 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
18141 // into that, and then insert the subvector back into the result.
18142 if (VT.is256BitVector() || VT.is512BitVector()) {
18143 // With a 256-bit vector, we can insert into the zero element efficiently
18144 // using a blend if we have AVX or AVX2 and the right data type.
18145 if (VT.is256BitVector() && IdxVal == 0) {
18146 // TODO: It is worthwhile to cast integer to floating point and back
18147 // and incur a domain crossing penalty if that's what we'll end up
18148 // doing anyway after extracting to a 128-bit vector.
18149 if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
18150 (Subtarget.hasAVX2() && (EltVT == MVT::i32 || EltVT == MVT::i64))) {
18151 SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
18152 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec,
18153 DAG.getTargetConstant(1, dl, MVT::i8));
18157 unsigned NumEltsIn128 = 128 / EltSizeInBits;
18158 assert(isPowerOf2_32(NumEltsIn128) &&
18159 "Vectors will always have power-of-two number of elements.");
18161 // If we are not inserting into the low 128-bit vector chunk,
18162 // then prefer the broadcast+blend sequence.
18163 // FIXME: relax the profitability check iff all N1 uses are insertions.
18164 if (IdxVal >= NumEltsIn128 &&
18165 ((Subtarget.hasAVX2() && EltSizeInBits != 8) ||
18166 (Subtarget.hasAVX() && (EltSizeInBits >= 32) &&
18167 X86::mayFoldLoad(N1, Subtarget)))) {
18168 SDValue N1SplatVec = DAG.getSplatBuildVector(VT, dl, N1);
18169 SmallVector<int, 8> BlendMask;
18170 for (unsigned i = 0; i != NumElts; ++i)
18171 BlendMask.push_back(i == IdxVal ? i + NumElts : i);
18172 return DAG.getVectorShuffle(VT, dl, N0, N1SplatVec, BlendMask);
18175 // Get the desired 128-bit vector chunk.
18176 SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
18178 // Insert the element into the desired chunk.
18179 // Since NumEltsIn128 is a power of 2, we can use a mask instead of modulo.
18180 unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
18182 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
18183 DAG.getIntPtrConstant(IdxIn128, dl));
18185 // Insert the changed part back into the bigger vector
18186 return insert128BitVector(N0, V, IdxVal, DAG, dl);
18188 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
18190 // This will be just movw/movd/movq/movsh/movss/movsd.
18191 if (IdxVal == 0 && ISD::isBuildVectorAllZeros(N0.getNode())) {
18192 if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
18193 EltVT == MVT::f16 || EltVT == MVT::i64) {
18194 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
18195 return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
18198 // We can't directly insert an i8 or i16 into a vector, so zero extend
18199 // it to i32 first.
18200 if (EltVT == MVT::i16 || EltVT == MVT::i8) {
18201 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, N1);
18202 MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
18203 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, N1);
18204 N1 = getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
18205 return DAG.getBitcast(VT, N1);
18209 // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
18210 // argument. SSE41 is required for pinsrb.
18211 if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
18212 unsigned Opc;
18213 if (VT == MVT::v8i16) {
18214 assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
18215 Opc = X86ISD::PINSRW;
18216 } else {
18217 assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
18218 assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
18219 Opc = X86ISD::PINSRB;
18222 assert(N1.getValueType() != MVT::i32 && "Unexpected VT");
18223 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
18224 N2 = DAG.getTargetConstant(IdxVal, dl, MVT::i8);
18225 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
18228 if (Subtarget.hasSSE41()) {
18229 if (EltVT == MVT::f32) {
18230 // Bits [7:6] of the constant are the source select. This will always be
18231 // zero here. The DAG Combiner may combine an extract_elt index into
18232 // these bits. For example (insert (extract, 3), 2) could be matched by
18233 // putting the '3' into bits [7:6] of X86ISD::INSERTPS.
18234 // Bits [5:4] of the constant are the destination select. This is the
18235 // value of the incoming immediate.
18236 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
18237 // combine either bitwise AND or insert of float 0.0 to set these bits.
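// For example (illustrative): inserting into element 2 with no zeroing and
// source select 0 uses the immediate 0x20, i.e. IdxVal << 4.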
18239 bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize();
18240 if (IdxVal == 0 && (!MinSize || !X86::mayFoldLoad(N1, Subtarget))) {
18241 // If this is an insertion of 32 bits into the low 32 bits of
18242 // a vector, we prefer to generate a blend with immediate rather
18243 // than an insertps. Blends are simpler operations in hardware and so
18244 // will always have equal or better performance than insertps.
18245 // But if optimizing for size and there's a load folding opportunity,
18246 // generate insertps because blendps does not have a 32-bit memory
18247 // operand form.
18248 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
18249 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1,
18250 DAG.getTargetConstant(1, dl, MVT::i8));
18252 // Create this as a scalar-to-vector.
18253 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
18254 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1,
18255 DAG.getTargetConstant(IdxVal << 4, dl, MVT::i8));
18258 // PINSR* works with constant index.
18259 if (EltVT == MVT::i32 || EltVT == MVT::i64)
18260 return Op;
18263 return SDValue();
18266 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
18267 SelectionDAG &DAG) {
18268 SDLoc dl(Op);
18269 MVT OpVT = Op.getSimpleValueType();
18271 // It's always cheaper to replace a xor+movd with xorps, and it simplifies
18272 // further combines.
18273 if (X86::isZeroNode(Op.getOperand(0)))
18274 return getZeroVector(OpVT, Subtarget, DAG, dl);
18276 // If this is a 256-bit vector result, first insert into a 128-bit
18277 // vector and then insert into the 256-bit vector.
18278 if (!OpVT.is128BitVector()) {
18279 // Insert into a 128-bit vector.
18280 unsigned SizeFactor = OpVT.getSizeInBits() / 128;
18281 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
18282 OpVT.getVectorNumElements() / SizeFactor);
18284 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
18286 // Insert the 128-bit vector.
18287 return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
18289 assert(OpVT.is128BitVector() && OpVT.isInteger() && OpVT != MVT::v2i64 &&
18290 "Expected an SSE type!");
18292 // Pass through a v4i32 or v8i16 SCALAR_TO_VECTOR as that's what we use in
18293 // tblgen.
18294 if (OpVT == MVT::v4i32 || (OpVT == MVT::v8i16 && Subtarget.hasFP16()))
18295 return Op;
18297 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
18298 return DAG.getBitcast(
18299 OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
18302 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
18303 // simple superregister reference or explicit instructions to insert
18304 // the upper bits of a vector.
18305 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
18306 SelectionDAG &DAG) {
18307 assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);
18309 return insert1BitVector(Op, DAG, Subtarget);
18312 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
18313 SelectionDAG &DAG) {
18314 assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
18315 "Only vXi1 extract_subvectors need custom lowering");
18317 SDLoc dl(Op);
18318 SDValue Vec = Op.getOperand(0);
18319 uint64_t IdxVal = Op.getConstantOperandVal(1);
18321 if (IdxVal == 0) // the operation is legal
18322 return Op;
18324 // Extend to natively supported kshift.
18325 Vec = widenMaskVector(Vec, false, Subtarget, DAG, dl);
18327 // Shift to the LSB.
18328 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, Vec.getSimpleValueType(), Vec,
18329 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
18331 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec,
18332 DAG.getIntPtrConstant(0, dl));
18335 // Returns the appropriate wrapper opcode for a global reference.
18336 unsigned X86TargetLowering::getGlobalWrapperKind(
18337 const GlobalValue *GV, const unsigned char OpFlags) const {
18338 // References to absolute symbols are never PC-relative.
18339 if (GV && GV->isAbsoluteSymbolRef())
18340 return X86ISD::Wrapper;
18342 // The following OpFlags under RIP-rel PIC use RIP.
18343 if (Subtarget.isPICStyleRIPRel() &&
18344 (OpFlags == X86II::MO_NO_FLAG || OpFlags == X86II::MO_COFFSTUB ||
18345 OpFlags == X86II::MO_DLLIMPORT))
18346 return X86ISD::WrapperRIP;
18348 // GOTPCREL references must always use RIP.
18349 if (OpFlags == X86II::MO_GOTPCREL || OpFlags == X86II::MO_GOTPCREL_NORELAX)
18350 return X86ISD::WrapperRIP;
18352 return X86ISD::Wrapper;
18355 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
18356 // their target counterparts wrapped in the X86ISD::Wrapper node. Suppose N is
18357 // one of the above-mentioned nodes. It has to be wrapped because otherwise
18358 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
18359 // be used to form an addressing mode. These wrapped nodes will be selected
18360 // into MOV32ri.
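//
// Illustrative shape of the lowered result (assuming PIC with a global base
// register):
//   (add (X86ISD::GlobalBaseReg), (X86ISD::Wrapper (TargetConstantPool ...)))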
18361 SDValue
18362 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
18363 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
18365 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18366 // global base reg.
18367 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
18369 auto PtrVT = getPointerTy(DAG.getDataLayout());
18370 SDValue Result = DAG.getTargetConstantPool(
18371 CP->getConstVal(), PtrVT, CP->getAlign(), CP->getOffset(), OpFlag);
18372 SDLoc DL(CP);
18373 Result =
18374 DAG.getNode(getGlobalWrapperKind(nullptr, OpFlag), DL, PtrVT, Result);
18375 // With PIC, the address is actually $g + Offset.
18376 if (OpFlag) {
18377 Result =
18378 DAG.getNode(ISD::ADD, DL, PtrVT,
18379 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
18382 return Result;
18385 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
18386 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
18388 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18389 // global base reg.
18390 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
18392 auto PtrVT = getPointerTy(DAG.getDataLayout());
18393 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
18394 SDLoc DL(JT);
18395 Result =
18396 DAG.getNode(getGlobalWrapperKind(nullptr, OpFlag), DL, PtrVT, Result);
18398 // With PIC, the address is actually $g + Offset.
18399 if (OpFlag)
18400 Result =
18401 DAG.getNode(ISD::ADD, DL, PtrVT,
18402 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
18404 return Result;
18407 SDValue X86TargetLowering::LowerExternalSymbol(SDValue Op,
18408 SelectionDAG &DAG) const {
18409 return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
18412 SDValue
18413 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
18414 // Create the TargetBlockAddress node.
18415 unsigned char OpFlags =
18416 Subtarget.classifyBlockAddressReference();
18417 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
18418 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
18419 SDLoc dl(Op);
18420 auto PtrVT = getPointerTy(DAG.getDataLayout());
18421 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
18422 Result =
18423 DAG.getNode(getGlobalWrapperKind(nullptr, OpFlags), dl, PtrVT, Result);
18425 // With PIC, the address is actually $g + Offset.
18426 if (isGlobalRelativeToPICBase(OpFlags)) {
18427 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
18428 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
18431 return Result;
18434 /// Creates target global address or external symbol nodes for calls or
18435 /// other uses.
18436 SDValue X86TargetLowering::LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
18437 bool ForCall) const {
18438 // Unpack the global address or external symbol.
18439 const SDLoc &dl = SDLoc(Op);
18440 const GlobalValue *GV = nullptr;
18441 int64_t Offset = 0;
18442 const char *ExternalSym = nullptr;
18443 if (const auto *G = dyn_cast<GlobalAddressSDNode>(Op)) {
18444 GV = G->getGlobal();
18445 Offset = G->getOffset();
18446 } else {
18447 const auto *ES = cast<ExternalSymbolSDNode>(Op);
18448 ExternalSym = ES->getSymbol();
18451 // Calculate some flags for address lowering.
18452 const Module &Mod = *DAG.getMachineFunction().getFunction().getParent();
18453 unsigned char OpFlags;
18454 if (ForCall)
18455 OpFlags = Subtarget.classifyGlobalFunctionReference(GV, Mod);
18456 else
18457 OpFlags = Subtarget.classifyGlobalReference(GV, Mod);
18458 bool HasPICReg = isGlobalRelativeToPICBase(OpFlags);
18459 bool NeedsLoad = isGlobalStubReference(OpFlags);
18461 CodeModel::Model M = DAG.getTarget().getCodeModel();
18462 auto PtrVT = getPointerTy(DAG.getDataLayout());
18463 SDValue Result;
18465 if (GV) {
18466 // Create a target global address if this is a global. If possible, fold the
18467 // offset into the global address reference. Otherwise, ADD it on later.
18468 // Suppress the folding if Offset is negative: movl foo-1, %eax is not
18469 // allowed because if the address of foo is 0, the ELF R_X86_64_32
18470 // relocation will compute to a negative value, which is invalid.
18471 int64_t GlobalOffset = 0;
18472 if (OpFlags == X86II::MO_NO_FLAG && Offset >= 0 &&
18473 X86::isOffsetSuitableForCodeModel(Offset, M, true)) {
18474 std::swap(GlobalOffset, Offset);
18476 Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GlobalOffset, OpFlags);
18477 } else {
18478 // If this is not a global address, this must be an external symbol.
18479 Result = DAG.getTargetExternalSymbol(ExternalSym, PtrVT, OpFlags);
18482 // If this is a direct call, avoid the wrapper if we don't need to do any
18483 // loads or adds. This allows SDAG ISel to match direct calls.
18484 if (ForCall && !NeedsLoad && !HasPICReg && Offset == 0)
18485 return Result;
18487 Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result);
18489 // With PIC, the address is actually $g + Offset.
18490 if (HasPICReg) {
18491 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
18492 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
18495 // For globals that require a load from a stub to get the address, emit the
18496 // load.
18497 if (NeedsLoad)
18498 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
18499 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
18501 // If there was a non-zero offset that we didn't fold, create an explicit
18502 // addition for it.
18503 if (Offset != 0)
18504 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
18505 DAG.getConstant(Offset, dl, PtrVT));
18507 return Result;
18510 SDValue
18511 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
18512 return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
18515 static SDValue
18516 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
18517 SDValue *InGlue, const EVT PtrVT, unsigned ReturnReg,
18518 unsigned char OperandFlags, bool LocalDynamic = false) {
18519 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
18520 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
18521 SDLoc dl(GA);
18522 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18523 GA->getValueType(0),
18524 GA->getOffset(),
18525 OperandFlags);
18527 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
18528 : X86ISD::TLSADDR;
18530 if (InGlue) {
18531 SDValue Ops[] = { Chain, TGA, *InGlue };
18532 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
18533 } else {
18534 SDValue Ops[] = { Chain, TGA };
18535 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
18538 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
18539 MFI.setAdjustsStack(true);
18540 MFI.setHasCalls(true);
18542 SDValue Glue = Chain.getValue(1);
18543 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Glue);
18546 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
18547 static SDValue
18548 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18549 const EVT PtrVT) {
18550 SDValue InGlue;
18551 SDLoc dl(GA); // TODO: the function entry point might be a better location
18552 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
18553 DAG.getNode(X86ISD::GlobalBaseReg,
18554 SDLoc(), PtrVT), InGlue);
18555 InGlue = Chain.getValue(1);
18557 return GetTLSADDR(DAG, Chain, GA, &InGlue, PtrVT, X86::EAX, X86II::MO_TLSGD);
18560 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit LP64
18561 static SDValue
18562 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18563 const EVT PtrVT) {
18564 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
18565 X86::RAX, X86II::MO_TLSGD);
18568 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit ILP32
18569 static SDValue
18570 LowerToTLSGeneralDynamicModelX32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18571 const EVT PtrVT) {
18572 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
18573 X86::EAX, X86II::MO_TLSGD);
18576 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
18577 SelectionDAG &DAG, const EVT PtrVT,
18578 bool Is64Bit, bool Is64BitLP64) {
18579 SDLoc dl(GA);
18581 // Get the start address of the TLS block for this module.
18582 X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
18583 .getInfo<X86MachineFunctionInfo>();
18584 MFI->incNumLocalDynamicTLSAccesses();
18586 SDValue Base;
18587 if (Is64Bit) {
18588 unsigned ReturnReg = Is64BitLP64 ? X86::RAX : X86::EAX;
18589 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, ReturnReg,
18590 X86II::MO_TLSLD, /*LocalDynamic=*/true);
18591 } else {
18592 SDValue InGlue;
18593 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
18594 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InGlue);
18595 InGlue = Chain.getValue(1);
18596 Base = GetTLSADDR(DAG, Chain, GA, &InGlue, PtrVT, X86::EAX,
18597 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
18600 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
18601 // of Base.
18603 // Build x@dtpoff.
18604 unsigned char OperandFlags = X86II::MO_DTPOFF;
18605 unsigned WrapperKind = X86ISD::Wrapper;
18606 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18607 GA->getValueType(0),
18608 GA->getOffset(), OperandFlags);
18609 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
18611 // Add x@dtpoff with the base.
18612 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
18615 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
18616 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18617 const EVT PtrVT, TLSModel::Model model,
18618 bool is64Bit, bool isPIC) {
18619 SDLoc dl(GA);
18621 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
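// (For reference: in the X86 backend, address space 256 denotes the %gs
// segment and 257 the %fs segment, so a null pointer in one of those address
// spaces names the segment base itself, i.e. %gs:0 or %fs:0.)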
18622 Value *Ptr = Constant::getNullValue(
18623 PointerType::get(*DAG.getContext(), is64Bit ? 257 : 256));
18625 SDValue ThreadPointer =
18626 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
18627 MachinePointerInfo(Ptr));
18629 unsigned char OperandFlags = 0;
18630 // Most TLS accesses are not RIP-relative, even on x86-64. One exception is
18631 // the initial-exec model.
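// For example, on x86-64 the local-exec form is typically
//   movq %fs:0, %rax ; leaq x@TPOFF(%rax), %rax
// while initial-exec loads the offset through the GOT:
//   movq %fs:0, %rax ; addq x@GOTTPOFF(%rip), %rax
// (a sketch only; the exact sequence depends on the subtarget and linker
// relaxations).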
18632 unsigned WrapperKind = X86ISD::Wrapper;
18633 if (model == TLSModel::LocalExec) {
18634 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
18635 } else if (model == TLSModel::InitialExec) {
18636 if (is64Bit) {
18637 OperandFlags = X86II::MO_GOTTPOFF;
18638 WrapperKind = X86ISD::WrapperRIP;
18639 } else {
18640 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
18642 } else {
18643 llvm_unreachable("Unexpected model");
18646 // emit "addl x@ntpoff,%eax" (local exec)
18647 // or "addl x@indntpoff,%eax" (initial exec)
18648 // or "addl x@gotntpoff(%ebx),%eax" (initial exec, 32-bit pic)
18649 SDValue TGA =
18650 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
18651 GA->getOffset(), OperandFlags);
18652 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
18654 if (model == TLSModel::InitialExec) {
18655 if (isPIC && !is64Bit) {
18656 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
18657 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
18658 Offset);
18661 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
18662 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
18665 // The address of the thread local variable is the add of the thread
18666 // pointer with the offset of the variable.
18667 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
18670 SDValue
18671 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
18673 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
18675 if (DAG.getTarget().useEmulatedTLS())
18676 return LowerToTLSEmulatedModel(GA, DAG);
18678 const GlobalValue *GV = GA->getGlobal();
18679 auto PtrVT = getPointerTy(DAG.getDataLayout());
18680 bool PositionIndependent = isPositionIndependent();
18682 if (Subtarget.isTargetELF()) {
18683 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
18684 switch (model) {
18685 case TLSModel::GeneralDynamic:
18686 if (Subtarget.is64Bit()) {
18687 if (Subtarget.isTarget64BitLP64())
18688 return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
18689 return LowerToTLSGeneralDynamicModelX32(GA, DAG, PtrVT);
18691 return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
18692 case TLSModel::LocalDynamic:
18693 return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT, Subtarget.is64Bit(),
18694 Subtarget.isTarget64BitLP64());
18695 case TLSModel::InitialExec:
18696 case TLSModel::LocalExec:
18697 return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
18698 PositionIndependent);
18700 llvm_unreachable("Unknown TLS model.");
18703 if (Subtarget.isTargetDarwin()) {
18704 // Darwin only has one model of TLS. Lower to that.
18705 unsigned char OpFlag = 0;
18706 unsigned WrapperKind = 0;
18708 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18709 // global base reg.
18710 bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
18711 if (PIC32) {
18712 OpFlag = X86II::MO_TLVP_PIC_BASE;
18713 WrapperKind = X86ISD::Wrapper;
18714 } else {
18715 OpFlag = X86II::MO_TLVP;
18716 WrapperKind = X86ISD::WrapperRIP;
18718 SDLoc DL(Op);
18719 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
18720 GA->getValueType(0),
18721 GA->getOffset(), OpFlag);
18722 SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);
18724 // With PIC32, the address is actually $g + Offset.
18725 if (PIC32)
18726 Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
18727 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
18728 Offset);
18730 // Lowering the X86ISD::TLSCALL node will make sure everything is in the
18731 // right location.
18732 SDValue Chain = DAG.getEntryNode();
18733 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
18734 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
18735 SDValue Args[] = { Chain, Offset };
18736 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
18737 Chain = DAG.getCALLSEQ_END(Chain, 0, 0, Chain.getValue(1), DL);
18739 // TLSCALL will be codegen'ed as a call. Inform MFI that the function has calls.
18740 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
18741 MFI.setAdjustsStack(true);
18743 // And our return value (tls address) is in the standard call return value
18744 // location.
18745 unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
18746 return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
18749 if (Subtarget.isOSWindows()) {
18750 // Just use the implicit TLS architecture.
18751 // Need to generate something similar to:
18752 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
18753 // ; from TEB
18754 // mov ecx, dword [rel _tls_index] ; Load index (from C runtime)
18755 // mov rcx, qword [rdx+rcx*8]
18756 // mov eax, .tls$:tlsvar
18757 // [rax+rcx] contains the address
18758 // Windows 64bit: gs:0x58
18759 // Windows 32bit: fs:__tls_array
18761 SDLoc dl(GA);
18762 SDValue Chain = DAG.getEntryNode();
18764 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
18765 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
18766 // use its literal value of 0x2C.
18767 Value *Ptr = Constant::getNullValue(
18768 Subtarget.is64Bit() ? PointerType::get(*DAG.getContext(), 256)
18769 : PointerType::get(*DAG.getContext(), 257));
18771 SDValue TlsArray = Subtarget.is64Bit()
18772 ? DAG.getIntPtrConstant(0x58, dl)
18773 : (Subtarget.isTargetWindowsGNU()
18774 ? DAG.getIntPtrConstant(0x2C, dl)
18775 : DAG.getExternalSymbol("_tls_array", PtrVT));
18777 SDValue ThreadPointer =
18778 DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
18780 SDValue res;
18781 if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
18782 res = ThreadPointer;
18783 } else {
18784 // Load the _tls_index variable
18785 SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
18786 if (Subtarget.is64Bit())
18787 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
18788 MachinePointerInfo(), MVT::i32);
18789 else
18790 IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
18792 const DataLayout &DL = DAG.getDataLayout();
18793 SDValue Scale =
18794 DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8);
18795 IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);
18797 res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
18800 res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
18802 // Get the offset of start of .tls section
18803 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18804 GA->getValueType(0),
18805 GA->getOffset(), X86II::MO_SECREL);
18806 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);
18808 // The address of the thread local variable is the add of the thread
18809 // pointer with the offset of the variable.
18810 return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
18813 llvm_unreachable("TLS not implemented for this target.");
18816 /// Lower SRA_PARTS and friends, which return two i32 values
18817 /// and take a 2 x i32 value to shift plus a shift amount.
18818 /// TODO: Can this be moved to general expansion code?
18819 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
18820 SDValue Lo, Hi;
18821 DAG.getTargetLoweringInfo().expandShiftParts(Op.getNode(), Lo, Hi, DAG);
18822 return DAG.getMergeValues({Lo, Hi}, SDLoc(Op));
18825 // Try to use a packed vector operation to handle i64 on 32-bit targets when
18826 // AVX512DQ is enabled.
18827 static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG,
18828 const X86Subtarget &Subtarget) {
18829 assert((Op.getOpcode() == ISD::SINT_TO_FP ||
18830 Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
18831 Op.getOpcode() == ISD::STRICT_UINT_TO_FP ||
18832 Op.getOpcode() == ISD::UINT_TO_FP) &&
18833 "Unexpected opcode!");
18834 bool IsStrict = Op->isStrictFPOpcode();
18835 unsigned OpNo = IsStrict ? 1 : 0;
18836 SDValue Src = Op.getOperand(OpNo);
18837 MVT SrcVT = Src.getSimpleValueType();
18838 MVT VT = Op.getSimpleValueType();
18840 if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() ||
18841 (VT != MVT::f32 && VT != MVT::f64))
18842 return SDValue();
18844 // Pack the i64 into a vector, do the operation and extract.
18846 // Use at least a 256-bit source vector so the result is a full 128 bits for the f32 case.
18847 unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
18848 MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
18849 MVT VecVT = MVT::getVectorVT(VT, NumElts);
18851 SDLoc dl(Op);
18852 SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src);
18853 if (IsStrict) {
18854 SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, {VecVT, MVT::Other},
18855 {Op.getOperand(0), InVec});
18856 SDValue Chain = CvtVec.getValue(1);
18857 SDValue Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18858 DAG.getIntPtrConstant(0, dl));
18859 return DAG.getMergeValues({Value, Chain}, dl);
18862 SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec);
18864 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18865 DAG.getIntPtrConstant(0, dl));
18868 // Try to use a packed vector operation to handle i64 on 32-bit targets.
18869 static SDValue LowerI64IntToFP16(SDValue Op, SelectionDAG &DAG,
18870 const X86Subtarget &Subtarget) {
18871 assert((Op.getOpcode() == ISD::SINT_TO_FP ||
18872 Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
18873 Op.getOpcode() == ISD::STRICT_UINT_TO_FP ||
18874 Op.getOpcode() == ISD::UINT_TO_FP) &&
18875 "Unexpected opcode!");
18876 bool IsStrict = Op->isStrictFPOpcode();
18877 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
18878 MVT SrcVT = Src.getSimpleValueType();
18879 MVT VT = Op.getSimpleValueType();
18881 if (SrcVT != MVT::i64 || Subtarget.is64Bit() || VT != MVT::f16)
18882 return SDValue();
18884 // Pack the i64 into a vector, do the operation and extract.
18886 assert(Subtarget.hasFP16() && "Expected FP16");
18888 SDLoc dl(Op);
18889 SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
18890 if (IsStrict) {
18891 SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, {MVT::v2f16, MVT::Other},
18892 {Op.getOperand(0), InVec});
18893 SDValue Chain = CvtVec.getValue(1);
18894 SDValue Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18895 DAG.getIntPtrConstant(0, dl));
18896 return DAG.getMergeValues({Value, Chain}, dl);
18899 SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, MVT::v2f16, InVec);
18901 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18902 DAG.getIntPtrConstant(0, dl));
18905 static bool useVectorCast(unsigned Opcode, MVT FromVT, MVT ToVT,
18906 const X86Subtarget &Subtarget) {
18907 switch (Opcode) {
18908 case ISD::SINT_TO_FP:
18909 // TODO: Handle wider types with AVX/AVX512.
18910 if (!Subtarget.hasSSE2() || FromVT != MVT::v4i32)
18911 return false;
18912 // CVTDQ2PS or (V)CVTDQ2PD
18913 return ToVT == MVT::v4f32 || (Subtarget.hasAVX() && ToVT == MVT::v4f64);
18915 case ISD::UINT_TO_FP:
18916 // TODO: Handle wider types and i64 elements.
18917 if (!Subtarget.hasAVX512() || FromVT != MVT::v4i32)
18918 return false;
18919 // VCVTUDQ2PS or VCVTUDQ2PD
18920 return ToVT == MVT::v4f32 || ToVT == MVT::v4f64;
18922 default:
18923 return false;
18927 /// Given a scalar cast operation that is extracted from a vector, try to
18928 /// vectorize the cast op followed by extraction. This will avoid an expensive
18929 /// round-trip between XMM and GPR.
18930 static SDValue vectorizeExtractedCast(SDValue Cast, SelectionDAG &DAG,
18931 const X86Subtarget &Subtarget) {
18932 // TODO: This could be enhanced to handle smaller integer types by peeking
18933 // through an extend.
18934 SDValue Extract = Cast.getOperand(0);
18935 MVT DestVT = Cast.getSimpleValueType();
18936 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
18937 !isa<ConstantSDNode>(Extract.getOperand(1)))
18938 return SDValue();
18940 // See if we have a 128-bit vector cast op for this type of cast.
18941 SDValue VecOp = Extract.getOperand(0);
18942 MVT FromVT = VecOp.getSimpleValueType();
18943 unsigned NumEltsInXMM = 128 / FromVT.getScalarSizeInBits();
18944 MVT Vec128VT = MVT::getVectorVT(FromVT.getScalarType(), NumEltsInXMM);
18945 MVT ToVT = MVT::getVectorVT(DestVT, NumEltsInXMM);
18946 if (!useVectorCast(Cast.getOpcode(), Vec128VT, ToVT, Subtarget))
18947 return SDValue();
18949 // If we are extracting from a non-zero element, first shuffle the source
18950 // vector to allow extracting from element zero.
18951 SDLoc DL(Cast);
18952 if (!isNullConstant(Extract.getOperand(1))) {
18953 SmallVector<int, 16> Mask(FromVT.getVectorNumElements(), -1);
18954 Mask[0] = Extract.getConstantOperandVal(1);
18955 VecOp = DAG.getVectorShuffle(FromVT, DL, VecOp, DAG.getUNDEF(FromVT), Mask);
18957 // If the source vector is wider than 128-bits, extract the low part. Do not
18958 // create an unnecessarily wide vector cast op.
18959 if (FromVT != Vec128VT)
18960 VecOp = extract128BitVector(VecOp, 0, DAG, DL);
18962 // cast (extelt V, 0) --> extelt (cast (extract_subv V)), 0
18963 // cast (extelt V, C) --> extelt (cast (extract_subv (shuffle V, [C...]))), 0
18964 SDValue VCast = DAG.getNode(Cast.getOpcode(), DL, ToVT, VecOp);
18965 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, DestVT, VCast,
18966 DAG.getIntPtrConstant(0, DL));
18969 /// Given a scalar cast to FP with a cast to integer operand (almost an ftrunc),
18970 /// try to vectorize the cast ops. This will avoid an expensive round-trip
18971 /// between XMM and GPR.
18972 static SDValue lowerFPToIntToFP(SDValue CastToFP, SelectionDAG &DAG,
18973 const X86Subtarget &Subtarget) {
18974 // TODO: Allow FP_TO_UINT.
18975 SDValue CastToInt = CastToFP.getOperand(0);
18976 MVT VT = CastToFP.getSimpleValueType();
18977 if (CastToInt.getOpcode() != ISD::FP_TO_SINT || VT.isVector())
18978 return SDValue();
18980 MVT IntVT = CastToInt.getSimpleValueType();
18981 SDValue X = CastToInt.getOperand(0);
18982 MVT SrcVT = X.getSimpleValueType();
18983 if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
18984 return SDValue();
18986 // See if we have 128-bit vector cast instructions for this type of cast.
18987 // We need cvttps2dq/cvttpd2dq and cvtdq2ps/cvtdq2pd.
18988 if (!Subtarget.hasSSE2() || (VT != MVT::f32 && VT != MVT::f64) ||
18989 IntVT != MVT::i32)
18990 return SDValue();
18992 unsigned SrcSize = SrcVT.getSizeInBits();
18993 unsigned IntSize = IntVT.getSizeInBits();
18994 unsigned VTSize = VT.getSizeInBits();
18995 MVT VecSrcVT = MVT::getVectorVT(SrcVT, 128 / SrcSize);
18996 MVT VecIntVT = MVT::getVectorVT(IntVT, 128 / IntSize);
18997 MVT VecVT = MVT::getVectorVT(VT, 128 / VTSize);
18999 // We need target-specific opcodes if this is v2f64 -> v4i32 -> v2f64.
19000 unsigned ToIntOpcode =
19001 SrcSize != IntSize ? X86ISD::CVTTP2SI : (unsigned)ISD::FP_TO_SINT;
19002 unsigned ToFPOpcode =
19003 IntSize != VTSize ? X86ISD::CVTSI2P : (unsigned)ISD::SINT_TO_FP;
19005 // sint_to_fp (fp_to_sint X) --> extelt (sint_to_fp (fp_to_sint (s2v X))), 0
19007 // We are not defining the high elements (for example, zeroing them) because
19008 // that could nullify any performance advantage that we hoped to gain from
19009 // this vector op hack. We do not expect any adverse effects (like denorm
19010 // penalties) with cast ops.
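// Concretely (a sketch; the exact instructions depend on the subtarget), for
// f32 -> i32 -> f32 this trades the GPR round-trip
//   cvttss2si %xmm0, %eax ; cvtsi2ss %eax, %xmm0
// for the in-register pair
//   cvttps2dq %xmm0, %xmm0 ; cvtdq2ps %xmm0, %xmm0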
19011 SDLoc DL(CastToFP);
19012 SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
19013 SDValue VecX = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecSrcVT, X);
19014 SDValue VCastToInt = DAG.getNode(ToIntOpcode, DL, VecIntVT, VecX);
19015 SDValue VCastToFP = DAG.getNode(ToFPOpcode, DL, VecVT, VCastToInt);
19016 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, VCastToFP, ZeroIdx);
19019 static SDValue lowerINT_TO_FP_vXi64(SDValue Op, SelectionDAG &DAG,
19020 const X86Subtarget &Subtarget) {
19021 SDLoc DL(Op);
19022 bool IsStrict = Op->isStrictFPOpcode();
19023 MVT VT = Op->getSimpleValueType(0);
19024 SDValue Src = Op->getOperand(IsStrict ? 1 : 0);
19026 if (Subtarget.hasDQI()) {
19027 assert(!Subtarget.hasVLX() && "Unexpected features");
19029 assert((Src.getSimpleValueType() == MVT::v2i64 ||
19030 Src.getSimpleValueType() == MVT::v4i64) &&
19031 "Unsupported custom type");
19033 // With AVX512DQ, but not VLX we need to widen to get a 512-bit result type.
19034 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v4f64) &&
19035 "Unexpected VT!");
19036 MVT WideVT = VT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
19038 // Need to concat with zero vector for strict fp to avoid spurious
19039 // exceptions.
19040 SDValue Tmp = IsStrict ? DAG.getConstant(0, DL, MVT::v8i64)
19041 : DAG.getUNDEF(MVT::v8i64);
19042 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i64, Tmp, Src,
19043 DAG.getIntPtrConstant(0, DL));
19044 SDValue Res, Chain;
19045 if (IsStrict) {
19046 Res = DAG.getNode(Op.getOpcode(), DL, {WideVT, MVT::Other},
19047 {Op->getOperand(0), Src});
19048 Chain = Res.getValue(1);
19049 } else {
19050 Res = DAG.getNode(Op.getOpcode(), DL, WideVT, Src);
19053 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
19054 DAG.getIntPtrConstant(0, DL));
19056 if (IsStrict)
19057 return DAG.getMergeValues({Res, Chain}, DL);
19058 return Res;
19061 bool IsSigned = Op->getOpcode() == ISD::SINT_TO_FP ||
19062 Op->getOpcode() == ISD::STRICT_SINT_TO_FP;
19063 if (VT != MVT::v4f32 || IsSigned)
19064 return SDValue();
19066 SDValue Zero = DAG.getConstant(0, DL, MVT::v4i64);
19067 SDValue One = DAG.getConstant(1, DL, MVT::v4i64);
19068 SDValue Sign = DAG.getNode(ISD::OR, DL, MVT::v4i64,
19069 DAG.getNode(ISD::SRL, DL, MVT::v4i64, Src, One),
19070 DAG.getNode(ISD::AND, DL, MVT::v4i64, Src, One));
19071 SDValue IsNeg = DAG.getSetCC(DL, MVT::v4i64, Src, Zero, ISD::SETLT);
19072 SDValue SignSrc = DAG.getSelect(DL, MVT::v4i64, IsNeg, Sign, Src);
19073 SmallVector<SDValue, 4> SignCvts(4);
19074 SmallVector<SDValue, 4> Chains(4);
19075 for (int i = 0; i != 4; ++i) {
19076 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, SignSrc,
19077 DAG.getIntPtrConstant(i, DL));
19078 if (IsStrict) {
19079 SignCvts[i] =
19080 DAG.getNode(ISD::STRICT_SINT_TO_FP, DL, {MVT::f32, MVT::Other},
19081 {Op.getOperand(0), Elt});
19082 Chains[i] = SignCvts[i].getValue(1);
19083 } else {
19084 SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, DL, MVT::f32, Elt);
19087 SDValue SignCvt = DAG.getBuildVector(VT, DL, SignCvts);
19089 SDValue Slow, Chain;
19090 if (IsStrict) {
19091 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
19092 Slow = DAG.getNode(ISD::STRICT_FADD, DL, {MVT::v4f32, MVT::Other},
19093 {Chain, SignCvt, SignCvt});
19094 Chain = Slow.getValue(1);
19095 } else {
19096 Slow = DAG.getNode(ISD::FADD, DL, MVT::v4f32, SignCvt, SignCvt);
19099 IsNeg = DAG.getNode(ISD::TRUNCATE, DL, MVT::v4i32, IsNeg);
19100 SDValue Cvt = DAG.getSelect(DL, MVT::v4f32, IsNeg, Slow, SignCvt);
19102 if (IsStrict)
19103 return DAG.getMergeValues({Cvt, Chain}, DL);
19105 return Cvt;
19108 static SDValue promoteXINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
19109 bool IsStrict = Op->isStrictFPOpcode();
19110 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
19111 SDValue Chain = IsStrict ? Op->getOperand(0) : DAG.getEntryNode();
19112 MVT VT = Op.getSimpleValueType();
19113 MVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
19114 SDLoc dl(Op);
19116 SDValue Rnd = DAG.getIntPtrConstant(0, dl);
19117 if (IsStrict)
19118 return DAG.getNode(
19119 ISD::STRICT_FP_ROUND, dl, {VT, MVT::Other},
19120 {Chain,
19121 DAG.getNode(Op.getOpcode(), dl, {NVT, MVT::Other}, {Chain, Src}),
19122 Rnd});
19123 return DAG.getNode(ISD::FP_ROUND, dl, VT,
19124 DAG.getNode(Op.getOpcode(), dl, NVT, Src), Rnd);
19127 static bool isLegalConversion(MVT VT, bool IsSigned,
19128 const X86Subtarget &Subtarget) {
19129 if (VT == MVT::v4i32 && Subtarget.hasSSE2() && IsSigned)
19130 return true;
19131 if (VT == MVT::v8i32 && Subtarget.hasAVX() && IsSigned)
19132 return true;
19133 if (Subtarget.hasVLX() && (VT == MVT::v4i32 || VT == MVT::v8i32))
19134 return true;
19135 if (Subtarget.useAVX512Regs()) {
19136 if (VT == MVT::v16i32)
19137 return true;
19138 if (VT == MVT::v8i64 && Subtarget.hasDQI())
19139 return true;
19141 if (Subtarget.hasDQI() && Subtarget.hasVLX() &&
19142 (VT == MVT::v2i64 || VT == MVT::v4i64))
19143 return true;
19144 return false;
19147 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
19148 SelectionDAG &DAG) const {
19149 bool IsStrict = Op->isStrictFPOpcode();
19150 unsigned OpNo = IsStrict ? 1 : 0;
19151 SDValue Src = Op.getOperand(OpNo);
19152 SDValue Chain = IsStrict ? Op->getOperand(0) : DAG.getEntryNode();
19153 MVT SrcVT = Src.getSimpleValueType();
19154 MVT VT = Op.getSimpleValueType();
19155 SDLoc dl(Op);
19157 if (isSoftF16(VT, Subtarget))
19158 return promoteXINT_TO_FP(Op, DAG);
19159 else if (isLegalConversion(SrcVT, true, Subtarget))
19160 return Op;
19162 if (Subtarget.isTargetWin64() && SrcVT == MVT::i128)
19163 return LowerWin64_INT128_TO_FP(Op, DAG);
19165 if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
19166 return Extract;
19168 if (SDValue R = lowerFPToIntToFP(Op, DAG, Subtarget))
19169 return R;
19171 if (SrcVT.isVector()) {
19172 if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
19173 // Note: since v2f64 is a legal type, we don't need to zero extend the
19174 // source for strict FP.
19175 if (IsStrict)
19176 return DAG.getNode(
19177 X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
19178 {Chain, DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
19179 DAG.getUNDEF(SrcVT))});
19180 return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
19181 DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
19182 DAG.getUNDEF(SrcVT)));
19184 if (SrcVT == MVT::v2i64 || SrcVT == MVT::v4i64)
19185 return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
19187 return SDValue();
19190 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
19191 "Unknown SINT_TO_FP to lower!");
19193 bool UseSSEReg = isScalarFPTypeInSSEReg(VT);
19195 // These are really Legal; return the operand so the caller accepts it as
19196 // Legal.
19197 if (SrcVT == MVT::i32 && UseSSEReg)
19198 return Op;
19199 if (SrcVT == MVT::i64 && UseSSEReg && Subtarget.is64Bit())
19200 return Op;
19202 if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
19203 return V;
19204 if (SDValue V = LowerI64IntToFP16(Op, DAG, Subtarget))
19205 return V;
19207 // SSE doesn't have an i16 conversion so we need to promote.
19208 if (SrcVT == MVT::i16 && (UseSSEReg || VT == MVT::f128)) {
19209 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, Src);
19210 if (IsStrict)
19211 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
19212 {Chain, Ext});
19214 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Ext);
19217 if (VT == MVT::f128 || !Subtarget.hasX87())
19218 return SDValue();
19220 SDValue ValueToStore = Src;
19221 if (SrcVT == MVT::i64 && Subtarget.hasSSE2() && !Subtarget.is64Bit())
19222 // Bitcasting to f64 here allows us to do a single 64-bit store from
19223 // an SSE register, avoiding the store forwarding penalty that would come
19224 // with two 32-bit stores.
19225 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
19227 unsigned Size = SrcVT.getStoreSize();
19228 Align Alignment(Size);
19229 MachineFunction &MF = DAG.getMachineFunction();
19230 auto PtrVT = getPointerTy(MF.getDataLayout());
19231 int SSFI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false);
19232 MachinePointerInfo MPI =
19233 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
19234 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
19235 Chain = DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, Alignment);
19236 std::pair<SDValue, SDValue> Tmp =
19237 BuildFILD(VT, SrcVT, dl, Chain, StackSlot, MPI, Alignment, DAG);
19239 if (IsStrict)
19240 return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
19242 return Tmp.first;
19245 std::pair<SDValue, SDValue> X86TargetLowering::BuildFILD(
19246 EVT DstVT, EVT SrcVT, const SDLoc &DL, SDValue Chain, SDValue Pointer,
19247 MachinePointerInfo PtrInfo, Align Alignment, SelectionDAG &DAG) const {
19248 // Build the FILD
19249 SDVTList Tys;
19250 bool useSSE = isScalarFPTypeInSSEReg(DstVT);
19251 if (useSSE)
19252 Tys = DAG.getVTList(MVT::f80, MVT::Other);
19253 else
19254 Tys = DAG.getVTList(DstVT, MVT::Other);
19256 SDValue FILDOps[] = {Chain, Pointer};
19257 SDValue Result =
19258 DAG.getMemIntrinsicNode(X86ISD::FILD, DL, Tys, FILDOps, SrcVT, PtrInfo,
19259 Alignment, MachineMemOperand::MOLoad);
19260 Chain = Result.getValue(1);
19262 if (useSSE) {
19263 MachineFunction &MF = DAG.getMachineFunction();
19264 unsigned SSFISize = DstVT.getStoreSize();
19265 int SSFI =
19266 MF.getFrameInfo().CreateStackObject(SSFISize, Align(SSFISize), false);
19267 auto PtrVT = getPointerTy(MF.getDataLayout());
19268 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
19269 Tys = DAG.getVTList(MVT::Other);
19270 SDValue FSTOps[] = {Chain, Result, StackSlot};
19271 MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
19272 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
19273 MachineMemOperand::MOStore, SSFISize, Align(SSFISize));
19275 Chain =
19276 DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, FSTOps, DstVT, StoreMMO);
19277 Result = DAG.getLoad(
19278 DstVT, DL, Chain, StackSlot,
19279 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
19280 Chain = Result.getValue(1);
19283 return { Result, Chain };
19286 /// Horizontal vector math instructions may be slower than normal math with
19287 /// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
19288 /// implementation, and likely shuffle complexity of the alternate sequence.
19289 static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
19290 const X86Subtarget &Subtarget) {
19291 bool IsOptimizingSize = DAG.shouldOptForSize();
19292 bool HasFastHOps = Subtarget.hasFastHorizontalOps();
19293 return !IsSingleSource || IsOptimizingSize || HasFastHOps;
19296 /// 64-bit unsigned integer to double expansion.
19297 static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG,
19298 const X86Subtarget &Subtarget) {
19299 // We can't use this algorithm for strict fp. It produces -0.0 instead of +0.0
19300 // when converting 0 while rounding toward negative infinity. The caller will
19301 // fall back to Expand when i64 is legal, or use FILD in 32-bit mode.
19302 assert(!Op->isStrictFPOpcode() && "Expected non-strict uint_to_fp!");
19303 // This algorithm is not obvious. Here is what we're trying to output:
19304 /*
19305 movq %rax, %xmm0
19306 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
19307 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
19308 #ifdef __SSE3__
19309 haddpd %xmm0, %xmm0
19310 #else
19311 pshufd $0x4e, %xmm0, %xmm1
19312 addpd %xmm1, %xmm0
19313 #endif
19314 */
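// Informally: after the punpckldq, the two doubles in %xmm0 have bit patterns
// 2^52 + lo and 2^84 + hi*2^32 (0x43300000 and 0x45300000 are the high words
// of 2^52 and 2^84). Subtracting c1 leaves {lo, hi*2^32} exactly, and the
// final add yields lo + hi*2^32, i.e. the original value up to the rounding
// of that last add.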
19316 SDLoc dl(Op);
19317 LLVMContext *Context = DAG.getContext();
19319 // Build some magic constants.
19320 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
19321 Constant *C0 = ConstantDataVector::get(*Context, CV0);
19322 auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
19323 SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, Align(16));
19325 SmallVector<Constant*,2> CV1;
19326 CV1.push_back(
19327 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
19328 APInt(64, 0x4330000000000000ULL))));
19329 CV1.push_back(
19330 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
19331 APInt(64, 0x4530000000000000ULL))));
19332 Constant *C1 = ConstantVector::get(CV1);
19333 SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, Align(16));
19335 // Load the 64-bit value into an XMM register.
19336 SDValue XR1 =
19337 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Op.getOperand(0));
19338 SDValue CLod0 = DAG.getLoad(
19339 MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
19340 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(16));
19341 SDValue Unpck1 =
19342 getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
19344 SDValue CLod1 = DAG.getLoad(
19345 MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
19346 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(16));
19347 SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
19348 // TODO: Are there any fast-math-flags to propagate here?
19349 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
19350 SDValue Result;
19352 if (Subtarget.hasSSE3() &&
19353 shouldUseHorizontalOp(true, DAG, Subtarget)) {
19354 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
19355 } else {
19356 SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1});
19357 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub);
19359 Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
19360 DAG.getIntPtrConstant(0, dl));
19361 return Result;
19364 /// 32-bit unsigned integer to float expansion.
19365 static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG,
19366 const X86Subtarget &Subtarget) {
19367 unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
19368 SDLoc dl(Op);
19369 // FP constant to bias correct the final result.
19370 SDValue Bias = DAG.getConstantFP(
19371 llvm::bit_cast<double>(0x4330000000000000ULL), dl, MVT::f64);
19373 // Load the 32-bit value into an XMM register.
19374 SDValue Load =
19375 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Op.getOperand(OpNo));
19377 // Zero out the upper parts of the register.
19378 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
19380 // Or the load with the bias.
19381 SDValue Or = DAG.getNode(
19382 ISD::OR, dl, MVT::v2i64,
19383 DAG.getBitcast(MVT::v2i64, Load),
19384 DAG.getBitcast(MVT::v2i64,
19385 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
19386 Or =
19387 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
19388 DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));
19390 if (Op.getNode()->isStrictFPOpcode()) {
19391 // Subtract the bias.
19392 // TODO: Are there any fast-math-flags to propagate here?
19393 SDValue Chain = Op.getOperand(0);
19394 SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::f64, MVT::Other},
19395 {Chain, Or, Bias});
19397 if (Op.getValueType() == Sub.getValueType())
19398 return Sub;
19400 // Handle final rounding.
19401 std::pair<SDValue, SDValue> ResultPair = DAG.getStrictFPExtendOrRound(
19402 Sub, Sub.getValue(1), dl, Op.getSimpleValueType());
19404 return DAG.getMergeValues({ResultPair.first, ResultPair.second}, dl);
19407 // Subtract the bias.
19408 // TODO: Are there any fast-math-flags to propagate here?
19409 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
19411 // Handle final rounding.
19412 return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType());
19415 static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
19416 const X86Subtarget &Subtarget,
19417 const SDLoc &DL) {
19418 if (Op.getSimpleValueType() != MVT::v2f64)
19419 return SDValue();
19421 bool IsStrict = Op->isStrictFPOpcode();
19423 SDValue N0 = Op.getOperand(IsStrict ? 1 : 0);
19424 assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");
19426 if (Subtarget.hasAVX512()) {
19427 if (!Subtarget.hasVLX()) {
19428 // Let generic type legalization widen this.
19429 if (!IsStrict)
19430 return SDValue();
19431 // Otherwise pad the integer input with 0s and widen the operation.
19432 N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
19433 DAG.getConstant(0, DL, MVT::v2i32));
19434 SDValue Res = DAG.getNode(Op->getOpcode(), DL, {MVT::v4f64, MVT::Other},
19435 {Op.getOperand(0), N0});
19436 SDValue Chain = Res.getValue(1);
19437 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2f64, Res,
19438 DAG.getIntPtrConstant(0, DL));
19439 return DAG.getMergeValues({Res, Chain}, DL);
19442 // Legalize to v4i32 type.
19443 N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
19444 DAG.getUNDEF(MVT::v2i32));
19445 if (IsStrict)
19446 return DAG.getNode(X86ISD::STRICT_CVTUI2P, DL, {MVT::v2f64, MVT::Other},
19447 {Op.getOperand(0), N0});
19448 return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);
19451 // Zero extend to 2i64, OR with the floating point representation of 2^52.
19452 // This gives us the floating point equivalent of 2^52 + the i32 integer
19453 // since double has 52 bits of mantissa. Then subtract 2^52 in floating
19454 // point, leaving just our i32 integers in double format.
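// A quick worked example: for the input 7, the OR below produces the bit
// pattern 0x4330000000000007, which is the double 2^52 + 7; subtracting the
// 2^52 bias then yields exactly 7.0.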
19455 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i64, N0);
19456 SDValue VBias = DAG.getConstantFP(
19457 llvm::bit_cast<double>(0x4330000000000000ULL), DL, MVT::v2f64);
19458 SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v2i64, ZExtIn,
19459 DAG.getBitcast(MVT::v2i64, VBias));
19460 Or = DAG.getBitcast(MVT::v2f64, Or);
19462 if (IsStrict)
19463 return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v2f64, MVT::Other},
19464 {Op.getOperand(0), Or, VBias});
19465 return DAG.getNode(ISD::FSUB, DL, MVT::v2f64, Or, VBias);
19468 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
19469 const X86Subtarget &Subtarget) {
19470 SDLoc DL(Op);
19471 bool IsStrict = Op->isStrictFPOpcode();
19472 SDValue V = Op->getOperand(IsStrict ? 1 : 0);
19473 MVT VecIntVT = V.getSimpleValueType();
19474 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
19475 "Unsupported custom type");
19477 if (Subtarget.hasAVX512()) {
19478 // With AVX512, but not VLX we need to widen to get a 512-bit result type.
19479 assert(!Subtarget.hasVLX() && "Unexpected features");
19480 MVT VT = Op->getSimpleValueType(0);
19482 // v8i32->v8f64 is legal with AVX512 so just return it.
19483 if (VT == MVT::v8f64)
19484 return Op;
19486 assert((VT == MVT::v4f32 || VT == MVT::v8f32 || VT == MVT::v4f64) &&
19487 "Unexpected VT!");
19488 MVT WideVT = VT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
19489 MVT WideIntVT = VT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
19490 // Need to concat with zero vector for strict fp to avoid spurious
19491 // exceptions.
19492 SDValue Tmp =
19493 IsStrict ? DAG.getConstant(0, DL, WideIntVT) : DAG.getUNDEF(WideIntVT);
19494 V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideIntVT, Tmp, V,
19495 DAG.getIntPtrConstant(0, DL));
19496 SDValue Res, Chain;
19497 if (IsStrict) {
19498 Res = DAG.getNode(ISD::STRICT_UINT_TO_FP, DL, {WideVT, MVT::Other},
19499 {Op->getOperand(0), V});
19500 Chain = Res.getValue(1);
19501 } else {
19502 Res = DAG.getNode(ISD::UINT_TO_FP, DL, WideVT, V);
19505 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
19506 DAG.getIntPtrConstant(0, DL));
19508 if (IsStrict)
19509 return DAG.getMergeValues({Res, Chain}, DL);
19510 return Res;
19513 if (Subtarget.hasAVX() && VecIntVT == MVT::v4i32 &&
19514 Op->getSimpleValueType(0) == MVT::v4f64) {
19515 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i64, V);
19516 Constant *Bias = ConstantFP::get(
19517 *DAG.getContext(),
19518 APFloat(APFloat::IEEEdouble(), APInt(64, 0x4330000000000000ULL)));
19519 auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
19520 SDValue CPIdx = DAG.getConstantPool(Bias, PtrVT, Align(8));
19521 SDVTList Tys = DAG.getVTList(MVT::v4f64, MVT::Other);
19522 SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
19523 SDValue VBias = DAG.getMemIntrinsicNode(
19524 X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::f64,
19525 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(8),
19526 MachineMemOperand::MOLoad);
19528 SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v4i64, ZExtIn,
19529 DAG.getBitcast(MVT::v4i64, VBias));
19530 Or = DAG.getBitcast(MVT::v4f64, Or);
19532 if (IsStrict)
19533 return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v4f64, MVT::Other},
19534 {Op.getOperand(0), Or, VBias});
19535 return DAG.getNode(ISD::FSUB, DL, MVT::v4f64, Or, VBias);
19538 // The algorithm is the following:
19539 // #ifdef __SSE4_1__
19540 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
19541 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
19542 // (uint4) 0x53000000, 0xaa);
19543 // #else
19544 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
19545 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
19546 // #endif
19547 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
19548 // return (float4) lo + fhi;
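// Why this works (informal): lo reinterpreted as float is 2^23 + (v & 0xffff)
// exactly (0x4b000000 is 2^23), and hi is 2^39 + 2^16 * (v >> 16)
// (0x53000000 is 2^39). Subtracting (0x1.0p39f + 0x1.0p23f) from hi and then
// adding lo cancels both biases, leaving (v >> 16) * 2^16 + (v & 0xffff) == v,
// up to the rounding of the final add.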
19550 bool Is128 = VecIntVT == MVT::v4i32;
19551 MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
19552 // If we convert to something other than the supported type, e.g., to v4f64,
19553 // abort early.
19554 if (VecFloatVT != Op->getSimpleValueType(0))
19555 return SDValue();
19557 // In the #ifdef/#else code, we have in common:
19558 // - The vector of constants:
19559 // -- 0x4b000000
19560 // -- 0x53000000
19561 // - A shift:
19562 // -- v >> 16
19564 // Create the splat vector for 0x4b000000.
19565 SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
19566 // Create the splat vector for 0x53000000.
19567 SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
19569 // Create the right shift.
19570 SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
19571 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
19573 SDValue Low, High;
19574 if (Subtarget.hasSSE41()) {
19575 MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
19576 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
19577 SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
19578 SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
19579 // Low will be bitcasted right away, so do not bother bitcasting back to its
19580 // original type.
19581 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
19582 VecCstLowBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
19583 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
19584 // (uint4) 0x53000000, 0xaa);
19585 SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
19586 SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
19587 // High will be bitcasted right away, so do not bother bitcasting back to
19588 // its original type.
19589 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
19590 VecCstHighBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
19591 } else {
19592 SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
19593 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
19594 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
19595 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
19597 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
19598 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
19601 // Create the vector constant for (0x1.0p39f + 0x1.0p23f).
19602 SDValue VecCstFSub = DAG.getConstantFP(
19603 APFloat(APFloat::IEEEsingle(), APInt(32, 0x53000080)), DL, VecFloatVT);
19605 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
19606 // NOTE: By using fsub of a positive constant instead of fadd of a negative
19607 // constant, we avoid reassociation in MachineCombiner when unsafe-fp-math is
19608 // enabled. See PR24512.
19609 SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
19610 // TODO: Are there any fast-math-flags to propagate here?
19611 // (float4) lo;
19612 SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
19613 // return (float4) lo + fhi;
19614 if (IsStrict) {
19615 SDValue FHigh = DAG.getNode(ISD::STRICT_FSUB, DL, {VecFloatVT, MVT::Other},
19616 {Op.getOperand(0), HighBitcast, VecCstFSub});
19617 return DAG.getNode(ISD::STRICT_FADD, DL, {VecFloatVT, MVT::Other},
19618 {FHigh.getValue(1), LowBitcast, FHigh});
19621 SDValue FHigh =
19622 DAG.getNode(ISD::FSUB, DL, VecFloatVT, HighBitcast, VecCstFSub);
19623 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
19626 static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
19627 const X86Subtarget &Subtarget) {
19628 unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
19629 SDValue N0 = Op.getOperand(OpNo);
19630 MVT SrcVT = N0.getSimpleValueType();
19631 SDLoc dl(Op);
19633 switch (SrcVT.SimpleTy) {
19634 default:
19635 llvm_unreachable("Custom UINT_TO_FP is not supported!");
19636 case MVT::v2i32:
19637 return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
19638 case MVT::v4i32:
19639 case MVT::v8i32:
19640 return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
19641 case MVT::v2i64:
19642 case MVT::v4i64:
19643 return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
19647 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
19648 SelectionDAG &DAG) const {
19649 bool IsStrict = Op->isStrictFPOpcode();
19650 unsigned OpNo = IsStrict ? 1 : 0;
19651 SDValue Src = Op.getOperand(OpNo);
19652 SDLoc dl(Op);
19653 auto PtrVT = getPointerTy(DAG.getDataLayout());
19654 MVT SrcVT = Src.getSimpleValueType();
19655 MVT DstVT = Op->getSimpleValueType(0);
19656 SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
19658 // Bail out when we don't have native conversion instructions.
19659 if (DstVT == MVT::f128)
19660 return SDValue();
19662 if (isSoftF16(DstVT, Subtarget))
19663 return promoteXINT_TO_FP(Op, DAG);
19664 else if (isLegalConversion(SrcVT, false, Subtarget))
19665 return Op;
19667 if (DstVT.isVector())
19668 return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);
19670 if (Subtarget.isTargetWin64() && SrcVT == MVT::i128)
19671 return LowerWin64_INT128_TO_FP(Op, DAG);
19673 if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
19674 return Extract;
19676 if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
19677 (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
19678 // Conversions from unsigned i32 to f32/f64 are legal,
19679 // using VCVTUSI2SS/SD. Same for i64 in 64-bit mode.
19680 return Op;
19683 // Promote i32 to i64 and use a signed conversion on 64-bit targets.
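// (A zero-extended i32 always fits in the signed-i64 range, so the signed
// conversion, e.g. a 64-bit cvtsi2sd, produces the exact unsigned result.)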
19684 if (SrcVT == MVT::i32 && Subtarget.is64Bit()) {
19685 Src = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Src);
19686 if (IsStrict)
19687 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {DstVT, MVT::Other},
19688 {Chain, Src});
19689 return DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, Src);
19692 if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
19693 return V;
19694 if (SDValue V = LowerI64IntToFP16(Op, DAG, Subtarget))
19695 return V;
19697 // The transform for i64->f64 isn't correct for 0 when rounding to negative
19698 // infinity. It produces -0.0, so disable under strictfp.
19699 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && Subtarget.hasSSE2() &&
19700 !IsStrict)
19701 return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
19702 // The transform for i32->f64/f32 isn't correct for 0 when rounding to
19703 // negative infinity, so disable it under strictfp and use FILD instead.
19704 if (SrcVT == MVT::i32 && Subtarget.hasSSE2() && DstVT != MVT::f80 &&
19705 !IsStrict)
19706 return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
19707 if (Subtarget.is64Bit() && SrcVT == MVT::i64 &&
19708 (DstVT == MVT::f32 || DstVT == MVT::f64))
19709 return SDValue();
19711 // Make a 64-bit buffer, and use it to build an FILD.
19712 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64, 8);
19713 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
19714 Align SlotAlign(8);
19715 MachinePointerInfo MPI =
19716 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
19717 if (SrcVT == MVT::i32) {
19718 SDValue OffsetSlot =
19719 DAG.getMemBasePlusOffset(StackSlot, TypeSize::getFixed(4), dl);
19720 SDValue Store1 = DAG.getStore(Chain, dl, Src, StackSlot, MPI, SlotAlign);
19721 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
19722 OffsetSlot, MPI.getWithOffset(4), SlotAlign);
19723 std::pair<SDValue, SDValue> Tmp =
19724 BuildFILD(DstVT, MVT::i64, dl, Store2, StackSlot, MPI, SlotAlign, DAG);
19725 if (IsStrict)
19726 return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
19728 return Tmp.first;
19731 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
19732 SDValue ValueToStore = Src;
19733 if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit()) {
19734 // Bitcasting to f64 here allows us to do a single 64-bit store from
19735 // an SSE register, avoiding the store forwarding penalty that would come
19736 // with two 32-bit stores.
19737 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
19739 SDValue Store =
19740 DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, SlotAlign);
19741 // For i64 source, we need to add the appropriate power of 2 if the input
19742 // was negative. We must be careful to do the computation in x87 extended
19743 // precision, not in SSE.
19744 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
19745 SDValue Ops[] = { Store, StackSlot };
19746 SDValue Fild =
19747 DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, MVT::i64, MPI,
19748 SlotAlign, MachineMemOperand::MOLoad);
19749 Chain = Fild.getValue(1);
19752 // Check whether the sign bit is set.
19753 SDValue SignSet = DAG.getSetCC(
19754 dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
19755 Op.getOperand(OpNo), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
19757 // Build a 64 bit pair (FF, 0) in the constant pool, with FF in the hi bits.
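// (FF here is 0x5F800000, the IEEE-754 single-precision encoding of 2^64;
// adding it compensates for FILD having interpreted a value with the top bit
// set as negative.)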
19758 APInt FF(64, 0x5F80000000000000ULL);
19759 SDValue FudgePtr = DAG.getConstantPool(
19760 ConstantInt::get(*DAG.getContext(), FF), PtrVT);
19761 Align CPAlignment = cast<ConstantPoolSDNode>(FudgePtr)->getAlign();
19763 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
19764 SDValue Zero = DAG.getIntPtrConstant(0, dl);
19765 SDValue Four = DAG.getIntPtrConstant(4, dl);
19766 SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Four, Zero);
19767 FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);
19769 // Load the value out, extending it from f32 to f80.
19770 SDValue Fudge = DAG.getExtLoad(
19771 ISD::EXTLOAD, dl, MVT::f80, Chain, FudgePtr,
19772 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
19773 CPAlignment);
19774 Chain = Fudge.getValue(1);
19775 // Extend everything to 80 bits to force it to be done on x87.
19776 // TODO: Are there any fast-math-flags to propagate here?
19777 if (IsStrict) {
19778 unsigned Opc = ISD::STRICT_FADD;
19779 // Windows needs the precision control changed to 80 bits around this add.
19780 if (Subtarget.isOSWindows() && DstVT == MVT::f32)
19781 Opc = X86ISD::STRICT_FP80_ADD;
19783 SDValue Add =
19784 DAG.getNode(Opc, dl, {MVT::f80, MVT::Other}, {Chain, Fild, Fudge});
19785 // STRICT_FP_ROUND can't handle equal types.
19786 if (DstVT == MVT::f80)
19787 return Add;
19788 return DAG.getNode(ISD::STRICT_FP_ROUND, dl, {DstVT, MVT::Other},
19789 {Add.getValue(1), Add, DAG.getIntPtrConstant(0, dl)});
19791 unsigned Opc = ISD::FADD;
19792 // Windows needs the precision control changed to 80 bits around this add.
19793 if (Subtarget.isOSWindows() && DstVT == MVT::f32)
19794 Opc = X86ISD::FP80_ADD;
19796 SDValue Add = DAG.getNode(Opc, dl, MVT::f80, Fild, Fudge);
19797 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
19798 DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
19801 // If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
19802 // is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
19803 // just return an SDValue().
19804 // Otherwise it is assumed to be a conversion from one of f32, f64 or f80
19805 // to i16, i32 or i64, and we lower it to a legal sequence and return the
19806 // result.
19807 SDValue
19808 X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
19809 bool IsSigned, SDValue &Chain) const {
19810 bool IsStrict = Op->isStrictFPOpcode();
19811 SDLoc DL(Op);
19813 EVT DstTy = Op.getValueType();
19814 SDValue Value = Op.getOperand(IsStrict ? 1 : 0);
19815 EVT TheVT = Value.getValueType();
19816 auto PtrVT = getPointerTy(DAG.getDataLayout());
19818 if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
19819 // f16 must be promoted before using the lowering in this routine.
19820 // fp128 does not use this lowering.
19821 return SDValue();
19824 // If using FIST to compute an unsigned i64, we'll need some fixup
19825 // to handle values above the maximum signed i64. A FIST is always
19826 // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
19827 bool UnsignedFixup = !IsSigned && DstTy == MVT::i64;
19829 // FIXME: This does not generate an invalid exception if the input does not
19830 // fit in i32. PR44019
19831 if (!IsSigned && DstTy != MVT::i64) {
19832 // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
19833 // The low 32 bits of the fist result will have the correct uint32 result.
19834 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
19835 DstTy = MVT::i64;
19838 assert(DstTy.getSimpleVT() <= MVT::i64 &&
19839 DstTy.getSimpleVT() >= MVT::i16 &&
19840 "Unknown FP_TO_INT to lower!");
19842 // We lower FP->int64 into FISTP64 followed by a load from a temporary
19843 // stack slot.
19844 MachineFunction &MF = DAG.getMachineFunction();
19845 unsigned MemSize = DstTy.getStoreSize();
19846 int SSFI =
19847 MF.getFrameInfo().CreateStackObject(MemSize, Align(MemSize), false);
19848 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
19850 Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
19852 SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.
19854 if (UnsignedFixup) {
19856 // Conversion to unsigned i64 is implemented with a select,
19857 // depending on whether the source value fits in the range
19858 // of a signed i64. Let Thresh be the FP equivalent of
19859 // 0x8000000000000000ULL.
19861 // Adjust = (Value >= Thresh) ? 0x80000000 : 0;
19862 // FltOfs = (Value >= Thresh) ? Thresh : 0.0;
19863 // FistSrc = (Value - FltOfs);
19864 // Fist-to-mem64 FistSrc
19865 // Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
19866 // to XOR'ing the high 32 bits with Adjust.
19868 // Being a power of 2, Thresh is exactly representable in all FP formats.
19869 // For X87 we'd like to use the smallest FP type for this constant, but
19870 // for DAG type consistency we have to match the FP operand type.
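// e.g. with TheVT == f64 and Value == 1.5 * 2^63 (exactly representable):
//   Cmp is true, so FltOfs == 2^63 and FistSrc == 2^62; the FIST stores
//   0x4000000000000000 and XOR'ing with the 0x8000000000000000 adjustment
//   yields the expected unsigned result 0xC000000000000000.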
19872 APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
19873 LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
19874 bool LosesInfo = false;
19875 if (TheVT == MVT::f64)
19876 // The rounding mode is irrelevant as the conversion should be exact.
19877 Status = Thresh.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
19878 &LosesInfo);
19879 else if (TheVT == MVT::f80)
19880 Status = Thresh.convert(APFloat::x87DoubleExtended(),
19881 APFloat::rmNearestTiesToEven, &LosesInfo);
19883 assert(Status == APFloat::opOK && !LosesInfo &&
19884 "FP conversion should have been exact");
19886 SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);
19888 EVT ResVT = getSetCCResultType(DAG.getDataLayout(),
19889 *DAG.getContext(), TheVT);
19890 SDValue Cmp;
19891 if (IsStrict) {
19892 Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETGE, Chain,
19893 /*IsSignaling*/ true);
19894 Chain = Cmp.getValue(1);
19895 } else {
19896 Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETGE);
19899 // Our preferred lowering of
19901 // (Value >= Thresh) ? 0x8000000000000000ULL : 0
19903 // is
19905 // (Value >= Thresh) << 63
19907 // but since we can get here after LegalOperations, DAGCombine might do the
19908 // wrong thing if we create a select. So, directly create the preferred
19909 // version.
19910 SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Cmp);
19911 SDValue Const63 = DAG.getConstant(63, DL, MVT::i8);
19912 Adjust = DAG.getNode(ISD::SHL, DL, MVT::i64, Zext, Const63);
19914 SDValue FltOfs = DAG.getSelect(DL, TheVT, Cmp, ThreshVal,
19915 DAG.getConstantFP(0.0, DL, TheVT));
19917 if (IsStrict) {
19918 Value = DAG.getNode(ISD::STRICT_FSUB, DL, { TheVT, MVT::Other},
19919 { Chain, Value, FltOfs });
19920 Chain = Value.getValue(1);
19921 } else
19922 Value = DAG.getNode(ISD::FSUB, DL, TheVT, Value, FltOfs);
19925 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
19927 // FIXME: This causes a redundant load/store if the SSE-class value is already
19928 // in memory, such as if it is on the call stack.
19929 if (isScalarFPTypeInSSEReg(TheVT)) {
19930 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
19931 Chain = DAG.getStore(Chain, DL, Value, StackSlot, MPI);
19932 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
19933 SDValue Ops[] = { Chain, StackSlot };
19935 unsigned FLDSize = TheVT.getStoreSize();
19936 assert(FLDSize <= MemSize && "Stack slot not big enough");
19937 MachineMemOperand *MMO = MF.getMachineMemOperand(
19938 MPI, MachineMemOperand::MOLoad, FLDSize, Align(FLDSize));
19939 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, TheVT, MMO);
19940 Chain = Value.getValue(1);
19943 // Build the FP_TO_INT*_IN_MEM
19944 MachineMemOperand *MMO = MF.getMachineMemOperand(
19945 MPI, MachineMemOperand::MOStore, MemSize, Align(MemSize));
19946 SDValue Ops[] = { Chain, Value, StackSlot };
19947 SDValue FIST = DAG.getMemIntrinsicNode(X86ISD::FP_TO_INT_IN_MEM, DL,
19948 DAG.getVTList(MVT::Other),
19949 Ops, DstTy, MMO);
19951 SDValue Res = DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MPI);
19952 Chain = Res.getValue(1);
19954 // If we need an unsigned fixup, XOR the result with adjust.
19955 if (UnsignedFixup)
19956 Res = DAG.getNode(ISD::XOR, DL, MVT::i64, Res, Adjust);
19958 return Res;
19961 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
19962 const X86Subtarget &Subtarget) {
19963 MVT VT = Op.getSimpleValueType();
19964 SDValue In = Op.getOperand(0);
19965 MVT InVT = In.getSimpleValueType();
19966 SDLoc dl(Op);
19967 unsigned Opc = Op.getOpcode();
19969 assert(VT.isVector() && InVT.isVector() && "Expected vector type");
19970 assert((Opc == ISD::ANY_EXTEND || Opc == ISD::ZERO_EXTEND) &&
19971 "Unexpected extension opcode");
19972 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
19973 "Expected same number of elements");
19974 assert((VT.getVectorElementType() == MVT::i16 ||
19975 VT.getVectorElementType() == MVT::i32 ||
19976 VT.getVectorElementType() == MVT::i64) &&
19977 "Unexpected element type");
19978 assert((InVT.getVectorElementType() == MVT::i8 ||
19979 InVT.getVectorElementType() == MVT::i16 ||
19980 InVT.getVectorElementType() == MVT::i32) &&
19981 "Unexpected element type");
19983 unsigned ExtendInVecOpc = DAG.getOpcode_EXTEND_VECTOR_INREG(Opc);
19985 if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
19986 assert(InVT == MVT::v32i8 && "Unexpected VT!");
19987 return splitVectorIntUnary(Op, DAG);
19990 if (Subtarget.hasInt256())
19991 return Op;
19993 // Optimize vectors in AVX mode:
19995 // v8i16 -> v8i32
19996 // Use vpmovzwd for 4 lower elements v8i16 -> v4i32.
19997 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
19998 // Concat upper and lower parts.
20000 // v4i32 -> v4i64
20001 // Use vpmovzdq for 2 lower elements v4i32 -> v2i64.
20002 // Use vpunpckhdq for 2 upper elements v4i32 -> v2i64.
20003 // Concat upper and lower parts.
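// e.g. (zero_extend v8i16 %x to v8i32) without AVX2 roughly becomes:
//   OpLo = zero_extend_vector_inreg %x      (vpmovzxwd of the lower half)
//   OpHi = unpckh %x, zero                  (vpunpckhwd against a zero vector)
//   concat_vectors OpLo, OpHi               (one 256-bit result)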
20005 MVT HalfVT = VT.getHalfNumVectorElementsVT();
20006 SDValue OpLo = DAG.getNode(ExtendInVecOpc, dl, HalfVT, In);
20008 // Short-circuit if we can determine that each 128-bit half is the same value.
20009 // Otherwise, this is difficult to match and optimize.
20010 if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(In))
20011 if (hasIdenticalHalvesShuffleMask(Shuf->getMask()))
20012 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpLo);
20014 SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
20015 SDValue Undef = DAG.getUNDEF(InVT);
20016 bool NeedZero = Opc == ISD::ZERO_EXTEND;
20017 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
20018 OpHi = DAG.getBitcast(HalfVT, OpHi);
20020 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
20023 // Helper to split and extend a v16i1 mask to v16i8 or v16i16.
20024 static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
20025 const SDLoc &dl, SelectionDAG &DAG) {
20026 assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
20027 SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
20028 DAG.getIntPtrConstant(0, dl));
20029 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
20030 DAG.getIntPtrConstant(8, dl));
20031 Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
20032 Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
20033 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
20034 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20037 static SDValue LowerZERO_EXTEND_Mask(SDValue Op,
20038 const X86Subtarget &Subtarget,
20039 SelectionDAG &DAG) {
20040 MVT VT = Op->getSimpleValueType(0);
20041 SDValue In = Op->getOperand(0);
20042 MVT InVT = In.getSimpleValueType();
20043 assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
20044 SDLoc DL(Op);
20045 unsigned NumElts = VT.getVectorNumElements();
20047 // For all vectors but vXi8, we can just emit a sign_extend and a shift. This
20048 // avoids a constant pool load.
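// e.g. (zero_extend v8i1 %k to v8i32) becomes (srl (sign_extend %k), 31):
// the sign extension produces 0 or -1 per lane and the shift maps -1 to 1.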
20049 if (VT.getVectorElementType() != MVT::i8) {
20050 SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
20051 return DAG.getNode(ISD::SRL, DL, VT, Extend,
20052 DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
20055 // Extend VT if BWI is not supported.
20056 MVT ExtVT = VT;
20057 if (!Subtarget.hasBWI()) {
20058 // If v16i32 is to be avoided, we'll need to split and concatenate.
20059 if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
20060 return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);
20062 ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
20065 // Widen to 512-bits if VLX is not supported.
20066 MVT WideVT = ExtVT;
20067 if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
20068 NumElts *= 512 / ExtVT.getSizeInBits();
20069 InVT = MVT::getVectorVT(MVT::i1, NumElts);
20070 In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
20071 In, DAG.getIntPtrConstant(0, DL));
20072 WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(),
20073 NumElts);
20076 SDValue One = DAG.getConstant(1, DL, WideVT);
20077 SDValue Zero = DAG.getConstant(0, DL, WideVT);
20079 SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);
20081 // Truncate if we had to extend above.
20082 if (VT != ExtVT) {
20083 WideVT = MVT::getVectorVT(MVT::i8, NumElts);
20084 SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
20087 // Extract back to 128/256-bit if we widened.
20088 if (WideVT != VT)
20089 SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
20090 DAG.getIntPtrConstant(0, DL));
20092 return SelectedVal;
20095 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
20096 SelectionDAG &DAG) {
20097 SDValue In = Op.getOperand(0);
20098 MVT SVT = In.getSimpleValueType();
20100 if (SVT.getVectorElementType() == MVT::i1)
20101 return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);
20103 assert(Subtarget.hasAVX() && "Expected AVX support");
20104 return LowerAVXExtend(Op, DAG, Subtarget);
20107 /// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
20108 /// It makes use of the fact that vectors with enough leading sign/zero bits
20109 /// prevent the PACKSS/PACKUS from saturating the results.
20110 /// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
20111 /// within each 128-bit lane.
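/// e.g. assuming sufficient sign bits, a v8i32 -> v8i16 truncation becomes a
/// single PACKSSDW of the two 128-bit halves, while v8i32 -> v8i8 recurses:
/// PACKSSDW down to v8i16 first, then PACKSSWB down to v8i8.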
20112 static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
20113 const SDLoc &DL, SelectionDAG &DAG,
20114 const X86Subtarget &Subtarget) {
20115 assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
20116 "Unexpected PACK opcode");
20117 assert(DstVT.isVector() && "VT not a vector?");
20119 // Requires SSE2 for PACKSS (SSE41 PACKUSDW is handled below).
20120 if (!Subtarget.hasSSE2())
20121 return SDValue();
20123 EVT SrcVT = In.getValueType();
20125 // No truncation required, we might get here due to recursive calls.
20126 if (SrcVT == DstVT)
20127 return In;
20129 unsigned NumElems = SrcVT.getVectorNumElements();
20130 if (NumElems < 2 || !isPowerOf2_32(NumElems))
20131 return SDValue();
20133 unsigned DstSizeInBits = DstVT.getSizeInBits();
20134 unsigned SrcSizeInBits = SrcVT.getSizeInBits();
20135 assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
20136 assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");
20138 LLVMContext &Ctx = *DAG.getContext();
20139 EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
20140 EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
20142 // Pack to the largest type possible:
20143 // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
20144 EVT InVT = MVT::i16, OutVT = MVT::i8;
20145 if (SrcVT.getScalarSizeInBits() > 16 &&
20146 (Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
20147 InVT = MVT::i32;
20148 OutVT = MVT::i16;
20151 // Sub-128-bit truncation - widen to 128-bit src and pack in the lower half.
20152 // On pre-AVX512, pack the src in both halves to help value tracking.
20153 if (SrcSizeInBits <= 128) {
20154 InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
20155 OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
20156 In = widenSubVector(In, false, Subtarget, DAG, DL, 128);
20157 SDValue LHS = DAG.getBitcast(InVT, In);
20158 SDValue RHS = Subtarget.hasAVX512() ? DAG.getUNDEF(InVT) : LHS;
20159 SDValue Res = DAG.getNode(Opcode, DL, OutVT, LHS, RHS);
20160 Res = extractSubVector(Res, 0, DAG, DL, SrcSizeInBits / 2);
20161 Res = DAG.getBitcast(PackedVT, Res);
20162 return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
20165 // Split lower/upper subvectors.
20166 SDValue Lo, Hi;
20167 std::tie(Lo, Hi) = splitVector(In, DAG, DL);
20169 // If Hi is undef, then don't bother packing it and widen the result instead.
20170 if (Hi.isUndef()) {
20171 EVT DstHalfVT = DstVT.getHalfNumVectorElementsVT(Ctx);
20172 if (SDValue Res =
20173 truncateVectorWithPACK(Opcode, DstHalfVT, Lo, DL, DAG, Subtarget))
20174 return widenSubVector(Res, false, Subtarget, DAG, DL, DstSizeInBits);
20177 unsigned SubSizeInBits = SrcSizeInBits / 2;
20178 InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
20179 OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
20181 // 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
20182 if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
20183 Lo = DAG.getBitcast(InVT, Lo);
20184 Hi = DAG.getBitcast(InVT, Hi);
20185 SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
20186 return DAG.getBitcast(DstVT, Res);
20189 // AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
20190 // AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
20191 if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
20192 Lo = DAG.getBitcast(InVT, Lo);
20193 Hi = DAG.getBitcast(InVT, Hi);
20194 SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
20196 // 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
20197 // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
20198 // Scale shuffle mask to avoid bitcasts and help ComputeNumSignBits.
20199 SmallVector<int, 64> Mask;
20200 int Scale = 64 / OutVT.getScalarSizeInBits();
20201 narrowShuffleMaskElts(Scale, { 0, 2, 1, 3 }, Mask);
20202 Res = DAG.getVectorShuffle(OutVT, DL, Res, Res, Mask);
20204 if (DstVT.is256BitVector())
20205 return DAG.getBitcast(DstVT, Res);
20207 // If 512bit -> 128bit truncate another stage.
20208 Res = DAG.getBitcast(PackedVT, Res);
20209 return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
20212 // Recursively pack lower/upper subvectors, concat result and pack again.
20213 assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");
20215 if (PackedVT.is128BitVector()) {
20216 // Avoid CONCAT_VECTORS on sub-128bit nodes as these can fail after
20217 // type legalization.
20218 SDValue Res =
20219 truncateVectorWithPACK(Opcode, PackedVT, In, DL, DAG, Subtarget);
20220 return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
20223 EVT HalfPackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems / 2);
20224 Lo = truncateVectorWithPACK(Opcode, HalfPackedVT, Lo, DL, DAG, Subtarget);
20225 Hi = truncateVectorWithPACK(Opcode, HalfPackedVT, Hi, DL, DAG, Subtarget);
20226 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
20227 return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
20230 /// Truncate using inreg zero extension (AND mask) and X86ISD::PACKUS.
20231 /// e.g. trunc <8 x i32> X to <8 x i16> -->
20232 /// MaskX = X & 0xffff (clear high bits to prevent saturation)
20233 /// packus (extract_subv MaskX, 0), (extract_subv MaskX, 1)
20234 static SDValue truncateVectorWithPACKUS(EVT DstVT, SDValue In, const SDLoc &DL,
20235 const X86Subtarget &Subtarget,
20236 SelectionDAG &DAG) {
20237 In = DAG.getZeroExtendInReg(In, DL, DstVT);
20238 return truncateVectorWithPACK(X86ISD::PACKUS, DstVT, In, DL, DAG, Subtarget);
20241 /// Truncate using inreg sign extension and X86ISD::PACKSS.
20242 static SDValue truncateVectorWithPACKSS(EVT DstVT, SDValue In, const SDLoc &DL,
20243 const X86Subtarget &Subtarget,
20244 SelectionDAG &DAG) {
20245 EVT SrcVT = In.getValueType();
20246 In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, SrcVT, In,
20247 DAG.getValueType(DstVT));
20248 return truncateVectorWithPACK(X86ISD::PACKSS, DstVT, In, DL, DAG, Subtarget);
20251 /// Helper to determine if \p In truncated to \p DstVT has the necessary
20252 /// signbits / leading zero bits to be truncated with PACKSS / PACKUS,
20253 /// possibly by converting a SRL node to SRA for sign extension.
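/// e.g. on a pre-SSE4.1 target, (trunc (srl X, 16)) from vXi32 to vXi16 only
/// discards bits that the shift already cleared, so the SRL can be rewritten
/// as an SRA and the truncation lowered with PACKSSDW.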
20254 static SDValue matchTruncateWithPACK(unsigned &PackOpcode, EVT DstVT,
20255 SDValue In, const SDLoc &DL,
20256 SelectionDAG &DAG,
20257 const X86Subtarget &Subtarget) {
20258 // Requires SSE2.
20259 if (!Subtarget.hasSSE2())
20260 return SDValue();
20262 EVT SrcVT = In.getValueType();
20263 EVT DstSVT = DstVT.getVectorElementType();
20264 EVT SrcSVT = SrcVT.getVectorElementType();
20266 // Check we have a truncation suited for PACKSS/PACKUS.
20267 if (!((SrcSVT == MVT::i16 || SrcSVT == MVT::i32 || SrcSVT == MVT::i64) &&
20268 (DstSVT == MVT::i8 || DstSVT == MVT::i16 || DstSVT == MVT::i32)))
20269 return SDValue();
20271 assert(SrcSVT.getSizeInBits() > DstSVT.getSizeInBits() && "Bad truncation");
20272 unsigned NumStages = Log2_32(SrcSVT.getSizeInBits() / DstSVT.getSizeInBits());
20274 // Truncation from 128-bit to vXi32 can be better handled with PSHUFD.
20275 // Truncation to sub-64-bit vXi16 can be better handled with PSHUFD/PSHUFLW.
20276 // Truncation from v2i64 to v2i8 can be better handled with PSHUFB.
20277 if ((DstSVT == MVT::i32 && SrcVT.getSizeInBits() <= 128) ||
20278 (DstSVT == MVT::i16 && SrcVT.getSizeInBits() <= (64 * NumStages)) ||
20279 (DstVT == MVT::v2i8 && SrcVT == MVT::v2i64 && Subtarget.hasSSSE3()))
20280 return SDValue();
20282 // Prefer to lower v4i64 -> v4i32 as a shuffle unless we can cheaply
20283 // split this for packing.
20284 if (SrcVT == MVT::v4i64 && DstVT == MVT::v4i32 &&
20285 !isFreeToSplitVector(In.getNode(), DAG) &&
20286 (!Subtarget.hasAVX() || DAG.ComputeNumSignBits(In) != 64))
20287 return SDValue();
20290 // Don't truncate on AVX512 targets using multiple stages of PACK nodes.
20290 if (Subtarget.hasAVX512() && NumStages > 1)
20291 return SDValue();
20293 unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
20294 unsigned NumPackedSignBits = std::min<unsigned>(DstSVT.getSizeInBits(), 16);
20295 unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
20297 // Truncate with PACKUS if we are truncating a vector with leading zero
20298 // bits that extend all the way to the packed/truncated value.
20299 // e.g. Masks, zext_in_reg, etc.
20300 // Pre-SSE41 we can only use PACKUSWB.
20301 KnownBits Known = DAG.computeKnownBits(In);
20302 if ((NumSrcEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros()) {
20303 PackOpcode = X86ISD::PACKUS;
20304 return In;
20307 // Truncate with PACKSS if we are truncating a vector with sign-bits
20308 // that extend all the way to the packed/truncated value.
20309 // e.g. Comparison result, sext_in_reg, etc.
20310 unsigned NumSignBits = DAG.ComputeNumSignBits(In);
20312 // Don't use PACKSS for vXi64 -> vXi32 truncations unless we're dealing with
20313 // a sign splat (or AVX512 VPSRAQ support). ComputeNumSignBits struggles to
20314 // see through BITCASTs later on and combines/simplifications can't then use
20315 // it.
20316 if (DstSVT == MVT::i32 && NumSignBits != SrcSVT.getSizeInBits() &&
20317 !Subtarget.hasAVX512())
20318 return SDValue();
20320 unsigned MinSignBits = NumSrcEltBits - NumPackedSignBits;
20321 if (MinSignBits < NumSignBits) {
20322 PackOpcode = X86ISD::PACKSS;
20323 return In;
20326 // If we have a srl that only generates sign bits that we will discard in
20327 // the truncation, then we can use PACKSS by converting the srl to a sra.
20328 // SimplifyDemandedBits often relaxes sra to srl, so we need to reverse it.
20329 if (In.getOpcode() == ISD::SRL && In->hasOneUse())
20330 if (const APInt *ShAmt = DAG.getValidShiftAmountConstant(
20331 In, APInt::getAllOnes(SrcVT.getVectorNumElements()))) {
20332 if (*ShAmt == MinSignBits) {
20333 PackOpcode = X86ISD::PACKSS;
20334 return DAG.getNode(ISD::SRA, DL, SrcVT, In->ops());
20338 return SDValue();
20341 /// This function lowers a vector truncation of 'extended sign-bits' or
20342 /// 'extended zero-bits' values, from vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32,
20343 /// into X86ISD::PACKSS/PACKUS operations.
20344 static SDValue LowerTruncateVecPackWithSignBits(MVT DstVT, SDValue In,
20345 const SDLoc &DL,
20346 const X86Subtarget &Subtarget,
20347 SelectionDAG &DAG) {
20348 MVT SrcVT = In.getSimpleValueType();
20349 MVT DstSVT = DstVT.getVectorElementType();
20350 MVT SrcSVT = SrcVT.getVectorElementType();
20351 if (!((SrcSVT == MVT::i16 || SrcSVT == MVT::i32 || SrcSVT == MVT::i64) &&
20352 (DstSVT == MVT::i8 || DstSVT == MVT::i16 || DstSVT == MVT::i32)))
20353 return SDValue();
20355 // If the upper half of the source is undef, then attempt to split and
20356 // only truncate the lower half.
20357 if (DstVT.getSizeInBits() >= 128) {
20358 SmallVector<SDValue> LowerOps;
20359 if (SDValue Lo = isUpperSubvectorUndef(In, DL, DAG)) {
20360 MVT DstHalfVT = DstVT.getHalfNumVectorElementsVT();
20361 if (SDValue Res = LowerTruncateVecPackWithSignBits(DstHalfVT, Lo, DL,
20362 Subtarget, DAG))
20363 return widenSubVector(Res, false, Subtarget, DAG, DL,
20364 DstVT.getSizeInBits());
20368 unsigned PackOpcode;
20369 if (SDValue Src =
20370 matchTruncateWithPACK(PackOpcode, DstVT, In, DL, DAG, Subtarget))
20371 return truncateVectorWithPACK(PackOpcode, DstVT, Src, DL, DAG, Subtarget);
20373 return SDValue();
20376 /// This function lowers a vector truncation from vXi32/vXi64 to vXi8/vXi16 into
20377 /// X86ISD::PACKUS/X86ISD::PACKSS operations.
20378 static SDValue LowerTruncateVecPack(MVT DstVT, SDValue In, const SDLoc &DL,
20379 const X86Subtarget &Subtarget,
20380 SelectionDAG &DAG) {
20381 MVT SrcVT = In.getSimpleValueType();
20382 MVT DstSVT = DstVT.getVectorElementType();
20383 MVT SrcSVT = SrcVT.getVectorElementType();
20384 unsigned NumElems = DstVT.getVectorNumElements();
20385 if (!((SrcSVT == MVT::i16 || SrcSVT == MVT::i32 || SrcSVT == MVT::i64) &&
20386 (DstSVT == MVT::i8 || DstSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
20387 NumElems >= 8))
20388 return SDValue();
20390 // SSSE3's pshufb results in fewer instructions in the cases below.
20391 if (Subtarget.hasSSSE3() && NumElems == 8) {
20392 if (SrcSVT == MVT::i16)
20393 return SDValue();
20394 if (SrcSVT == MVT::i32 && (DstSVT == MVT::i8 || !Subtarget.hasSSE41()))
20395 return SDValue();
20398 // If the upper half of the source is undef, then attempt to split and
20399 // only truncate the lower half.
20400 if (DstVT.getSizeInBits() >= 128) {
20401 SmallVector<SDValue> LowerOps;
20402 if (SDValue Lo = isUpperSubvectorUndef(In, DL, DAG)) {
20403 MVT DstHalfVT = DstVT.getHalfNumVectorElementsVT();
20404 if (SDValue Res = LowerTruncateVecPack(DstHalfVT, Lo, DL, Subtarget, DAG))
20405 return widenSubVector(Res, false, Subtarget, DAG, DL,
20406 DstVT.getSizeInBits());
20410 // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
20411 // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
20412 // truncate 2 x v4i32 to v8i16.
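// e.g. a v8i32 -> v8i16 truncation uses PACKUSDW (after masking each element
// with 0xffff) when SSE4.1 is available; a plain SSE2 target falls back to
// PACKSSDW after a sign_extend_inreg, and SSSE3-only targets bail out above
// in favor of PSHUFB.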
20413 if (Subtarget.hasSSE41() || DstSVT == MVT::i8)
20414 return truncateVectorWithPACKUS(DstVT, In, DL, Subtarget, DAG);
20416 if (SrcSVT == MVT::i16 || SrcSVT == MVT::i32)
20417 return truncateVectorWithPACKSS(DstVT, In, DL, Subtarget, DAG);
20419 // Special case vXi64 -> vXi16, shuffle to vXi32 and then use PACKSS.
20420 if (DstSVT == MVT::i16 && SrcSVT == MVT::i64) {
20421 MVT TruncVT = MVT::getVectorVT(MVT::i32, NumElems);
20422 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, In);
20423 return truncateVectorWithPACKSS(DstVT, Trunc, DL, Subtarget, DAG);
20426 return SDValue();
20429 static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
20430 const X86Subtarget &Subtarget) {
20432 SDLoc DL(Op);
20433 MVT VT = Op.getSimpleValueType();
20434 SDValue In = Op.getOperand(0);
20435 MVT InVT = In.getSimpleValueType();
20437 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
20439 // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
20440 unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
20441 if (InVT.getScalarSizeInBits() <= 16) {
20442 if (Subtarget.hasBWI()) {
20443 // Legal; this will be selected to VPMOVB2M / VPMOVW2M.
20444 if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
20445 // We need to shift to get the lsb into the sign position.
20446 // Shifting packed bytes is not supported natively, so bitcast to words.
20447 MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
20448 In = DAG.getNode(ISD::SHL, DL, ExtVT,
20449 DAG.getBitcast(ExtVT, In),
20450 DAG.getConstant(ShiftInx, DL, ExtVT));
20451 In = DAG.getBitcast(InVT, In);
20453 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
20454 In, ISD::SETGT);
20456 // Use TESTD/Q, extended vector to packed dword/qword.
20457 assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
20458 "Unexpected vector type.");
20459 unsigned NumElts = InVT.getVectorNumElements();
20460 assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
20461 // We need to change to a wider element type that we have support for.
20462 // For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
20463 // For 16 element vectors we extend to v16i32 unless we are explicitly
20464 // trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
20465 // we need to split into two 8 element vectors which we can extend to v8i32,
20466 // truncate and concat the results. There's an additional complication if
20467 // the original type is v16i8. In that case we can't split the v16i8
20468 // directly, so we need to shuffle high elements to low and use
20469 // sign_extend_vector_inreg.
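// e.g. (trunc v16i8 %x to v16i1) when avoiding 512-bit vectors becomes:
//   Lo = sign_extend_vector_inreg %x to v8i32
//   Hi = sign_extend_vector_inreg (shuffle the high 8 bytes down) to v8i32
//   concat_vectors (trunc Lo to v8i1), (trunc Hi to v8i1)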
20470 if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
20471 SDValue Lo, Hi;
20472 if (InVT == MVT::v16i8) {
20473 Lo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, MVT::v8i32, In);
20474 Hi = DAG.getVectorShuffle(
20475 InVT, DL, In, In,
20476 {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
20477 Hi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, MVT::v8i32, Hi);
20478 } else {
20479 assert(InVT == MVT::v16i16 && "Unexpected VT!");
20480 Lo = extract128BitVector(In, 0, DAG, DL);
20481 Hi = extract128BitVector(In, 8, DAG, DL);
20483 // We're split now, just emit two truncates and a concat. The two
20484 // truncates will trigger legalization to come back to this function.
20485 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
20486 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
20487 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
20489 // We either have 8 elements or we're allowed to use 512-bit vectors.
20490 // If we have VLX, we want to use the narrowest vector that can get the
20491 // job done so we use vXi32.
20492 MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
20493 MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
20494 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
20495 InVT = ExtVT;
20496 ShiftInx = InVT.getScalarSizeInBits() - 1;
20499 if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
20500 // We need to shift to get the lsb into sign position.
20501 In = DAG.getNode(ISD::SHL, DL, InVT, In,
20502 DAG.getConstant(ShiftInx, DL, InVT));
20504 // If we have DQI, emit a pattern that will be iseled as vpmovq2m/vpmovd2m.
20505 if (Subtarget.hasDQI())
20506 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT), In, ISD::SETGT);
20507 return DAG.getSetCC(DL, VT, In, DAG.getConstant(0, DL, InVT), ISD::SETNE);
20510 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
20511 SDLoc DL(Op);
20512 MVT VT = Op.getSimpleValueType();
20513 SDValue In = Op.getOperand(0);
20514 MVT InVT = In.getSimpleValueType();
20515 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
20516 "Invalid TRUNCATE operation");
20518 // If we're called by the type legalizer, handle a few cases.
20519 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20520 if (!TLI.isTypeLegal(VT) || !TLI.isTypeLegal(InVT)) {
20521 if ((InVT == MVT::v8i64 || InVT == MVT::v16i32 || InVT == MVT::v16i64) &&
20522 VT.is128BitVector() && Subtarget.hasAVX512()) {
20523 assert((InVT == MVT::v16i64 || Subtarget.hasVLX()) &&
20524 "Unexpected subtarget!");
20525 // The default behavior is to truncate one step, concatenate, and then
20526 // truncate the remainder. We'd rather produce two 64-bit results and
20527 // concatenate those.
20528 SDValue Lo, Hi;
20529 std::tie(Lo, Hi) = DAG.SplitVector(In, DL);
20531 EVT LoVT, HiVT;
20532 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
20534 Lo = DAG.getNode(ISD::TRUNCATE, DL, LoVT, Lo);
20535 Hi = DAG.getNode(ISD::TRUNCATE, DL, HiVT, Hi);
20536 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
20539 // Pre-AVX512 (or prefer-256bit) see if we can make use of PACKSS/PACKUS.
20540 if (!Subtarget.hasAVX512() ||
20541 (InVT.is512BitVector() && VT.is256BitVector()))
20542 if (SDValue SignPack =
20543 LowerTruncateVecPackWithSignBits(VT, In, DL, Subtarget, DAG))
20544 return SignPack;
20546 // Pre-AVX512 see if we can make use of PACKSS/PACKUS.
20547 if (!Subtarget.hasAVX512())
20548 return LowerTruncateVecPack(VT, In, DL, Subtarget, DAG);
20550 // Otherwise let default legalization handle it.
20551 return SDValue();
20554 if (VT.getVectorElementType() == MVT::i1)
20555 return LowerTruncateVecI1(Op, DAG, Subtarget);
20557 // Attempt to truncate with PACKUS/PACKSS even on AVX512 if we'd have to
20558 // concat from subvectors to use VPTRUNC etc.
20559 if (!Subtarget.hasAVX512() || isFreeToSplitVector(In.getNode(), DAG))
20560 if (SDValue SignPack =
20561 LowerTruncateVecPackWithSignBits(VT, In, DL, Subtarget, DAG))
20562 return SignPack;
20564 // vpmovqb/w/d, vpmovdb/w, vpmovwb
20565 if (Subtarget.hasAVX512()) {
20566 if (InVT == MVT::v32i16 && !Subtarget.hasBWI()) {
20567 assert(VT == MVT::v32i8 && "Unexpected VT!");
20568 return splitVectorIntUnary(Op, DAG);
20571 // Word to byte truncation is only legal with BWI. Otherwise we have to
20572 // promote to v16i32 and then truncate that. But we should only do that if we
20573 // haven't been asked to avoid 512-bit vectors. The actual promotion to v16i32
20574 // will be handled by isel patterns.
20575 if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
20576 Subtarget.canExtendTo512DQ())
20577 return Op;
20580 // Handle truncation of V256 to V128 using shuffles.
20581 assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");
20583 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
20584 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
20585 if (Subtarget.hasInt256()) {
20586 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
20587 In = DAG.getBitcast(MVT::v8i32, In);
20588 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
20589 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
20590 DAG.getIntPtrConstant(0, DL));
20593 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
20594 DAG.getIntPtrConstant(0, DL));
20595 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
20596 DAG.getIntPtrConstant(2, DL));
20597 static const int ShufMask[] = {0, 2, 4, 6};
20598 return DAG.getVectorShuffle(VT, DL, DAG.getBitcast(MVT::v4i32, OpLo),
20599 DAG.getBitcast(MVT::v4i32, OpHi), ShufMask);
20602 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
20603 // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
20604 if (Subtarget.hasInt256()) {
20605 // The PSHUFB mask:
20606 static const int ShufMask1[] = { 0, 1, 4, 5, 8, 9, 12, 13,
20607 -1, -1, -1, -1, -1, -1, -1, -1,
20608 16, 17, 20, 21, 24, 25, 28, 29,
20609 -1, -1, -1, -1, -1, -1, -1, -1 };
20610 In = DAG.getBitcast(MVT::v32i8, In);
20611 In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
20612 In = DAG.getBitcast(MVT::v4i64, In);
20614 static const int ShufMask2[] = {0, 2, -1, -1};
20615 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, In, ShufMask2);
20616 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
20617 DAG.getIntPtrConstant(0, DL));
20618 return DAG.getBitcast(MVT::v8i16, In);
20621 return Subtarget.hasSSE41()
20622 ? truncateVectorWithPACKUS(VT, In, DL, Subtarget, DAG)
20623 : truncateVectorWithPACKSS(VT, In, DL, Subtarget, DAG);
20626 if (VT == MVT::v16i8 && InVT == MVT::v16i16)
20627 return truncateVectorWithPACKUS(VT, In, DL, Subtarget, DAG);
20629 llvm_unreachable("All 256->128 cases should have been handled above!");
20632 // We can leverage the specific way the "cvttps2dq/cvttpd2dq" instructions
20633 // behave on out-of-range inputs to generate optimized conversions.
20634 static SDValue expandFP_TO_UINT_SSE(MVT VT, SDValue Src, const SDLoc &dl,
20635 SelectionDAG &DAG,
20636 const X86Subtarget &Subtarget) {
20637 MVT SrcVT = Src.getSimpleValueType();
20638 unsigned DstBits = VT.getScalarSizeInBits();
20639 assert(DstBits == 32 && "expandFP_TO_UINT_SSE - only vXi32 supported");
20641 // Calculate the converted result for values in the range 0 to
20642 // 2^31-1 ("Small") and from 2^31 to 2^32-1 ("Big").
20643 SDValue Small = DAG.getNode(X86ISD::CVTTP2SI, dl, VT, Src);
20644 SDValue Big =
20645 DAG.getNode(X86ISD::CVTTP2SI, dl, VT,
20646 DAG.getNode(ISD::FSUB, dl, SrcVT, Src,
20647 DAG.getConstantFP(2147483648.0f, dl, SrcVT)));
20649 // The "CVTTP2SI" instruction conveniently sets the sign bit if
20650 // and only if the value was out of range. So we can use that
20651 // as our indicator that we should use "Big" instead of "Small".
20653 // Use "Small" if "IsOverflown" has all bits cleared
20654 // and "0x80000000 | Big" if all bits in "IsOverflown" are set.
20656 // AVX1 can't use the signsplat masking for 256-bit vectors - we have to
20657 // use the slightly slower blendv select instead.
20658 if (VT == MVT::v8i32 && !Subtarget.hasAVX2()) {
20659 SDValue Overflow = DAG.getNode(ISD::OR, dl, VT, Small, Big);
20660 return DAG.getNode(X86ISD::BLENDV, dl, VT, Small, Overflow, Small);
20663 SDValue IsOverflown =
20664 DAG.getNode(X86ISD::VSRAI, dl, VT, Small,
20665 DAG.getTargetConstant(DstBits - 1, dl, MVT::i8));
20666 return DAG.getNode(ISD::OR, dl, VT, Small,
20667 DAG.getNode(ISD::AND, dl, VT, Big, IsOverflown));
20670 SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
20671 bool IsStrict = Op->isStrictFPOpcode();
20672 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
20673 Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
20674 MVT VT = Op->getSimpleValueType(0);
20675 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
20676 SDValue Chain = IsStrict ? Op->getOperand(0) : SDValue();
20677 MVT SrcVT = Src.getSimpleValueType();
20678 SDLoc dl(Op);
20680 SDValue Res;
20681 if (isSoftF16(SrcVT, Subtarget)) {
20682 MVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
20683 if (IsStrict)
20684 return DAG.getNode(Op.getOpcode(), dl, {VT, MVT::Other},
20685 {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
20686 {NVT, MVT::Other}, {Chain, Src})});
20687 return DAG.getNode(Op.getOpcode(), dl, VT,
20688 DAG.getNode(ISD::FP_EXTEND, dl, NVT, Src));
20689 } else if (isTypeLegal(SrcVT) && isLegalConversion(VT, IsSigned, Subtarget)) {
20690 return Op;
20693 if (VT.isVector()) {
20694 if (VT == MVT::v2i1 && SrcVT == MVT::v2f64) {
20695 MVT ResVT = MVT::v4i32;
20696 MVT TruncVT = MVT::v4i1;
20697 unsigned Opc;
20698 if (IsStrict)
20699 Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
20700 else
20701 Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
20703 if (!IsSigned && !Subtarget.hasVLX()) {
20704 assert(Subtarget.useAVX512Regs() && "Unexpected features!");
20705 // Widen to 512-bits.
20706 ResVT = MVT::v8i32;
20707 TruncVT = MVT::v8i1;
20708 Opc = Op.getOpcode();
20709 // Need to concat with zero vector for strict fp to avoid spurious
20710 // exceptions.
20711 // TODO: Should we just do this for non-strict as well?
20712 SDValue Tmp = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v8f64)
20713 : DAG.getUNDEF(MVT::v8f64);
20714 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64, Tmp, Src,
20715 DAG.getIntPtrConstant(0, dl));
20717 if (IsStrict) {
20718 Res = DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {Chain, Src});
20719 Chain = Res.getValue(1);
20720 } else {
20721 Res = DAG.getNode(Opc, dl, ResVT, Src);
20724 Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
20725 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
20726 DAG.getIntPtrConstant(0, dl));
20727 if (IsStrict)
20728 return DAG.getMergeValues({Res, Chain}, dl);
20729 return Res;
20732 if (Subtarget.hasFP16() && SrcVT.getVectorElementType() == MVT::f16) {
20733 if (VT == MVT::v8i16 || VT == MVT::v16i16 || VT == MVT::v32i16)
20734 return Op;
20736 MVT ResVT = VT;
20737 MVT EleVT = VT.getVectorElementType();
20738 if (EleVT != MVT::i64)
20739 ResVT = EleVT == MVT::i32 ? MVT::v4i32 : MVT::v8i16;
20741 if (SrcVT != MVT::v8f16) {
20742 SDValue Tmp =
20743 IsStrict ? DAG.getConstantFP(0.0, dl, SrcVT) : DAG.getUNDEF(SrcVT);
20744 SmallVector<SDValue, 4> Ops(SrcVT == MVT::v2f16 ? 4 : 2, Tmp);
20745 Ops[0] = Src;
20746 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f16, Ops);
20749 if (IsStrict) {
20750 Res = DAG.getNode(IsSigned ? X86ISD::STRICT_CVTTP2SI
20751 : X86ISD::STRICT_CVTTP2UI,
20752 dl, {ResVT, MVT::Other}, {Chain, Src});
20753 Chain = Res.getValue(1);
20754 } else {
20755 Res = DAG.getNode(IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI, dl,
20756 ResVT, Src);
20759 // TODO: Need to add exception check code for strict FP.
20760 if (EleVT.getSizeInBits() < 16) {
20761 ResVT = MVT::getVectorVT(EleVT, 8);
20762 Res = DAG.getNode(ISD::TRUNCATE, dl, ResVT, Res);
20765 if (ResVT != VT)
20766 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
20767 DAG.getIntPtrConstant(0, dl));
20769 if (IsStrict)
20770 return DAG.getMergeValues({Res, Chain}, dl);
20771 return Res;
20774 // v8f32/v16f32/v8f64->v8i16/v16i16 need to widen first.
20775 if (VT.getVectorElementType() == MVT::i16) {
20776 assert((SrcVT.getVectorElementType() == MVT::f32 ||
20777 SrcVT.getVectorElementType() == MVT::f64) &&
20778 "Expected f32/f64 vector!");
20779 MVT NVT = VT.changeVectorElementType(MVT::i32);
20780 if (IsStrict) {
20781 Res = DAG.getNode(IsSigned ? ISD::STRICT_FP_TO_SINT
20782 : ISD::STRICT_FP_TO_UINT,
20783 dl, {NVT, MVT::Other}, {Chain, Src});
20784 Chain = Res.getValue(1);
20785 } else {
20786 Res = DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, dl,
20787 NVT, Src);
20790 // TODO: Need to add exception check code for strict FP.
20791 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20793 if (IsStrict)
20794 return DAG.getMergeValues({Res, Chain}, dl);
20795 return Res;
20798 // v8f64->v8i32 is legal, but we need v8i32 to be custom for v8f32.
20799 if (VT == MVT::v8i32 && SrcVT == MVT::v8f64) {
20800 assert(!IsSigned && "Expected unsigned conversion!");
20801 assert(Subtarget.useAVX512Regs() && "Requires avx512f");
20802 return Op;
20805 // Widen vXi32 fp_to_uint with avx512f to 512-bit source.
20806 if ((VT == MVT::v4i32 || VT == MVT::v8i32) &&
20807 (SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32 || SrcVT == MVT::v8f32) &&
20808 Subtarget.useAVX512Regs()) {
20809 assert(!IsSigned && "Expected unsigned conversion!");
20810 assert(!Subtarget.hasVLX() && "Unexpected features!");
20811 MVT WideVT = SrcVT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
20812 MVT ResVT = SrcVT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
20813 // Need to concat with zero vector for strict fp to avoid spurious
20814 // exceptions.
20815 // TODO: Should we just do this for non-strict as well?
20816 SDValue Tmp =
20817 IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
20818 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
20819 DAG.getIntPtrConstant(0, dl));
20821 if (IsStrict) {
20822 Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, dl, {ResVT, MVT::Other},
20823 {Chain, Src});
20824 Chain = Res.getValue(1);
20825 } else {
20826 Res = DAG.getNode(ISD::FP_TO_UINT, dl, ResVT, Src);
20829 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
20830 DAG.getIntPtrConstant(0, dl));
20832 if (IsStrict)
20833 return DAG.getMergeValues({Res, Chain}, dl);
20834 return Res;
20837 // Widen vXi64 fp_to_uint/fp_to_sint with avx512dq to 512-bit source.
20838 if ((VT == MVT::v2i64 || VT == MVT::v4i64) &&
20839 (SrcVT == MVT::v2f64 || SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32) &&
20840 Subtarget.useAVX512Regs() && Subtarget.hasDQI()) {
20841 assert(!Subtarget.hasVLX() && "Unexpected features!");
20842 MVT WideVT = SrcVT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
20843 // Need to concat with zero vector for strict fp to avoid spurious
20844 // exceptions.
20845 // TODO: Should we just do this for non-strict as well?
20846 SDValue Tmp =
20847 IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
20848 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
20849 DAG.getIntPtrConstant(0, dl));
20851 if (IsStrict) {
20852 Res = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
20853 {Chain, Src});
20854 Chain = Res.getValue(1);
20855 } else {
20856 Res = DAG.getNode(Op.getOpcode(), dl, MVT::v8i64, Src);
20859 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
20860 DAG.getIntPtrConstant(0, dl));
20862 if (IsStrict)
20863 return DAG.getMergeValues({Res, Chain}, dl);
20864 return Res;
20867 if (VT == MVT::v2i64 && SrcVT == MVT::v2f32) {
20868 if (!Subtarget.hasVLX()) {
20869 // Non-strict nodes without VLX can be widened to v4f32->v4i64 by the type
20870 // legalizer and then widened again by vector op legalization.
20871 if (!IsStrict)
20872 return SDValue();
20874 SDValue Zero = DAG.getConstantFP(0.0, dl, MVT::v2f32);
20875 SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f32,
20876 {Src, Zero, Zero, Zero});
20877 Tmp = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
20878 {Chain, Tmp});
20879 SDValue Chain = Tmp.getValue(1);
20880 Tmp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Tmp,
20881 DAG.getIntPtrConstant(0, dl));
20882 return DAG.getMergeValues({Tmp, Chain}, dl);
20885 assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL");
20886 SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
20887 DAG.getUNDEF(MVT::v2f32));
20888 if (IsStrict) {
20889 unsigned Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI
20890 : X86ISD::STRICT_CVTTP2UI;
20891 return DAG.getNode(Opc, dl, {VT, MVT::Other}, {Op->getOperand(0), Tmp});
20893 unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
20894 return DAG.getNode(Opc, dl, VT, Tmp);
20897 // Generate optimized instructions for pre-AVX512 unsigned conversions from
20898 // vXf32/vXf64 to vXi32.
20899 if ((VT == MVT::v4i32 && SrcVT == MVT::v4f32) ||
20900 (VT == MVT::v4i32 && SrcVT == MVT::v4f64) ||
20901 (VT == MVT::v8i32 && SrcVT == MVT::v8f32)) {
20902 assert(!IsSigned && "Expected unsigned conversion!");
20903 return expandFP_TO_UINT_SSE(VT, Src, dl, DAG, Subtarget);
20906 return SDValue();
20909 assert(!VT.isVector());
20911 bool UseSSEReg = isScalarFPTypeInSSEReg(SrcVT);
20913 if (!IsSigned && UseSSEReg) {
20914 // Conversions from f32/f64 with AVX512 should be legal.
20915 if (Subtarget.hasAVX512())
20916 return Op;
20918 // We can leverage the specific way the "cvttss2si/cvttsd2si" instructions
20919 // behave on out-of-range inputs to generate optimized conversions.
20920 if (!IsStrict && ((VT == MVT::i32 && !Subtarget.is64Bit()) ||
20921 (VT == MVT::i64 && Subtarget.is64Bit()))) {
20922 unsigned DstBits = VT.getScalarSizeInBits();
20923 APInt UIntLimit = APInt::getSignMask(DstBits);
20924 SDValue FloatOffset = DAG.getNode(ISD::UINT_TO_FP, dl, SrcVT,
20925 DAG.getConstant(UIntLimit, dl, VT));
20926 MVT SrcVecVT = MVT::getVectorVT(SrcVT, 128 / SrcVT.getScalarSizeInBits());
20928 // Calculate the converted result for values in the range:
20929 // (i32) 0 to 2^31-1 ("Small") and from 2^31 to 2^32-1 ("Big").
20930 // (i64) 0 to 2^63-1 ("Small") and from 2^63 to 2^64-1 ("Big").
20931 SDValue Small =
20932 DAG.getNode(X86ISD::CVTTS2SI, dl, VT,
20933 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, SrcVecVT, Src));
20934 SDValue Big = DAG.getNode(
20935 X86ISD::CVTTS2SI, dl, VT,
20936 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, SrcVecVT,
20937 DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FloatOffset)));
20939 // The "CVTTS2SI" instruction conveniently sets the sign bit if
20940 // and only if the value was out of range. So we can use that
20941 // as our indicator that we should use "Big" instead of "Small".
20943 // Use "Small" if "IsOverflown" has all bits cleared
20944 // and "0x80000000 | Big" if all bits in "IsOverflown" are set.
20945 SDValue IsOverflown = DAG.getNode(
20946 ISD::SRA, dl, VT, Small, DAG.getConstant(DstBits - 1, dl, MVT::i8));
20947 return DAG.getNode(ISD::OR, dl, VT, Small,
20948 DAG.getNode(ISD::AND, dl, VT, Big, IsOverflown));
20951 // Use default expansion for i64.
20952 if (VT == MVT::i64)
20953 return SDValue();
20955 assert(VT == MVT::i32 && "Unexpected VT!");
20957 // Promote i32 to i64 and use a signed operation on 64-bit targets.
20958 // FIXME: This does not generate an invalid exception if the input does not
20959 // fit in i32. PR44019
20960 if (Subtarget.is64Bit()) {
20961 if (IsStrict) {
20962 Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {MVT::i64, MVT::Other},
20963 {Chain, Src});
20964 Chain = Res.getValue(1);
20965 } else
20966 Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i64, Src);
20968 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20969 if (IsStrict)
20970 return DAG.getMergeValues({Res, Chain}, dl);
20971 return Res;
20974 // Use default expansion for SSE1/2 targets without SSE3. With SSE3 we can
20975 // use fisttp which will be handled later.
20976 if (!Subtarget.hasSSE3())
20977 return SDValue();
20980 // Promote i16 to i32 if we can use a SSE operation or the type is f128.
20981 // FIXME: This does not generate an invalid exception if the input does not
20982 // fit in i16. PR44019
20983 if (VT == MVT::i16 && (UseSSEReg || SrcVT == MVT::f128)) {
20984 assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
20985 if (IsStrict) {
20986 Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {MVT::i32, MVT::Other},
20987 {Chain, Src});
20988 Chain = Res.getValue(1);
20989 } else
20990 Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
20992 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20993 if (IsStrict)
20994 return DAG.getMergeValues({Res, Chain}, dl);
20995 return Res;
20998 // If this is a FP_TO_SINT using SSEReg we're done.
20999 if (UseSSEReg && IsSigned)
21000 return Op;
21002 // fp128 needs to use a libcall.
21003 if (SrcVT == MVT::f128) {
21004 RTLIB::Libcall LC;
21005 if (IsSigned)
21006 LC = RTLIB::getFPTOSINT(SrcVT, VT);
21007 else
21008 LC = RTLIB::getFPTOUINT(SrcVT, VT);
21010 MakeLibCallOptions CallOptions;
21011 std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, LC, VT, Src, CallOptions,
21012 SDLoc(Op), Chain);
21014 if (IsStrict)
21015 return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
21017 return Tmp.first;
21020 // Fall back to X87.
21021 if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned, Chain)) {
21022 if (IsStrict)
21023 return DAG.getMergeValues({V, Chain}, dl);
21024 return V;
21027 llvm_unreachable("Expected FP_TO_INTHelper to handle all remaining cases.");
21030 SDValue X86TargetLowering::LowerLRINT_LLRINT(SDValue Op,
21031 SelectionDAG &DAG) const {
21032 SDValue Src = Op.getOperand(0);
21033 MVT SrcVT = Src.getSimpleValueType();
21035 if (SrcVT == MVT::f16)
21036 return SDValue();
21038 // If the source is in an SSE register, the node is Legal.
21039 if (isScalarFPTypeInSSEReg(SrcVT))
21040 return Op;
21042 return LRINT_LLRINTHelper(Op.getNode(), DAG);
21045 SDValue X86TargetLowering::LRINT_LLRINTHelper(SDNode *N,
21046 SelectionDAG &DAG) const {
21047 EVT DstVT = N->getValueType(0);
21048 SDValue Src = N->getOperand(0);
21049 EVT SrcVT = Src.getValueType();
21051 if (SrcVT != MVT::f32 && SrcVT != MVT::f64 && SrcVT != MVT::f80) {
21052 // f16 must be promoted before using the lowering in this routine.
21053 // fp128 does not use this lowering.
21054 return SDValue();
21057 SDLoc DL(N);
21058 SDValue Chain = DAG.getEntryNode();
21060 bool UseSSE = isScalarFPTypeInSSEReg(SrcVT);
21062 // If we're converting from SSE, the stack slot needs to hold both types.
21063 // Otherwise it only needs to hold the DstVT.
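// e.g. an i64 llrint of an f64 held in an SSE register stores the f64 to the
// slot, FLDs it into the x87 stack, FISTs the i64 result back into the same
// slot, and finally loads the i64 from there.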
21064 EVT OtherVT = UseSSE ? SrcVT : DstVT;
21065 SDValue StackPtr = DAG.CreateStackTemporary(DstVT, OtherVT);
21066 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
21067 MachinePointerInfo MPI =
21068 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
21070 if (UseSSE) {
21071 assert(DstVT == MVT::i64 && "Invalid LRINT/LLRINT to lower!");
21072 Chain = DAG.getStore(Chain, DL, Src, StackPtr, MPI);
21073 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
21074 SDValue Ops[] = { Chain, StackPtr };
21076 Src = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, SrcVT, MPI,
21077 /*Align*/ std::nullopt,
21078 MachineMemOperand::MOLoad);
21079 Chain = Src.getValue(1);
21082 SDValue StoreOps[] = { Chain, Src, StackPtr };
21083 Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, DL, DAG.getVTList(MVT::Other),
21084 StoreOps, DstVT, MPI, /*Align*/ std::nullopt,
21085 MachineMemOperand::MOStore);
21087 return DAG.getLoad(DstVT, DL, Chain, StackPtr, MPI);
21090 SDValue
21091 X86TargetLowering::LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const {
21092 // This is based on the TargetLowering::expandFP_TO_INT_SAT implementation,
21093 // but making use of X86 specifics to produce better instruction sequences.
21094 SDNode *Node = Op.getNode();
21095 bool IsSigned = Node->getOpcode() == ISD::FP_TO_SINT_SAT;
21096 unsigned FpToIntOpcode = IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
21097 SDLoc dl(SDValue(Node, 0));
21098 SDValue Src = Node->getOperand(0);
21100 // There are three types involved here: SrcVT is the source floating point
21101 // type, DstVT is the type of the result, and TmpVT is the result of the
21102 // intermediate FP_TO_*INT operation we'll use (which may be a promotion of
21103 // DstVT).
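// e.g. for an i8 fptosi_sat of an f32 source: SrcVT is f32, DstVT is i8, and
// TmpVT is promoted to i32 below so the intermediate FP_TO_SINT runs at a
// natively supported width, with the result truncated afterwards.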
21104 EVT SrcVT = Src.getValueType();
21105 EVT DstVT = Node->getValueType(0);
21106 EVT TmpVT = DstVT;
21108 // This code is only for floats and doubles. Fall back to generic code for
21109 // anything else.
21110 if (!isScalarFPTypeInSSEReg(SrcVT) || isSoftF16(SrcVT, Subtarget))
21111 return SDValue();
21113 EVT SatVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
21114 unsigned SatWidth = SatVT.getScalarSizeInBits();
21115 unsigned DstWidth = DstVT.getScalarSizeInBits();
21116 unsigned TmpWidth = TmpVT.getScalarSizeInBits();
21117 assert(SatWidth <= DstWidth && SatWidth <= TmpWidth &&
21118 "Expected saturation width smaller than result width");
21120 // Promote result of FP_TO_*INT to at least 32 bits.
21121 if (TmpWidth < 32) {
21122 TmpVT = MVT::i32;
21123 TmpWidth = 32;
21126 // Promote conversions to unsigned 32-bit to 64-bit, because it will allow
21127 // us to use a native signed conversion instead.
21128 if (SatWidth == 32 && !IsSigned && Subtarget.is64Bit()) {
21129 TmpVT = MVT::i64;
21130 TmpWidth = 64;
21133 // If the saturation width is smaller than the size of the temporary result,
21134 // we can always use signed conversion, which is native.
21135 if (SatWidth < TmpWidth)
21136 FpToIntOpcode = ISD::FP_TO_SINT;
21138 // Determine minimum and maximum integer values and their corresponding
21139 // floating-point values.
21140 APInt MinInt, MaxInt;
21141 if (IsSigned) {
21142 MinInt = APInt::getSignedMinValue(SatWidth).sext(DstWidth);
21143 MaxInt = APInt::getSignedMaxValue(SatWidth).sext(DstWidth);
21144 } else {
21145 MinInt = APInt::getMinValue(SatWidth).zext(DstWidth);
21146 MaxInt = APInt::getMaxValue(SatWidth).zext(DstWidth);
21149 APFloat MinFloat(DAG.EVTToAPFloatSemantics(SrcVT));
21150 APFloat MaxFloat(DAG.EVTToAPFloatSemantics(SrcVT));
21152 APFloat::opStatus MinStatus = MinFloat.convertFromAPInt(
21153 MinInt, IsSigned, APFloat::rmTowardZero);
21154 APFloat::opStatus MaxStatus = MaxFloat.convertFromAPInt(
21155 MaxInt, IsSigned, APFloat::rmTowardZero);
21156 bool AreExactFloatBounds = !(MinStatus & APFloat::opStatus::opInexact)
21157 && !(MaxStatus & APFloat::opStatus::opInexact);
21159 SDValue MinFloatNode = DAG.getConstantFP(MinFloat, dl, SrcVT);
21160 SDValue MaxFloatNode = DAG.getConstantFP(MaxFloat, dl, SrcVT);
21162 // If the integer bounds are exactly representable as floats, emit a
21163 // min+max+fptoi sequence. Otherwise use comparisons and selects.
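// e.g. an i16 saturating conversion from f32 has exact bounds (-32768.0 and
// 32767.0 are both representable), so the min+max path is taken; a signed i32
// saturating conversion from f32 does not (2^31 - 1 is not exactly
// representable), so the compare+select path below is used instead.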
21164 if (AreExactFloatBounds) {
21165 if (DstVT != TmpVT) {
21166 // Clamp by MinFloat from below. If Src is NaN, propagate NaN.
21167 SDValue MinClamped = DAG.getNode(
21168 X86ISD::FMAX, dl, SrcVT, MinFloatNode, Src);
21169 // Clamp by MaxFloat from above. If Src is NaN, propagate NaN.
21170 SDValue BothClamped = DAG.getNode(
21171 X86ISD::FMIN, dl, SrcVT, MaxFloatNode, MinClamped);
21172 // Convert clamped value to integer.
21173 SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, TmpVT, BothClamped);
21175 // NaN will become INDVAL, with the top bit set and the rest zero.
21176 // Truncation will discard the top bit, resulting in zero.
21177 return DAG.getNode(ISD::TRUNCATE, dl, DstVT, FpToInt);
21180 // Clamp by MinFloat from below. If Src is NaN, the result is MinFloat.
21181 SDValue MinClamped = DAG.getNode(
21182 X86ISD::FMAX, dl, SrcVT, Src, MinFloatNode);
21183 // Clamp by MaxFloat from above. NaN cannot occur.
21184 SDValue BothClamped = DAG.getNode(
21185 X86ISD::FMINC, dl, SrcVT, MinClamped, MaxFloatNode);
21186 // Convert clamped value to integer.
21187 SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, DstVT, BothClamped);
21189 if (!IsSigned) {
21190 // In the unsigned case we're done, because we mapped NaN to MinFloat,
21191 // which is zero.
21192 return FpToInt;
21195 // Otherwise, select zero if Src is NaN.
21196 SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
21197 return DAG.getSelectCC(
21198 dl, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
21201 SDValue MinIntNode = DAG.getConstant(MinInt, dl, DstVT);
21202 SDValue MaxIntNode = DAG.getConstant(MaxInt, dl, DstVT);
21204 // Result of direct conversion, which may be selected away.
21205 SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, TmpVT, Src);
21207 if (DstVT != TmpVT) {
21208 // NaN will become INDVAL, with the top bit set and the rest zero.
21209 // Truncation will discard the top bit, resulting in zero.
21210 FpToInt = DAG.getNode(ISD::TRUNCATE, dl, DstVT, FpToInt);
21213 SDValue Select = FpToInt;
21214 // For signed conversions where we saturate to the same size as the
21215 // result type of the fptoi instructions, INDVAL coincides with integer
21216 // minimum, so we don't need to explicitly check it.
21217 if (!IsSigned || SatWidth != TmpVT.getScalarSizeInBits()) {
21218 // If Src ULT MinFloat, select MinInt. In particular, this also selects
21219 // MinInt if Src is NaN.
21220 Select = DAG.getSelectCC(
21221 dl, Src, MinFloatNode, MinIntNode, Select, ISD::CondCode::SETULT);
21224 // If Src OGT MaxFloat, select MaxInt.
21225 Select = DAG.getSelectCC(
21226 dl, Src, MaxFloatNode, MaxIntNode, Select, ISD::CondCode::SETOGT);
21228 // In the unsigned case we are done, because we mapped NaN to MinInt, which
21229 // is already zero. The promoted case was already handled above.
21230 if (!IsSigned || DstVT != TmpVT) {
21231 return Select;
21234 // Otherwise, select 0 if Src is NaN.
21235 SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
21236 return DAG.getSelectCC(
21237 dl, Src, Src, ZeroInt, Select, ISD::CondCode::SETUO);
21240 SDValue X86TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
21241 bool IsStrict = Op->isStrictFPOpcode();
21243 SDLoc DL(Op);
21244 MVT VT = Op.getSimpleValueType();
21245 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
21246 SDValue In = Op.getOperand(IsStrict ? 1 : 0);
21247 MVT SVT = In.getSimpleValueType();
21249 // Let f16->f80 get lowered to a libcall, except for Darwin, where we should
21250 // lower it to an fp_extend via f32 (as only f16<->f32 libcalls are available).
21251 if (VT == MVT::f128 || (SVT == MVT::f16 && VT == MVT::f80 &&
21252 !Subtarget.getTargetTriple().isOSDarwin()))
21253 return SDValue();
21255 if ((SVT == MVT::v8f16 && Subtarget.hasF16C()) ||
21256 (SVT == MVT::v16f16 && Subtarget.useAVX512Regs()))
21257 return Op;
21259 if (SVT == MVT::f16) {
21260 if (Subtarget.hasFP16())
21261 return Op;
21263 if (VT != MVT::f32) {
21264 if (IsStrict)
21265 return DAG.getNode(
21266 ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other},
21267 {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, DL,
21268 {MVT::f32, MVT::Other}, {Chain, In})});
21270 return DAG.getNode(ISD::FP_EXTEND, DL, VT,
21271 DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, In));
21274 if (!Subtarget.hasF16C()) {
21275 if (!Subtarget.getTargetTriple().isOSDarwin())
21276 return SDValue();
21278 assert(VT == MVT::f32 && SVT == MVT::f16 && "unexpected extend libcall");
21280 // Need a libcall, but the ABI for f16 is soft-float on macOS.
21281 TargetLowering::CallLoweringInfo CLI(DAG);
21282 Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
21284 In = DAG.getBitcast(MVT::i16, In);
21285 TargetLowering::ArgListTy Args;
21286 TargetLowering::ArgListEntry Entry;
21287 Entry.Node = In;
21288 Entry.Ty = EVT(MVT::i16).getTypeForEVT(*DAG.getContext());
21289 Entry.IsSExt = false;
21290 Entry.IsZExt = true;
21291 Args.push_back(Entry);
21293 SDValue Callee = DAG.getExternalSymbol(
21294 getLibcallName(RTLIB::FPEXT_F16_F32),
21295 getPointerTy(DAG.getDataLayout()));
21296 CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
21297 CallingConv::C, EVT(VT).getTypeForEVT(*DAG.getContext()), Callee,
21298 std::move(Args));
21300 SDValue Res;
21301 std::tie(Res, Chain) = LowerCallTo(CLI);
21302 if (IsStrict)
21303 Res = DAG.getMergeValues({Res, Chain}, DL);
21305 return Res;
21308 In = DAG.getBitcast(MVT::i16, In);
21309 In = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v8i16,
21310 getZeroVector(MVT::v8i16, Subtarget, DAG, DL), In,
21311 DAG.getIntPtrConstant(0, DL));
21312 SDValue Res;
21313 if (IsStrict) {
21314 Res = DAG.getNode(X86ISD::STRICT_CVTPH2PS, DL, {MVT::v4f32, MVT::Other},
21315 {Chain, In});
21316 Chain = Res.getValue(1);
21317 } else {
21318 Res = DAG.getNode(X86ISD::CVTPH2PS, DL, MVT::v4f32, In,
21319 DAG.getTargetConstant(4, DL, MVT::i32));
21321 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Res,
21322 DAG.getIntPtrConstant(0, DL));
21323 if (IsStrict)
21324 return DAG.getMergeValues({Res, Chain}, DL);
21325 return Res;
21328 if (!SVT.isVector())
21329 return Op;
21331 if (SVT.getVectorElementType() == MVT::bf16) {
21332 // FIXME: Do we need to support strict FP?
21333 assert(!IsStrict && "Strict FP doesn't support BF16");
21334 if (VT.getVectorElementType() == MVT::f64) {
21335 MVT TmpVT = VT.changeVectorElementType(MVT::f32);
21336 return DAG.getNode(ISD::FP_EXTEND, DL, VT,
21337 DAG.getNode(ISD::FP_EXTEND, DL, TmpVT, In));
21339 assert(VT.getVectorElementType() == MVT::f32 && "Unexpected fpext");
21340 MVT NVT = SVT.changeVectorElementType(MVT::i32);
21341 In = DAG.getBitcast(SVT.changeTypeToInteger(), In);
21342 In = DAG.getNode(ISD::ZERO_EXTEND, DL, NVT, In);
21343 In = DAG.getNode(ISD::SHL, DL, NVT, In, DAG.getConstant(16, DL, NVT));
21344 return DAG.getBitcast(VT, In);
21347 if (SVT.getVectorElementType() == MVT::f16) {
21348 if (Subtarget.hasFP16() && isTypeLegal(SVT))
21349 return Op;
21350 assert(Subtarget.hasF16C() && "Unexpected features!");
21351 if (SVT == MVT::v2f16)
21352 In = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f16, In,
21353 DAG.getUNDEF(MVT::v2f16));
21354 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8f16, In,
21355 DAG.getUNDEF(MVT::v4f16));
21356 if (IsStrict)
21357 return DAG.getNode(X86ISD::STRICT_VFPEXT, DL, {VT, MVT::Other},
21358 {Op->getOperand(0), Res});
21359 return DAG.getNode(X86ISD::VFPEXT, DL, VT, Res);
21360 } else if (VT == MVT::v4f64 || VT == MVT::v8f64) {
21361 return Op;
21364 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
21366 SDValue Res =
21367 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32, In, DAG.getUNDEF(SVT));
21368 if (IsStrict)
21369 return DAG.getNode(X86ISD::STRICT_VFPEXT, DL, {VT, MVT::Other},
21370 {Op->getOperand(0), Res});
21371 return DAG.getNode(X86ISD::VFPEXT, DL, VT, Res);
21374 SDValue X86TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
21375 bool IsStrict = Op->isStrictFPOpcode();
21377 SDLoc DL(Op);
21378 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
21379 SDValue In = Op.getOperand(IsStrict ? 1 : 0);
21380 MVT VT = Op.getSimpleValueType();
21381 MVT SVT = In.getSimpleValueType();
21383 if (SVT == MVT::f128 || (VT == MVT::f16 && SVT == MVT::f80))
21384 return SDValue();
21386 if (VT == MVT::f16 && (SVT == MVT::f64 || SVT == MVT::f32) &&
21387 !Subtarget.hasFP16() && (SVT == MVT::f64 || !Subtarget.hasF16C())) {
21388 if (!Subtarget.getTargetTriple().isOSDarwin())
21389 return SDValue();
21391 // We need a libcall, but the ABI for f16 libcalls on macOS is soft-float.
21392 TargetLowering::CallLoweringInfo CLI(DAG);
21393 Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
21395 TargetLowering::ArgListTy Args;
21396 TargetLowering::ArgListEntry Entry;
21397 Entry.Node = In;
21398 Entry.Ty = EVT(SVT).getTypeForEVT(*DAG.getContext());
21399 Entry.IsSExt = false;
21400 Entry.IsZExt = true;
21401 Args.push_back(Entry);
21403 SDValue Callee = DAG.getExternalSymbol(
21404 getLibcallName(SVT == MVT::f64 ? RTLIB::FPROUND_F64_F16
21405 : RTLIB::FPROUND_F32_F16),
21406 getPointerTy(DAG.getDataLayout()));
21407 CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
21408 CallingConv::C, EVT(MVT::i16).getTypeForEVT(*DAG.getContext()), Callee,
21409 std::move(Args));
21411 SDValue Res;
21412 std::tie(Res, Chain) = LowerCallTo(CLI);
21414 Res = DAG.getBitcast(MVT::f16, Res);
21416 if (IsStrict)
21417 Res = DAG.getMergeValues({Res, Chain}, DL);
21419 return Res;
21422 if (VT.getScalarType() == MVT::bf16) {
21423 if (SVT.getScalarType() == MVT::f32 && isTypeLegal(VT))
21424 return Op;
21425 return SDValue();
21428 if (VT.getScalarType() == MVT::f16 && !Subtarget.hasFP16()) {
21429 if (!Subtarget.hasF16C() || SVT.getScalarType() != MVT::f32)
21430 return SDValue();
21432 if (VT.isVector())
21433 return Op;
21435 SDValue Res;
21436 SDValue Rnd = DAG.getTargetConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, DL,
21437 MVT::i32);
21438 if (IsStrict) {
21439 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4f32,
21440 DAG.getConstantFP(0, DL, MVT::v4f32), In,
21441 DAG.getIntPtrConstant(0, DL));
21442 Res = DAG.getNode(X86ISD::STRICT_CVTPS2PH, DL, {MVT::v8i16, MVT::Other},
21443 {Chain, Res, Rnd});
21444 Chain = Res.getValue(1);
21445 } else {
21446 // FIXME: Should we use zeros for upper elements for non-strict?
21447 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, In);
21448 Res = DAG.getNode(X86ISD::CVTPS2PH, DL, MVT::v8i16, Res, Rnd);
21451 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i16, Res,
21452 DAG.getIntPtrConstant(0, DL));
21453 Res = DAG.getBitcast(MVT::f16, Res);
21455 if (IsStrict)
21456 return DAG.getMergeValues({Res, Chain}, DL);
21458 return Res;
21461 return Op;
21464 static SDValue LowerFP16_TO_FP(SDValue Op, SelectionDAG &DAG) {
21465 bool IsStrict = Op->isStrictFPOpcode();
21466 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
21467 assert(Src.getValueType() == MVT::i16 && Op.getValueType() == MVT::f32 &&
21468 "Unexpected VT!");
21470 SDLoc dl(Op);
21471 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16,
21472 DAG.getConstant(0, dl, MVT::v8i16), Src,
21473 DAG.getIntPtrConstant(0, dl));
21475 SDValue Chain;
21476 if (IsStrict) {
21477 Res = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {MVT::v4f32, MVT::Other},
21478 {Op.getOperand(0), Res});
21479 Chain = Res.getValue(1);
21480 } else {
21481 Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
21484 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
21485 DAG.getIntPtrConstant(0, dl));
21487 if (IsStrict)
21488 return DAG.getMergeValues({Res, Chain}, dl);
21490 return Res;
21493 static SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) {
21494 bool IsStrict = Op->isStrictFPOpcode();
21495 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
21496 assert(Src.getValueType() == MVT::f32 && Op.getValueType() == MVT::i16 &&
21497 "Unexpected VT!");
21499 SDLoc dl(Op);
21500 SDValue Res, Chain;
21501 if (IsStrict) {
21502 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4f32,
21503 DAG.getConstantFP(0, dl, MVT::v4f32), Src,
21504 DAG.getIntPtrConstant(0, dl));
21505 Res = DAG.getNode(
21506 X86ISD::STRICT_CVTPS2PH, dl, {MVT::v8i16, MVT::Other},
21507 {Op.getOperand(0), Res, DAG.getTargetConstant(4, dl, MVT::i32)});
21508 Chain = Res.getValue(1);
21509 } else {
21510 // FIXME: Should we use zeros for upper elements for non-strict?
21511 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, Src);
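// The immediate 4 sets bit 2 of the CVTPS2PH rounding-control byte, i.e. use
// the current rounding mode from MXCSR rather than a fixed imm-encoded mode.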
21512 Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
21513 DAG.getTargetConstant(4, dl, MVT::i32));
21516 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Res,
21517 DAG.getIntPtrConstant(0, dl));
21519 if (IsStrict)
21520 return DAG.getMergeValues({Res, Chain}, dl);
21522 return Res;
21525 SDValue X86TargetLowering::LowerFP_TO_BF16(SDValue Op,
21526 SelectionDAG &DAG) const {
21527 SDLoc DL(Op);
21529 MVT SVT = Op.getOperand(0).getSimpleValueType();
21530 if (SVT == MVT::f32 && (Subtarget.hasBF16() || Subtarget.hasAVXNECONVERT())) {
21531 SDValue Res;
21532 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, Op.getOperand(0));
21533 Res = DAG.getNode(X86ISD::CVTNEPS2BF16, DL, MVT::v8bf16, Res);
21534 Res = DAG.getBitcast(MVT::v8i16, Res);
21535 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i16, Res,
21536 DAG.getIntPtrConstant(0, DL));
21539 MakeLibCallOptions CallOptions;
21540 RTLIB::Libcall LC = RTLIB::getFPROUND(SVT, MVT::bf16);
21541 SDValue Res =
21542 makeLibCall(DAG, LC, MVT::f16, Op.getOperand(0), CallOptions, DL).first;
21543 return DAG.getBitcast(MVT::i16, Res);
21546 /// Depending on uarch and/or optimizing for size, we might prefer to use a
21547 /// vector operation in place of the typical scalar operation.
21548 static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
21549 const X86Subtarget &Subtarget) {
21550 // If both operands have other uses, this is probably not profitable.
21551 SDValue LHS = Op.getOperand(0);
21552 SDValue RHS = Op.getOperand(1);
21553 if (!LHS.hasOneUse() && !RHS.hasOneUse())
21554 return Op;
21556 // FP horizontal add/sub were added with SSE3. Integer with SSSE3.
21557 bool IsFP = Op.getSimpleValueType().isFloatingPoint();
21558 if (IsFP && !Subtarget.hasSSE3())
21559 return Op;
21560 if (!IsFP && !Subtarget.hasSSSE3())
21561 return Op;
21563 // Extract from a common vector.
21564 if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
21565 RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
21566 LHS.getOperand(0) != RHS.getOperand(0) ||
21567 !isa<ConstantSDNode>(LHS.getOperand(1)) ||
21568 !isa<ConstantSDNode>(RHS.getOperand(1)) ||
21569 !shouldUseHorizontalOp(true, DAG, Subtarget))
21570 return Op;
21572 // Allow commuted 'hadd' ops.
21573 // TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
21574 unsigned HOpcode;
21575 switch (Op.getOpcode()) {
21576 case ISD::ADD: HOpcode = X86ISD::HADD; break;
21577 case ISD::SUB: HOpcode = X86ISD::HSUB; break;
21578 case ISD::FADD: HOpcode = X86ISD::FHADD; break;
21579 case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
21580 default:
21581 llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
21583 unsigned LExtIndex = LHS.getConstantOperandVal(1);
21584 unsigned RExtIndex = RHS.getConstantOperandVal(1);
21585 if ((LExtIndex & 1) == 1 && (RExtIndex & 1) == 0 &&
21586 (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
21587 std::swap(LExtIndex, RExtIndex);
21589 if ((LExtIndex & 1) != 0 || RExtIndex != (LExtIndex + 1))
21590 return Op;
21592 SDValue X = LHS.getOperand(0);
21593 EVT VecVT = X.getValueType();
21594 unsigned BitWidth = VecVT.getSizeInBits();
21595 unsigned NumLanes = BitWidth / 128;
21596 unsigned NumEltsPerLane = VecVT.getVectorNumElements() / NumLanes;
21597 assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
21598 "Not expecting illegal vector widths here");
21600 // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
21601 // equivalent, so extract the 256/512-bit source op to 128-bit if we can.
21602 SDLoc DL(Op);
21603 if (BitWidth == 256 || BitWidth == 512) {
21604 unsigned LaneIdx = LExtIndex / NumEltsPerLane;
21605 X = extract128BitVector(X, LaneIdx * NumEltsPerLane, DAG, DL);
21606 LExtIndex %= NumEltsPerLane;
21609 // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
21610 // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
21611 // add (extractelt (X, 2), extractelt (X, 3)) --> extractelt (hadd X, X), 1
21612 // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
21613 SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
21614 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
21615 DAG.getIntPtrConstant(LExtIndex / 2, DL));
21618 /// Depending on uarch and/or optimizing for size, we might prefer to use a
21619 /// vector operation in place of the typical scalar operation.
21620 SDValue X86TargetLowering::lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const {
21621 assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
21622 "Only expecting float/double");
21623 return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
21626 /// ISD::FROUND is defined to round to nearest with ties rounding away from 0.
21627 /// This mode isn't supported in hardware on X86. But as long as we aren't
21628 /// compiling with trapping math, we can emulate this with
21629 /// trunc(X + copysign(nextafter(0.5, 0.0), X)).
21630 static SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) {
21631 SDValue N0 = Op.getOperand(0);
21632 SDLoc dl(Op);
21633 MVT VT = Op.getSimpleValueType();
21635 // N0 += copysign(nextafter(0.5, 0.0), N0)
21636 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
21637 bool Ignored;
21638 APFloat Point5Pred = APFloat(0.5f);
21639 Point5Pred.convert(Sem, APFloat::rmNearestTiesToEven, &Ignored);
21640 Point5Pred.next(/*nextDown*/true);
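// Using the value just below 0.5 matters: for the largest float smaller than
// 0.5, adding exactly 0.5 lands halfway to 1.0 and the FADD would round the
// sum up to 1.0 (truncating to 1); adding the predecessor of 0.5 keeps the
// sum below 1.0, so the result correctly truncates to 0.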
21642 SDValue Adder = DAG.getNode(ISD::FCOPYSIGN, dl, VT,
21643 DAG.getConstantFP(Point5Pred, dl, VT), N0);
21644 N0 = DAG.getNode(ISD::FADD, dl, VT, N0, Adder);
21646 // Truncate the result to remove fraction.
21647 return DAG.getNode(ISD::FTRUNC, dl, VT, N0);
21650 /// The only differences between FABS and FNEG are the mask and the logic op.
21651 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
21652 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
21653 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
21654 "Wrong opcode for lowering FABS or FNEG.");
21656 bool IsFABS = (Op.getOpcode() == ISD::FABS);
21658 // If this is a FABS and it has an FNEG user, bail out to fold the combination
21659 // into an FNABS. We'll lower the FABS after that if it is still in use.
21660 if (IsFABS)
21661 for (SDNode *User : Op->uses())
21662 if (User->getOpcode() == ISD::FNEG)
21663 return Op;
21665 SDLoc dl(Op);
21666 MVT VT = Op.getSimpleValueType();
21668 bool IsF128 = (VT == MVT::f128);
21669 assert(VT.isFloatingPoint() && VT != MVT::f80 &&
21670 DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
21671 "Unexpected type in LowerFABSorFNEG");
21673 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOptLevel to
21674 // decide if we should generate a 16-byte constant mask when we only need 4 or
21675 // 8 bytes for the scalar case.
21677 // There are no scalar bitwise logical SSE/AVX instructions, so we
21678 // generate a 16-byte vector constant and logic op even for the scalar case.
21679 // Using a 16-byte mask allows folding the load of the mask with
21680 // the logic op, which can save ~4 bytes of code size.
21681 bool IsFakeVector = !VT.isVector() && !IsF128;
21682 MVT LogicVT = VT;
21683 if (IsFakeVector)
21684 LogicVT = (VT == MVT::f64) ? MVT::v2f64
21685 : (VT == MVT::f32) ? MVT::v4f32
21686 : MVT::v8f16;
21688 unsigned EltBits = VT.getScalarSizeInBits();
21689 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
21690 APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) :
21691 APInt::getSignMask(EltBits);
21692 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
21693 SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);
21695 SDValue Op0 = Op.getOperand(0);
21696 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
21697 unsigned LogicOp = IsFABS ? X86ISD::FAND :
21698 IsFNABS ? X86ISD::FOR :
21699 X86ISD::FXOR;
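// i.e. fabs(x) = x & ~SignMask, fneg(x) = x ^ SignMask, and
// fneg(fabs(x)) = x | SignMask (a single FNABS-style op).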
21700 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
21702 if (VT.isVector() || IsF128)
21703 return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
21705 // For the scalar case extend to a 128-bit vector, perform the logic op,
21706 // and extract the scalar result back out.
21707 Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
21708 SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
21709 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
21710 DAG.getIntPtrConstant(0, dl));
21713 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
21714 SDValue Mag = Op.getOperand(0);
21715 SDValue Sign = Op.getOperand(1);
21716 SDLoc dl(Op);
21718 // If the sign operand is smaller, extend it first.
21719 MVT VT = Op.getSimpleValueType();
21720 if (Sign.getSimpleValueType().bitsLT(VT))
21721 Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);
21723 // And if it is bigger, shrink it first.
21724 if (Sign.getSimpleValueType().bitsGT(VT))
21725 Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign,
21726 DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
21728 // At this point the operands and the result should have the same
21729 // type, and that won't be f80 since that is not custom lowered.
21730 bool IsF128 = (VT == MVT::f128);
21731 assert(VT.isFloatingPoint() && VT != MVT::f80 &&
21732 DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
21733 "Unexpected type in LowerFCOPYSIGN");
21735 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
21737 // Perform all scalar logic operations as 16-byte vectors because there are no
21738 // scalar FP logic instructions in SSE.
21739 // TODO: This isn't necessary. If we used scalar types, we might avoid some
21740 // unnecessary splats, but we might miss load folding opportunities. Should
21741 // this decision be based on OptimizeForSize?
21742 bool IsFakeVector = !VT.isVector() && !IsF128;
21743 MVT LogicVT = VT;
21744 if (IsFakeVector)
21745 LogicVT = (VT == MVT::f64) ? MVT::v2f64
21746 : (VT == MVT::f32) ? MVT::v4f32
21747 : MVT::v8f16;
21749 // The mask constants are automatically splatted for vector types.
21750 unsigned EltSizeInBits = VT.getScalarSizeInBits();
21751 SDValue SignMask = DAG.getConstantFP(
21752 APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
21753 SDValue MagMask = DAG.getConstantFP(
21754 APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);
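// i.e. copysign(Mag, Sign) = (Mag & MagMask) | (Sign & SignMask), where
// MagMask == ~SignMask.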
21756 // First, clear all bits but the sign bit from the second operand (sign).
21757 if (IsFakeVector)
21758 Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
21759 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);
21761 // Next, clear the sign bit from the first operand (magnitude).
21762 // TODO: If we had general constant folding for FP logic ops, this check
21763 // wouldn't be necessary.
21764 SDValue MagBits;
21765 if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
21766 APFloat APF = Op0CN->getValueAPF();
21767 APF.clearSign();
21768 MagBits = DAG.getConstantFP(APF, dl, LogicVT);
21769 } else {
21770 // If the magnitude operand wasn't a constant, we need to AND out the sign.
21771 if (IsFakeVector)
21772 Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
21773 MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
21776 // OR the magnitude value with the sign bit.
21777 SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
21778 return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
21779 DAG.getIntPtrConstant(0, dl));
21782 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
21783 SDValue N0 = Op.getOperand(0);
21784 SDLoc dl(Op);
21785 MVT VT = Op.getSimpleValueType();
21787 MVT OpVT = N0.getSimpleValueType();
21788 assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
21789 "Unexpected type for FGETSIGN");
21791 // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
21792 MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
21793 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
21794 Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
21795 Res = DAG.getZExtOrTrunc(Res, dl, VT);
21796 Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
21797 return Res;
21800 /// Helper for attempting to create an X86ISD::BT node.
21801 static SDValue getBT(SDValue Src, SDValue BitNo, const SDLoc &DL, SelectionDAG &DAG) {
21802 // If Src is i8, promote it to i32 with any_extend. There is no i8 BT
21803 // instruction. Since the shift amount is in-range-or-undefined, we know
21804 // that doing a bittest on the i32 value is ok. We extend to i32 because
21805 // the encoding for the i16 version is larger than the i32 version.
21806 // Also promote i16 to i32 for performance / code size reasons.
21807 if (Src.getValueType().getScalarSizeInBits() < 32)
21808 Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Src);
21810 // No legal type found, give up.
21811 if (!DAG.getTargetLoweringInfo().isTypeLegal(Src.getValueType()))
21812 return SDValue();
21814 // See if we can use the 32-bit instruction instead of the 64-bit one for a
21815 // shorter encoding. Since the former takes the modulo 32 of BitNo and the
21816 // latter takes the modulo 64, this is only valid if the 5th bit of BitNo is
21817 // known to be zero.
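// This is the case, for example, when BitNo is (and N, 31): bits 5 and above
// of BitNo are then known to be zero.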
21818 if (Src.getValueType() == MVT::i64 &&
21819 DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
21820 Src = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Src);
21822 // If the operand types disagree, extend the shift amount to match. Since
21823 // BT ignores high bits (like shifts) we can use anyextend.
21824 if (Src.getValueType() != BitNo.getValueType()) {
21825 // Peek through a mask/modulo operation.
21826 // TODO: DAGCombine fails to do this as it just checks isTruncateFree, but
21827 // we probably need a better IsDesirableToPromoteOp to handle this as well.
21828 if (BitNo.getOpcode() == ISD::AND && BitNo->hasOneUse())
21829 BitNo = DAG.getNode(ISD::AND, DL, Src.getValueType(),
21830 DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(),
21831 BitNo.getOperand(0)),
21832 DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(),
21833 BitNo.getOperand(1)));
21834 else
21835 BitNo = DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(), BitNo);
21838 return DAG.getNode(X86ISD::BT, DL, MVT::i32, Src, BitNo);
21841 /// Helper for creating an X86ISD::SETCC node.
21842 static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
21843 SelectionDAG &DAG) {
21844 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
21845 DAG.getTargetConstant(Cond, dl, MVT::i8), EFLAGS);
21848 /// Recursive helper for combineVectorSizedSetCCEquality() to see if we have a
21849 /// recognizable memcmp expansion.
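/// e.g. (or (xor A, B), (xor C, D)), such as when a memcmp of two 32-byte
/// buffers is expanded into a pair of vector-sized integer compares.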
21850 static bool isOrXorXorTree(SDValue X, bool Root = true) {
21851 if (X.getOpcode() == ISD::OR)
21852 return isOrXorXorTree(X.getOperand(0), false) &&
21853 isOrXorXorTree(X.getOperand(1), false);
21854 if (Root)
21855 return false;
21856 return X.getOpcode() == ISD::XOR;
21859 /// Recursive helper for combineVectorSizedSetCCEquality() to emit the memcmp
21860 /// expansion.
21861 template <typename F>
21862 static SDValue emitOrXorXorTree(SDValue X, const SDLoc &DL, SelectionDAG &DAG,
21863 EVT VecVT, EVT CmpVT, bool HasPT, F SToV) {
21864 SDValue Op0 = X.getOperand(0);
21865 SDValue Op1 = X.getOperand(1);
21866 if (X.getOpcode() == ISD::OR) {
21867 SDValue A = emitOrXorXorTree(Op0, DL, DAG, VecVT, CmpVT, HasPT, SToV);
21868 SDValue B = emitOrXorXorTree(Op1, DL, DAG, VecVT, CmpVT, HasPT, SToV);
21869 if (VecVT != CmpVT)
21870 return DAG.getNode(ISD::OR, DL, CmpVT, A, B);
21871 if (HasPT)
21872 return DAG.getNode(ISD::OR, DL, VecVT, A, B);
21873 return DAG.getNode(ISD::AND, DL, CmpVT, A, B);
21875 if (X.getOpcode() == ISD::XOR) {
21876 SDValue A = SToV(Op0);
21877 SDValue B = SToV(Op1);
21878 if (VecVT != CmpVT)
21879 return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETNE);
21880 if (HasPT)
21881 return DAG.getNode(ISD::XOR, DL, VecVT, A, B);
21882 return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
21884 llvm_unreachable("Impossible");
21887 /// Try to map a 128-bit or larger integer comparison to vector instructions
21888 /// before type legalization splits it up into chunks.
21889 static SDValue combineVectorSizedSetCCEquality(EVT VT, SDValue X, SDValue Y,
21890 ISD::CondCode CC,
21891 const SDLoc &DL,
21892 SelectionDAG &DAG,
21893 const X86Subtarget &Subtarget) {
21894 assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");
21896 // We're looking for an oversized integer equality comparison.
21897 EVT OpVT = X.getValueType();
21898 unsigned OpSize = OpVT.getSizeInBits();
21899 if (!OpVT.isScalarInteger() || OpSize < 128)
21900 return SDValue();
21902 // Ignore a comparison with zero because that gets special treatment in
21903 // EmitTest(). But make an exception for the special case of a pair of
21904 // logically-combined vector-sized operands compared to zero. This pattern may
21905 // be generated by the memcmp expansion pass with oversized integer compares
21906 // (see PR33325).
21907 bool IsOrXorXorTreeCCZero = isNullConstant(Y) && isOrXorXorTree(X);
21908 if (isNullConstant(Y) && !IsOrXorXorTreeCCZero)
21909 return SDValue();
21911 // Don't perform this combine if constructing the vector will be expensive.
21912 auto IsVectorBitCastCheap = [](SDValue X) {
21913 X = peekThroughBitcasts(X);
21914 return isa<ConstantSDNode>(X) || X.getValueType().isVector() ||
21915 X.getOpcode() == ISD::LOAD;
21917 if ((!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y)) &&
21918 !IsOrXorXorTreeCCZero)
21919 return SDValue();
21921 // Use XOR (plus OR) and PTEST after SSE4.1 for 128/256-bit operands.
21922 // Use PCMPNEQ (plus OR) and KORTEST for 512-bit operands.
21923 // Otherwise use PCMPEQ (plus AND) and mask testing.
21924 bool NoImplicitFloatOps =
21925 DAG.getMachineFunction().getFunction().hasFnAttribute(
21926 Attribute::NoImplicitFloat);
21927 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
21928 ((OpSize == 128 && Subtarget.hasSSE2()) ||
21929 (OpSize == 256 && Subtarget.hasAVX()) ||
21930 (OpSize == 512 && Subtarget.useAVX512Regs()))) {
21931 bool HasPT = Subtarget.hasSSE41();
21933 // PTEST and MOVMSK are slow on Knights Landing and Knights Mill, and widened
21934 // vector registers are essentially free. (Technically, widening registers
21935 // prevents load folding, but the tradeoff is worth it.)
21936 bool PreferKOT = Subtarget.preferMaskRegisters();
21937 bool NeedZExt = PreferKOT && !Subtarget.hasVLX() && OpSize != 512;
21939 EVT VecVT = MVT::v16i8;
21940 EVT CmpVT = PreferKOT ? MVT::v16i1 : VecVT;
21941 if (OpSize == 256) {
21942 VecVT = MVT::v32i8;
21943 CmpVT = PreferKOT ? MVT::v32i1 : VecVT;
21945 EVT CastVT = VecVT;
21946 bool NeedsAVX512FCast = false;
21947 if (OpSize == 512 || NeedZExt) {
21948 if (Subtarget.hasBWI()) {
21949 VecVT = MVT::v64i8;
21950 CmpVT = MVT::v64i1;
21951 if (OpSize == 512)
21952 CastVT = VecVT;
21953 } else {
21954 VecVT = MVT::v16i32;
21955 CmpVT = MVT::v16i1;
21956 CastVT = OpSize == 512 ? VecVT
21957 : OpSize == 256 ? MVT::v8i32
21958 : MVT::v4i32;
21959 NeedsAVX512FCast = true;
21963 auto ScalarToVector = [&](SDValue X) -> SDValue {
21964 bool TmpZext = false;
21965 EVT TmpCastVT = CastVT;
21966 if (X.getOpcode() == ISD::ZERO_EXTEND) {
21967 SDValue OrigX = X.getOperand(0);
21968 unsigned OrigSize = OrigX.getScalarValueSizeInBits();
21969 if (OrigSize < OpSize) {
21970 if (OrigSize == 128) {
21971 TmpCastVT = NeedsAVX512FCast ? MVT::v4i32 : MVT::v16i8;
21972 X = OrigX;
21973 TmpZext = true;
21974 } else if (OrigSize == 256) {
21975 TmpCastVT = NeedsAVX512FCast ? MVT::v8i32 : MVT::v32i8;
21976 X = OrigX;
21977 TmpZext = true;
21981 X = DAG.getBitcast(TmpCastVT, X);
21982 if (!NeedZExt && !TmpZext)
21983 return X;
21984 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT,
21985 DAG.getConstant(0, DL, VecVT), X,
21986 DAG.getVectorIdxConstant(0, DL));
21989 SDValue Cmp;
21990 if (IsOrXorXorTreeCCZero) {
21991 // This is a bitwise-combined equality comparison of 2 pairs of vectors:
21992 // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
21993 // Use 2 vector equality compares and 'and' the results before doing a
21994 // MOVMSK.
21995 Cmp = emitOrXorXorTree(X, DL, DAG, VecVT, CmpVT, HasPT, ScalarToVector);
21996 } else {
21997 SDValue VecX = ScalarToVector(X);
21998 SDValue VecY = ScalarToVector(Y);
21999 if (VecVT != CmpVT) {
22000 Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETNE);
22001 } else if (HasPT) {
22002 Cmp = DAG.getNode(ISD::XOR, DL, VecVT, VecX, VecY);
22003 } else {
22004 Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ);
22007 // AVX512 should emit a setcc that will lower to kortest.
22008 if (VecVT != CmpVT) {
22009 EVT KRegVT = CmpVT == MVT::v64i1 ? MVT::i64
22010 : CmpVT == MVT::v32i1 ? MVT::i32
22011 : MVT::i16;
22012 return DAG.getSetCC(DL, VT, DAG.getBitcast(KRegVT, Cmp),
22013 DAG.getConstant(0, DL, KRegVT), CC);
22015 if (HasPT) {
22016 SDValue BCCmp =
22017 DAG.getBitcast(OpSize == 256 ? MVT::v4i64 : MVT::v2i64, Cmp);
22018 SDValue PT = DAG.getNode(X86ISD::PTEST, DL, MVT::i32, BCCmp, BCCmp);
22019 X86::CondCode X86CC = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
22020 SDValue X86SetCC = getSETCC(X86CC, PT, DL, DAG);
22021 return DAG.getNode(ISD::TRUNCATE, DL, VT, X86SetCC.getValue(0));
22023 // If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
22024 // setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
22025 // setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
22026 assert(Cmp.getValueType() == MVT::v16i8 &&
22027 "Non 128-bit vector on pre-SSE41 target");
22028 SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
22029 SDValue FFFFs = DAG.getConstant(0xFFFF, DL, MVT::i32);
22030 return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
22033 return SDValue();
22036 /// Helper for matching BINOP(EXTRACTELT(X,0),BINOP(EXTRACTELT(X,1),...))
22037 /// style scalarized (associative) reduction patterns. Partial reductions
22038 /// are supported when the pointer SrcMask is non-null.
22039 /// TODO - move this to SelectionDAG?
22040 static bool matchScalarReduction(SDValue Op, ISD::NodeType BinOp,
22041 SmallVectorImpl<SDValue> &SrcOps,
22042 SmallVectorImpl<APInt> *SrcMask = nullptr) {
22043 SmallVector<SDValue, 8> Opnds;
22044 DenseMap<SDValue, APInt> SrcOpMap;
22045 EVT VT = MVT::Other;
22047 // Recognize a special case where a vector is cast into a wide integer to
22048 // test for all zeros.
22049 assert(Op.getOpcode() == unsigned(BinOp) &&
22050 "Unexpected bit reduction opcode");
22051 Opnds.push_back(Op.getOperand(0));
22052 Opnds.push_back(Op.getOperand(1));
22054 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
22055 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
22056 // BFS traverse all BinOp operands.
22057 if (I->getOpcode() == unsigned(BinOp)) {
22058 Opnds.push_back(I->getOperand(0));
22059 Opnds.push_back(I->getOperand(1));
22060 // Re-evaluate the number of nodes to be traversed.
22061 e += 2; // 2 more nodes (LHS and RHS) are pushed.
22062 continue;
22065 // Quit if this is not an EXTRACT_VECTOR_ELT.
22066 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
22067 return false;
22069 // Quit if the index is not a constant.
22070 auto *Idx = dyn_cast<ConstantSDNode>(I->getOperand(1));
22071 if (!Idx)
22072 return false;
22074 SDValue Src = I->getOperand(0);
22075 DenseMap<SDValue, APInt>::iterator M = SrcOpMap.find(Src);
22076 if (M == SrcOpMap.end()) {
22077 VT = Src.getValueType();
22078 // Quit if not the same type.
22079 if (!SrcOpMap.empty() && VT != SrcOpMap.begin()->first.getValueType())
22080 return false;
22081 unsigned NumElts = VT.getVectorNumElements();
22082 APInt EltCount = APInt::getZero(NumElts);
22083 M = SrcOpMap.insert(std::make_pair(Src, EltCount)).first;
22084 SrcOps.push_back(Src);
22087 // Quit if element already used.
22088 unsigned CIdx = Idx->getZExtValue();
22089 if (M->second[CIdx])
22090 return false;
22091 M->second.setBit(CIdx);
22094 if (SrcMask) {
22095 // Collect the source partial masks.
22096 for (SDValue &SrcOp : SrcOps)
22097 SrcMask->push_back(SrcOpMap[SrcOp]);
22098 } else {
22099 // Quit if not all elements are used.
22100 for (const auto &I : SrcOpMap)
22101 if (!I.second.isAllOnes())
22102 return false;
22105 return true;
22108 // Helper function for comparing all bits of two vectors.
22109 static SDValue LowerVectorAllEqual(const SDLoc &DL, SDValue LHS, SDValue RHS,
22110 ISD::CondCode CC, const APInt &OriginalMask,
22111 const X86Subtarget &Subtarget,
22112 SelectionDAG &DAG, X86::CondCode &X86CC) {
22113 EVT VT = LHS.getValueType();
22114 unsigned ScalarSize = VT.getScalarSizeInBits();
22115 if (OriginalMask.getBitWidth() != ScalarSize) {
22116 assert(ScalarSize == 1 && "Element Mask vs Vector bitwidth mismatch");
22117 return SDValue();
22120 // Quit if not convertible to a legal scalar or 128/256-bit vector.
22121 if (!llvm::has_single_bit<uint32_t>(VT.getSizeInBits()))
22122 return SDValue();
22124 // FCMP may use ISD::SETNE when nnan - early out if we manage to get here.
22125 if (VT.isFloatingPoint())
22126 return SDValue();
22128 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");
22129 X86CC = (CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE);
22131 APInt Mask = OriginalMask;
22133 auto MaskBits = [&](SDValue Src) {
22134 if (Mask.isAllOnes())
22135 return Src;
22136 EVT SrcVT = Src.getValueType();
22137 SDValue MaskValue = DAG.getConstant(Mask, DL, SrcVT);
22138 return DAG.getNode(ISD::AND, DL, SrcVT, Src, MaskValue);
22141 // For sub-128-bit vector, cast to (legal) integer and compare with zero.
22142 if (VT.getSizeInBits() < 128) {
22143 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
22144 if (!DAG.getTargetLoweringInfo().isTypeLegal(IntVT)) {
22145 if (IntVT != MVT::i64)
22146 return SDValue();
22147 auto SplitLHS = DAG.SplitScalar(DAG.getBitcast(IntVT, MaskBits(LHS)), DL,
22148 MVT::i32, MVT::i32);
22149 auto SplitRHS = DAG.SplitScalar(DAG.getBitcast(IntVT, MaskBits(RHS)), DL,
22150 MVT::i32, MVT::i32);
22151 SDValue Lo =
22152 DAG.getNode(ISD::XOR, DL, MVT::i32, SplitLHS.first, SplitRHS.first);
22153 SDValue Hi =
22154 DAG.getNode(ISD::XOR, DL, MVT::i32, SplitLHS.second, SplitRHS.second);
22155 return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
22156 DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi),
22157 DAG.getConstant(0, DL, MVT::i32));
22159 return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
22160 DAG.getBitcast(IntVT, MaskBits(LHS)),
22161 DAG.getBitcast(IntVT, MaskBits(RHS)));
22164 // Without PTEST, a masked v2i64 or-reduction is not faster than
22165 // scalarization.
22166 bool UseKORTEST = Subtarget.useAVX512Regs();
22167 bool UsePTEST = Subtarget.hasSSE41();
22168 if (!UsePTEST && !Mask.isAllOnes() && ScalarSize > 32)
22169 return SDValue();
22171 // Split down to 128/256/512-bit vector.
22172 unsigned TestSize = UseKORTEST ? 512 : (Subtarget.hasAVX() ? 256 : 128);
22174 // If the input vector has vector elements wider than the target test size,
22175 // then cast to <X x i64> so it will safely split.
22176 if (ScalarSize > TestSize) {
22177 if (!Mask.isAllOnes())
22178 return SDValue();
22179 VT = EVT::getVectorVT(*DAG.getContext(), MVT::i64, VT.getSizeInBits() / 64);
22180 LHS = DAG.getBitcast(VT, LHS);
22181 RHS = DAG.getBitcast(VT, RHS);
22182 Mask = APInt::getAllOnes(64);
22185 if (VT.getSizeInBits() > TestSize) {
22186 KnownBits KnownRHS = DAG.computeKnownBits(RHS);
22187 if (KnownRHS.isConstant() && KnownRHS.getConstant() == Mask) {
22188 // If ICMP(AND(LHS,MASK),MASK) - reduce using AND splits.
22189 while (VT.getSizeInBits() > TestSize) {
22190 auto Split = DAG.SplitVector(LHS, DL);
22191 VT = Split.first.getValueType();
22192 LHS = DAG.getNode(ISD::AND, DL, VT, Split.first, Split.second);
22194 RHS = DAG.getAllOnesConstant(DL, VT);
22195 } else if (!UsePTEST && !KnownRHS.isZero()) {
22196 // MOVMSK Special Case:
22197 // ALLOF(CMPEQ(X,Y)) -> AND(CMPEQ(X[0],Y[0]),CMPEQ(X[1],Y[1]),....)
22198 MVT SVT = ScalarSize >= 32 ? MVT::i32 : MVT::i8;
22199 VT = MVT::getVectorVT(SVT, VT.getSizeInBits() / SVT.getSizeInBits());
22200 LHS = DAG.getBitcast(VT, MaskBits(LHS));
22201 RHS = DAG.getBitcast(VT, MaskBits(RHS));
22202 EVT BoolVT = VT.changeVectorElementType(MVT::i1);
22203 SDValue V = DAG.getSetCC(DL, BoolVT, LHS, RHS, ISD::SETEQ);
22204 V = DAG.getSExtOrTrunc(V, DL, VT);
22205 while (VT.getSizeInBits() > TestSize) {
22206 auto Split = DAG.SplitVector(V, DL);
22207 VT = Split.first.getValueType();
22208 V = DAG.getNode(ISD::AND, DL, VT, Split.first, Split.second);
22210 V = DAG.getNOT(DL, V, VT);
22211 V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
22212 return DAG.getNode(X86ISD::CMP, DL, MVT::i32, V,
22213 DAG.getConstant(0, DL, MVT::i32));
22214 } else {
22215 // Convert to a ICMP_EQ(XOR(LHS,RHS),0) pattern.
22216 SDValue V = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
22217 while (VT.getSizeInBits() > TestSize) {
22218 auto Split = DAG.SplitVector(V, DL);
22219 VT = Split.first.getValueType();
22220 V = DAG.getNode(ISD::OR, DL, VT, Split.first, Split.second);
22222 LHS = V;
22223 RHS = DAG.getConstant(0, DL, VT);
22227 if (UseKORTEST && VT.is512BitVector()) {
22228 MVT TestVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
22229 MVT BoolVT = TestVT.changeVectorElementType(MVT::i1);
22230 LHS = DAG.getBitcast(TestVT, MaskBits(LHS));
22231 RHS = DAG.getBitcast(TestVT, MaskBits(RHS));
22232 SDValue V = DAG.getSetCC(DL, BoolVT, LHS, RHS, ISD::SETNE);
22233 return DAG.getNode(X86ISD::KORTEST, DL, MVT::i32, V, V);
22236 if (UsePTEST) {
22237 MVT TestVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
22238 LHS = DAG.getBitcast(TestVT, MaskBits(LHS));
22239 RHS = DAG.getBitcast(TestVT, MaskBits(RHS));
22240 SDValue V = DAG.getNode(ISD::XOR, DL, TestVT, LHS, RHS);
22241 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, V, V);
22244 assert(VT.getSizeInBits() == 128 && "Failure to split to 128-bits");
22245 MVT MaskVT = ScalarSize >= 32 ? MVT::v4i32 : MVT::v16i8;
22246 LHS = DAG.getBitcast(MaskVT, MaskBits(LHS));
22247 RHS = DAG.getBitcast(MaskVT, MaskBits(RHS));
22248 SDValue V = DAG.getNode(X86ISD::PCMPEQ, DL, MaskVT, LHS, RHS);
22249 V = DAG.getNOT(DL, V, MaskVT);
22250 V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
22251 return DAG.getNode(X86ISD::CMP, DL, MVT::i32, V,
22252 DAG.getConstant(0, DL, MVT::i32));
22255 // Check whether an AND/OR'd reduction tree is PTEST-able, or if we can fall
22256 // back to CMP(MOVMSK(PCMPEQB(X,Y))).
22257 static SDValue MatchVectorAllEqualTest(SDValue LHS, SDValue RHS,
22258 ISD::CondCode CC, const SDLoc &DL,
22259 const X86Subtarget &Subtarget,
22260 SelectionDAG &DAG,
22261 X86::CondCode &X86CC) {
22262 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");
22264 bool CmpNull = isNullConstant(RHS);
22265 bool CmpAllOnes = isAllOnesConstant(RHS);
22266 if (!CmpNull && !CmpAllOnes)
22267 return SDValue();
22269 SDValue Op = LHS;
22270 if (!Subtarget.hasSSE2() || !Op->hasOneUse())
22271 return SDValue();
22273 // Check whether we're masking/truncating an OR-reduction result, in which
22274 // case track the masked bits.
22275 // TODO: Add CmpAllOnes support.
22276 APInt Mask = APInt::getAllOnes(Op.getScalarValueSizeInBits());
22277 if (CmpNull) {
22278 switch (Op.getOpcode()) {
22279 case ISD::TRUNCATE: {
22280 SDValue Src = Op.getOperand(0);
22281 Mask = APInt::getLowBitsSet(Src.getScalarValueSizeInBits(),
22282 Op.getScalarValueSizeInBits());
22283 Op = Src;
22284 break;
22286 case ISD::AND: {
22287 if (auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
22288 Mask = Cst->getAPIntValue();
22289 Op = Op.getOperand(0);
22291 break;
22296 ISD::NodeType LogicOp = CmpNull ? ISD::OR : ISD::AND;
22298 // Match icmp(or(extract(X,0),extract(X,1)),0) anyof reduction patterns.
22299 // Match icmp(and(extract(X,0),extract(X,1)),-1) allof reduction patterns.
22300 SmallVector<SDValue, 8> VecIns;
22301 if (Op.getOpcode() == LogicOp && matchScalarReduction(Op, LogicOp, VecIns)) {
22302 EVT VT = VecIns[0].getValueType();
22303 assert(llvm::all_of(VecIns,
22304 [VT](SDValue V) { return VT == V.getValueType(); }) &&
22305 "Reduction source vector mismatch");
22307 // Quit if not splittable to scalar/128/256/512-bit vector.
22308 if (!llvm::has_single_bit<uint32_t>(VT.getSizeInBits()))
22309 return SDValue();
22311 // If more than one full vector is evaluated, AND/OR them first before
22312 // PTEST.
22313 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1;
22314 Slot += 2, e += 1) {
22315 // Each iteration will AND/OR 2 nodes and append the result until there is
22316 // only 1 node left, i.e. the final value of all vectors.
22317 SDValue LHS = VecIns[Slot];
22318 SDValue RHS = VecIns[Slot + 1];
22319 VecIns.push_back(DAG.getNode(LogicOp, DL, VT, LHS, RHS));
22322 return LowerVectorAllEqual(DL, VecIns.back(),
22323 CmpNull ? DAG.getConstant(0, DL, VT)
22324 : DAG.getAllOnesConstant(DL, VT),
22325 CC, Mask, Subtarget, DAG, X86CC);
22328 // Match icmp(reduce_or(X),0) anyof reduction patterns.
22329 // Match icmp(reduce_and(X),-1) allof reduction patterns.
22330 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
22331 ISD::NodeType BinOp;
22332 if (SDValue Match =
22333 DAG.matchBinOpReduction(Op.getNode(), BinOp, {LogicOp})) {
22334 EVT MatchVT = Match.getValueType();
22335 return LowerVectorAllEqual(DL, Match,
22336 CmpNull ? DAG.getConstant(0, DL, MatchVT)
22337 : DAG.getAllOnesConstant(DL, MatchVT),
22338 CC, Mask, Subtarget, DAG, X86CC);
22342 if (Mask.isAllOnes()) {
22343 assert(!Op.getValueType().isVector() &&
22344 "Illegal vector type for reduction pattern");
22345 SDValue Src = peekThroughBitcasts(Op);
22346 if (Src.getValueType().isFixedLengthVector() &&
22347 Src.getValueType().getScalarType() == MVT::i1) {
22348 // Match icmp(bitcast(icmp_ne(X,Y)),0) reduction patterns.
22349 // Match icmp(bitcast(icmp_eq(X,Y)),-1) reduction patterns.
22350 if (Src.getOpcode() == ISD::SETCC) {
22351 SDValue LHS = Src.getOperand(0);
22352 SDValue RHS = Src.getOperand(1);
22353 EVT LHSVT = LHS.getValueType();
22354 ISD::CondCode SrcCC = cast<CondCodeSDNode>(Src.getOperand(2))->get();
22355 if (SrcCC == (CmpNull ? ISD::SETNE : ISD::SETEQ) &&
22356 llvm::has_single_bit<uint32_t>(LHSVT.getSizeInBits())) {
22357 APInt SrcMask = APInt::getAllOnes(LHSVT.getScalarSizeInBits());
22358 return LowerVectorAllEqual(DL, LHS, RHS, CC, SrcMask, Subtarget, DAG,
22359 X86CC);
22362 // Match icmp(bitcast(vXi1 trunc(Y)),0) reduction patterns.
22363 // Match icmp(bitcast(vXi1 trunc(Y)),-1) reduction patterns.
22364 // Peek through truncation, mask the LSB and compare against zero/LSB.
22365 if (Src.getOpcode() == ISD::TRUNCATE) {
22366 SDValue Inner = Src.getOperand(0);
22367 EVT InnerVT = Inner.getValueType();
22368 if (llvm::has_single_bit<uint32_t>(InnerVT.getSizeInBits())) {
22369 unsigned BW = InnerVT.getScalarSizeInBits();
22370 APInt SrcMask = APInt(BW, 1);
22371 APInt Cmp = CmpNull ? APInt::getZero(BW) : SrcMask;
22372 return LowerVectorAllEqual(DL, Inner,
22373 DAG.getConstant(Cmp, DL, InnerVT), CC,
22374 SrcMask, Subtarget, DAG, X86CC);
22380 return SDValue();
22383 /// Return true if \c Op has a use that doesn't just read flags.
22384 static bool hasNonFlagsUse(SDValue Op) {
22385 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
22386 ++UI) {
22387 SDNode *User = *UI;
22388 unsigned UOpNo = UI.getOperandNo();
22389 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
22390 // Look past the truncate.
22391 UOpNo = User->use_begin().getOperandNo();
22392 User = *User->use_begin();
22395 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
22396 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
22397 return true;
22399 return false;
22402 // Transform to an x86-specific ALU node with flags if there is a chance of
22403 // using an RMW op, or if only the flags are used. Otherwise, leave
22404 // the node alone and emit a 'cmp' or 'test' instruction.
22405 static bool isProfitableToUseFlagOp(SDValue Op) {
22406 for (SDNode *U : Op->uses())
22407 if (U->getOpcode() != ISD::CopyToReg &&
22408 U->getOpcode() != ISD::SETCC &&
22409 U->getOpcode() != ISD::STORE)
22410 return false;
22412 return true;
22415 /// Emit nodes that will be selected as "test Op0,Op0", or something
22416 /// equivalent.
22417 static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
22418 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
22419 // CF and OF aren't always set the way we want. Determine which
22420 // of these we need.
22421 bool NeedCF = false;
22422 bool NeedOF = false;
22423 switch (X86CC) {
22424 default: break;
22425 case X86::COND_A: case X86::COND_AE:
22426 case X86::COND_B: case X86::COND_BE:
22427 NeedCF = true;
22428 break;
22429 case X86::COND_G: case X86::COND_GE:
22430 case X86::COND_L: case X86::COND_LE:
22431 case X86::COND_O: case X86::COND_NO: {
22432 // Check if we really need to set the Overflow flag.
22433 // If the NoSignedWrap flag is present,
22434 // it is not actually needed.
22435 switch (Op->getOpcode()) {
22436 case ISD::ADD:
22437 case ISD::SUB:
22438 case ISD::MUL:
22439 case ISD::SHL:
22440 if (Op.getNode()->getFlags().hasNoSignedWrap())
22441 break;
22442 [[fallthrough]];
22443 default:
22444 NeedOF = true;
22445 break;
22447 break;
22450 // See if we can use the EFLAGS value from the operand instead of
22451 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
22452 // we prove that the arithmetic won't overflow, we can't use OF or CF.
22453 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
22454 // Emit a CMP with 0, which is the TEST pattern.
22455 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
22456 DAG.getConstant(0, dl, Op.getValueType()));
22458 unsigned Opcode = 0;
22459 unsigned NumOperands = 0;
22461 SDValue ArithOp = Op;
22463 // NOTE: In the code below, ArithOp holds the arithmetic operation, which may
22464 // be the result of a cast. We use the variable 'Op', the original non-cast
22465 // value, when we check for possible users.
22466 switch (ArithOp.getOpcode()) {
22467 case ISD::AND:
22468 // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
22469 // because a TEST instruction will be better.
22470 if (!hasNonFlagsUse(Op))
22471 break;
22473 [[fallthrough]];
22474 case ISD::ADD:
22475 case ISD::SUB:
22476 case ISD::OR:
22477 case ISD::XOR:
22478 if (!isProfitableToUseFlagOp(Op))
22479 break;
22481 // Otherwise use a regular EFLAGS-setting instruction.
22482 switch (ArithOp.getOpcode()) {
22483 default: llvm_unreachable("unexpected operator!");
22484 case ISD::ADD: Opcode = X86ISD::ADD; break;
22485 case ISD::SUB: Opcode = X86ISD::SUB; break;
22486 case ISD::XOR: Opcode = X86ISD::XOR; break;
22487 case ISD::AND: Opcode = X86ISD::AND; break;
22488 case ISD::OR: Opcode = X86ISD::OR; break;
22491 NumOperands = 2;
22492 break;
22493 case X86ISD::ADD:
22494 case X86ISD::SUB:
22495 case X86ISD::OR:
22496 case X86ISD::XOR:
22497 case X86ISD::AND:
22498 return SDValue(Op.getNode(), 1);
22499 case ISD::SSUBO:
22500 case ISD::USUBO: {
22501 // SSUBO/USUBO will become an X86ISD::SUB and we can use its Z flag.
22502 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
22503 return DAG.getNode(X86ISD::SUB, dl, VTs, Op->getOperand(0),
22504 Op->getOperand(1)).getValue(1);
22506 default:
22507 break;
22510 if (Opcode == 0) {
22511 // Emit a CMP with 0, which is the TEST pattern.
22512 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
22513 DAG.getConstant(0, dl, Op.getValueType()));
22515 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
22516 SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
22518 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
22519 DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
22520 return SDValue(New.getNode(), 1);
22523 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
22524 /// equivalent.
22525 static SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
22526 const SDLoc &dl, SelectionDAG &DAG,
22527 const X86Subtarget &Subtarget) {
22528 if (isNullConstant(Op1))
22529 return EmitTest(Op0, X86CC, dl, DAG, Subtarget);
22531 EVT CmpVT = Op0.getValueType();
22533 assert((CmpVT == MVT::i8 || CmpVT == MVT::i16 ||
22534 CmpVT == MVT::i32 || CmpVT == MVT::i64) && "Unexpected VT!");
22536 // Only promote the compare up to i32 if it is a 16-bit operation
22537 // with an immediate. 16-bit immediates are to be avoided.
22538 if (CmpVT == MVT::i16 && !Subtarget.isAtom() &&
22539 !DAG.getMachineFunction().getFunction().hasMinSize()) {
22540 ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
22541 ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
22542 // Don't do this if the immediate can fit in 8 bits.
22543 if ((COp0 && !COp0->getAPIntValue().isSignedIntN(8)) ||
22544 (COp1 && !COp1->getAPIntValue().isSignedIntN(8))) {
22545 unsigned ExtendOp =
22546 isX86CCSigned(X86CC) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
22547 if (X86CC == X86::COND_E || X86CC == X86::COND_NE) {
22548 // For equality comparisons try to use SIGN_EXTEND if the input was
22549 // truncated from something with enough sign bits.
22550 if (Op0.getOpcode() == ISD::TRUNCATE) {
22551 if (DAG.ComputeMaxSignificantBits(Op0.getOperand(0)) <= 16)
22552 ExtendOp = ISD::SIGN_EXTEND;
22553 } else if (Op1.getOpcode() == ISD::TRUNCATE) {
22554 if (DAG.ComputeMaxSignificantBits(Op1.getOperand(0)) <= 16)
22555 ExtendOp = ISD::SIGN_EXTEND;
22559 CmpVT = MVT::i32;
22560 Op0 = DAG.getNode(ExtendOp, dl, CmpVT, Op0);
22561 Op1 = DAG.getNode(ExtendOp, dl, CmpVT, Op1);
22565 // Try to shrink i64 compares if the input has enough zero bits.
22566 // FIXME: Do this for non-constant compares for constant on LHS?
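// e.g. an equality compare of (and i64 X, 255) against a small constant can
// be narrowed to a 32-bit compare of the truncated operands.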
22567 if (CmpVT == MVT::i64 && isa<ConstantSDNode>(Op1) && !isX86CCSigned(X86CC) &&
22568 Op0.hasOneUse() && // Hacky way to not break CSE opportunities with sub.
22569 Op1->getAsAPIntVal().getActiveBits() <= 32 &&
22570 DAG.MaskedValueIsZero(Op0, APInt::getHighBitsSet(64, 32))) {
22571 CmpVT = MVT::i32;
22572 Op0 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op0);
22573 Op1 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op1);
22576 // 0-x == y --> x+y == 0
22577 // 0-x != y --> x+y != 0
22578 if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op0.getOperand(0)) &&
22579 Op0.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
22580 SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
22581 SDValue Add = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(1), Op1);
22582 return Add.getValue(1);
22585 // x == 0-y --> x+y == 0
22586 // x != 0-y --> x+y != 0
22587 if (Op1.getOpcode() == ISD::SUB && isNullConstant(Op1.getOperand(0)) &&
22588 Op1.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
22589 SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
22590 SDValue Add = DAG.getNode(X86ISD::ADD, dl, VTs, Op0, Op1.getOperand(1));
22591 return Add.getValue(1);
22594 // Use SUB instead of CMP to enable CSE between SUB and CMP.
22595 SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
22596 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
22597 return Sub.getValue(1);
22600 bool X86TargetLowering::isXAndYEqZeroPreferableToXAndYEqY(ISD::CondCode Cond,
22601 EVT VT) const {
22602 return !VT.isVector() || Cond != ISD::CondCode::SETEQ;
22605 bool X86TargetLowering::optimizeFMulOrFDivAsShiftAddBitcast(
22606 SDNode *N, SDValue, SDValue IntPow2) const {
22607 if (N->getOpcode() == ISD::FDIV)
22608 return true;
22610 EVT FPVT = N->getValueType(0);
22611 EVT IntVT = IntPow2.getValueType();
22613 // This indicates a non-free bitcast.
22614 // TODO: This is probably overly conservative as we will need to scale the
22615 // integer vector anyways for the int->fp cast.
22616 if (FPVT.isVector() &&
22617 FPVT.getScalarSizeInBits() != IntVT.getScalarSizeInBits())
22618 return false;
22620 return true;
22623 /// Check if replacement of SQRT with RSQRT should be disabled.
22624 bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
22625 EVT VT = Op.getValueType();
22627 // We don't need to replace SQRT with RSQRT for half type.
22628 if (VT.getScalarType() == MVT::f16)
22629 return true;
22631 // We never want to use both SQRT and RSQRT instructions for the same input.
22632 if (DAG.doesNodeExist(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
22633 return false;
22635 if (VT.isVector())
22636 return Subtarget.hasFastVectorFSQRT();
22637 return Subtarget.hasFastScalarFSQRT();
22640 /// The minimum architected relative accuracy is 2^-12. We need one
22641 /// Newton-Raphson step to have a good float result (24 bits of precision).
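/// (The refinement itself is emitted by the generic DAG combiner as the
/// standard Newton-Raphson step for 1/sqrt(x):
/// Est' = Est * (1.5 - 0.5 * Op * Est * Est).)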
22642 SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
22643 SelectionDAG &DAG, int Enabled,
22644 int &RefinementSteps,
22645 bool &UseOneConstNR,
22646 bool Reciprocal) const {
22647 SDLoc DL(Op);
22648 EVT VT = Op.getValueType();
22650 // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
22651 // It is likely not profitable to do this for f64 because a double-precision
22652 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
22653 // instructions: convert to single, rsqrtss, convert back to double, refine
22654 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
22655 // along with FMA, this could be a throughput win.
22656 // TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
22657 // after legalize types.
22658 if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
22659 (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
22660 (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
22661 (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
22662 (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
22663 if (RefinementSteps == ReciprocalEstimate::Unspecified)
22664 RefinementSteps = 1;
22666 UseOneConstNR = false;
22667 // There is no FRSQRT for 512-bit vectors, but there is RSQRT14.
22668 unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RSQRT14 : X86ISD::FRSQRT;
22669 SDValue Estimate = DAG.getNode(Opcode, DL, VT, Op);
22670 if (RefinementSteps == 0 && !Reciprocal)
22671 Estimate = DAG.getNode(ISD::FMUL, DL, VT, Op, Estimate);
22672 return Estimate;
22675 if (VT.getScalarType() == MVT::f16 && isTypeLegal(VT) &&
22676 Subtarget.hasFP16()) {
22677 assert(Reciprocal && "Don't replace SQRT with RSQRT for half type");
22678 if (RefinementSteps == ReciprocalEstimate::Unspecified)
22679 RefinementSteps = 0;
22681 if (VT == MVT::f16) {
22682 SDValue Zero = DAG.getIntPtrConstant(0, DL);
22683 SDValue Undef = DAG.getUNDEF(MVT::v8f16);
22684 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v8f16, Op);
22685 Op = DAG.getNode(X86ISD::RSQRT14S, DL, MVT::v8f16, Undef, Op);
22686 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Op, Zero);
22689 return DAG.getNode(X86ISD::RSQRT14, DL, VT, Op);
22691 return SDValue();
22694 /// The minimum architected relative accuracy is 2^-12. We need one
22695 /// Newton-Raphson step to have a good float result (24 bits of precision).
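/// (The refinement step emitted by the generic DAG combiner is the standard
/// Newton-Raphson iteration for 1/x: Est' = Est + Est * (1.0 - Op * Est).)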
22696 SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
22697 int Enabled,
22698 int &RefinementSteps) const {
22699 SDLoc DL(Op);
22700 EVT VT = Op.getValueType();
22702 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
22703 // It is likely not profitable to do this for f64 because a double-precision
22704 // reciprocal estimate with refinement on x86 prior to FMA requires
22705 // 15 instructions: convert to single, rcpss, convert back to double, refine
22706 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
22707 // along with FMA, this could be a throughput win.
22709 if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
22710 (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
22711 (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
22712 (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
22713 // Enable estimate codegen with 1 refinement step for vector division.
22714 // Scalar division estimates are disabled because they break too much
22715 // real-world code. These defaults are intended to match GCC behavior.
22716 if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
22717 return SDValue();
22719 if (RefinementSteps == ReciprocalEstimate::Unspecified)
22720 RefinementSteps = 1;
22722 // There is no FRCP for 512-bit vectors, but there is RCP14.
22723 unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RCP14 : X86ISD::FRCP;
22724 return DAG.getNode(Opcode, DL, VT, Op);
22727 if (VT.getScalarType() == MVT::f16 && isTypeLegal(VT) &&
22728 Subtarget.hasFP16()) {
22729 if (RefinementSteps == ReciprocalEstimate::Unspecified)
22730 RefinementSteps = 0;
22732 if (VT == MVT::f16) {
22733 SDValue Zero = DAG.getIntPtrConstant(0, DL);
22734 SDValue Undef = DAG.getUNDEF(MVT::v8f16);
22735 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v8f16, Op);
22736 Op = DAG.getNode(X86ISD::RCP14S, DL, MVT::v8f16, Undef, Op);
22737 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Op, Zero);
22740 return DAG.getNode(X86ISD::RCP14, DL, VT, Op);
22742 return SDValue();
22745 /// If we have at least two divisions that use the same divisor, convert to
22746 /// multiplication by a reciprocal. This may need to be adjusted for a given
22747 /// CPU if a division's cost is not at least twice the cost of a multiplication.
22748 /// This is because we still need one division to calculate the reciprocal and
22749 /// then we need two multiplies by that reciprocal as replacements for the
22750 /// original divisions.
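/// For example, with reciprocal transforms allowed, 'a/d' and 'b/d' can
/// become 'r = 1.0/d; a*r; b*r' - one division and two multiplies instead
/// of two divisions.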
22751 unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
22752 return 2;
22755 SDValue
22756 X86TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
22757 SelectionDAG &DAG,
22758 SmallVectorImpl<SDNode *> &Created) const {
22759 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
22760 if (isIntDivCheap(N->getValueType(0), Attr))
22761 return SDValue(N, 0); // Lower SDIV as SDIV.
22763 assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
22764 "Unexpected divisor!");
22766 // Only perform this transform if CMOV is supported; otherwise the select
22767 // below will become a branch.
22768 if (!Subtarget.canUseCMOV())
22769 return SDValue();
22771 // fold (sdiv X, pow2)
22772 EVT VT = N->getValueType(0);
22773 // FIXME: Support i8.
22774 if (VT != MVT::i16 && VT != MVT::i32 &&
22775 !(Subtarget.is64Bit() && VT == MVT::i64))
22776 return SDValue();
22778 // If the divisor is 2 or -2, the default expansion is better.
22779 if (Divisor == 2 ||
22780 Divisor == APInt(Divisor.getBitWidth(), -2, /*isSigned*/ true))
22781 return SDValue();
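// The generic helper below emits (roughly): select with a CMOV on X < 0 to
// add a bias of (2^k - 1) to X, arithmetic-shift-right by k, and negate the
// result if the divisor was a negated power of two.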
22783 return TargetLowering::buildSDIVPow2WithCMov(N, Divisor, DAG, Created);
22786 /// Result of 'and' is compared against zero. Change to a BT node if possible.
22787 /// Returns the BT node and the condition code needed to use it.
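/// For example: (and X, (shl 1, N)) == 0 --> BT X, N with X86::COND_AE,
/// and ((X >> N) & 1) != 0 --> BT X, N with X86::COND_B.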
22788 static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC, const SDLoc &dl,
22789 SelectionDAG &DAG, X86::CondCode &X86CC) {
22790 assert(And.getOpcode() == ISD::AND && "Expected AND node!");
22791 SDValue Op0 = And.getOperand(0);
22792 SDValue Op1 = And.getOperand(1);
22793 if (Op0.getOpcode() == ISD::TRUNCATE)
22794 Op0 = Op0.getOperand(0);
22795 if (Op1.getOpcode() == ISD::TRUNCATE)
22796 Op1 = Op1.getOperand(0);
22798 SDValue Src, BitNo;
22799 if (Op1.getOpcode() == ISD::SHL)
22800 std::swap(Op0, Op1);
22801 if (Op0.getOpcode() == ISD::SHL) {
22802 if (isOneConstant(Op0.getOperand(0))) {
22803 // If we looked past a truncate, check that it's only truncating away
22804 // known zeros.
22805 unsigned BitWidth = Op0.getValueSizeInBits();
22806 unsigned AndBitWidth = And.getValueSizeInBits();
22807 if (BitWidth > AndBitWidth) {
22808 KnownBits Known = DAG.computeKnownBits(Op0);
22809 if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
22810 return SDValue();
22812 Src = Op1;
22813 BitNo = Op0.getOperand(1);
22815 } else if (Op1.getOpcode() == ISD::Constant) {
22816 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
22817 uint64_t AndRHSVal = AndRHS->getZExtValue();
22818 SDValue AndLHS = Op0;
22820 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
22821 Src = AndLHS.getOperand(0);
22822 BitNo = AndLHS.getOperand(1);
22823 } else {
22824 // Use BT if the immediate can't be encoded in a TEST instruction, or if we
22825 // are optimizing for size and the immediate won't fit in a byte.
22826 bool OptForSize = DAG.shouldOptForSize();
22827 if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
22828 isPowerOf2_64(AndRHSVal)) {
22829 Src = AndLHS;
22830 BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
22831 Src.getValueType());
22836 // No patterns found, give up.
22837 if (!Src.getNode())
22838 return SDValue();
22840 // Remove any bit flip.
22841 if (isBitwiseNot(Src)) {
22842 Src = Src.getOperand(0);
22843 CC = CC == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ;
22846 // Attempt to create the X86ISD::BT node.
22847 if (SDValue BT = getBT(Src, BitNo, dl, DAG)) {
22848 X86CC = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
22849 return BT;
22852 return SDValue();
22855 // Check if pre-AVX condcode can be performed by a single FCMP op.
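// SETONE and SETUEQ are the two predicates that need a pair of CMPP results
// combined with FAND/FOR on pre-AVX targets (see LowerVSETCC below).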
22856 static bool cheapX86FSETCC_SSE(ISD::CondCode SetCCOpcode) {
22857 return (SetCCOpcode != ISD::SETONE) && (SetCCOpcode != ISD::SETUEQ);
22860 /// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
22861 /// CMPs.
22862 static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
22863 SDValue &Op1, bool &IsAlwaysSignaling) {
22864 unsigned SSECC;
22865 bool Swap = false;
22867 // SSE Condition code mapping:
22868 // 0 - EQ
22869 // 1 - LT
22870 // 2 - LE
22871 // 3 - UNORD
22872 // 4 - NEQ
22873 // 5 - NLT
22874 // 6 - NLE
22875 // 7 - ORD
22876 switch (SetCCOpcode) {
22877 default: llvm_unreachable("Unexpected SETCC condition");
22878 case ISD::SETOEQ:
22879 case ISD::SETEQ: SSECC = 0; break;
22880 case ISD::SETOGT:
22881 case ISD::SETGT: Swap = true; [[fallthrough]];
22882 case ISD::SETLT:
22883 case ISD::SETOLT: SSECC = 1; break;
22884 case ISD::SETOGE:
22885 case ISD::SETGE: Swap = true; [[fallthrough]];
22886 case ISD::SETLE:
22887 case ISD::SETOLE: SSECC = 2; break;
22888 case ISD::SETUO: SSECC = 3; break;
22889 case ISD::SETUNE:
22890 case ISD::SETNE: SSECC = 4; break;
22891 case ISD::SETULE: Swap = true; [[fallthrough]];
22892 case ISD::SETUGE: SSECC = 5; break;
22893 case ISD::SETULT: Swap = true; [[fallthrough]];
22894 case ISD::SETUGT: SSECC = 6; break;
22895 case ISD::SETO: SSECC = 7; break;
22896 case ISD::SETUEQ: SSECC = 8; break;
22897 case ISD::SETONE: SSECC = 12; break;
22899 if (Swap)
22900 std::swap(Op0, Op1);
22902 switch (SetCCOpcode) {
22903 default:
22904 IsAlwaysSignaling = true;
22905 break;
22906 case ISD::SETEQ:
22907 case ISD::SETOEQ:
22908 case ISD::SETUEQ:
22909 case ISD::SETNE:
22910 case ISD::SETONE:
22911 case ISD::SETUNE:
22912 case ISD::SETO:
22913 case ISD::SETUO:
22914 IsAlwaysSignaling = false;
22915 break;
22918 return SSECC;
22921 /// Break a 256-bit integer VSETCC into two new 128-bit ones and then
22922 /// concatenate the results back together.
22923 static SDValue splitIntVSETCC(EVT VT, SDValue LHS, SDValue RHS,
22924 ISD::CondCode Cond, SelectionDAG &DAG,
22925 const SDLoc &dl) {
22926 assert(VT.isInteger() && VT == LHS.getValueType() &&
22927 VT == RHS.getValueType() && "Unsupported VTs!");
22929 SDValue CC = DAG.getCondCode(Cond);
22931 // Extract the LHS Lo/Hi vectors
22932 SDValue LHS1, LHS2;
22933 std::tie(LHS1, LHS2) = splitVector(LHS, DAG, dl);
22935 // Extract the RHS Lo/Hi vectors
22936 SDValue RHS1, RHS2;
22937 std::tie(RHS1, RHS2) = splitVector(RHS, DAG, dl);
22939 // Issue the operation on the smaller types and concatenate the result back
22940 EVT LoVT, HiVT;
22941 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
22942 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
22943 DAG.getNode(ISD::SETCC, dl, LoVT, LHS1, RHS1, CC),
22944 DAG.getNode(ISD::SETCC, dl, HiVT, LHS2, RHS2, CC));
22947 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
22949 SDValue Op0 = Op.getOperand(0);
22950 SDValue Op1 = Op.getOperand(1);
22951 SDValue CC = Op.getOperand(2);
22952 MVT VT = Op.getSimpleValueType();
22953 SDLoc dl(Op);
22955 assert(VT.getVectorElementType() == MVT::i1 &&
22956 "Cannot set masked compare for this operation");
22958 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
22960 // Prefer SETGT over SETLT.
22961 if (SetCCOpcode == ISD::SETLT) {
22962 SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
22963 std::swap(Op0, Op1);
22966 return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
22969 /// Given a buildvector constant, return a new vector constant with each element
22970 /// incremented or decremented. If incrementing or decrementing would result in
22971 /// unsigned overflow or underflow or this is not a simple vector constant,
22972 /// return an empty value.
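/// For example, incrementing <i32 1, i32 2, i32 3, i32 4> yields
/// <i32 2, i32 3, i32 4, i32 5>, while incrementing <i32 -1, i32 0> fails
/// because the all-ones element would wrap around.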
22973 static SDValue incDecVectorConstant(SDValue V, SelectionDAG &DAG, bool IsInc,
22974 bool NSW) {
22975 auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode());
22976 if (!BV || !V.getValueType().isSimple())
22977 return SDValue();
22979 MVT VT = V.getSimpleValueType();
22980 MVT EltVT = VT.getVectorElementType();
22981 unsigned NumElts = VT.getVectorNumElements();
22982 SmallVector<SDValue, 8> NewVecC;
22983 SDLoc DL(V);
22984 for (unsigned i = 0; i < NumElts; ++i) {
22985 auto *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
22986 if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EltVT)
22987 return SDValue();
22989 // Avoid overflow/underflow.
22990 const APInt &EltC = Elt->getAPIntValue();
22991 if ((IsInc && EltC.isMaxValue()) || (!IsInc && EltC.isZero()))
22992 return SDValue();
22993 if (NSW && ((IsInc && EltC.isMaxSignedValue()) ||
22994 (!IsInc && EltC.isMinSignedValue())))
22995 return SDValue();
22997 NewVecC.push_back(DAG.getConstant(EltC + (IsInc ? 1 : -1), DL, EltVT));
23000 return DAG.getBuildVector(VT, DL, NewVecC);
23003 /// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
23004 /// Op0 u<= Op1:
23005 /// t = psubus Op0, Op1
23006 /// pcmpeq t, <0..0>
23007 static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
23008 ISD::CondCode Cond, const SDLoc &dl,
23009 const X86Subtarget &Subtarget,
23010 SelectionDAG &DAG) {
23011 if (!Subtarget.hasSSE2())
23012 return SDValue();
23014 MVT VET = VT.getVectorElementType();
23015 if (VET != MVT::i8 && VET != MVT::i16)
23016 return SDValue();
23018 switch (Cond) {
23019 default:
23020 return SDValue();
23021 case ISD::SETULT: {
23022 // If the comparison is against a constant, we can turn this into a
23023 // setule. With psubus, setule does not require a swap. This is
23024 // beneficial because the constant in the register is no longer
23025 // clobbered as the destination operand, so it can be hoisted out of a loop.
23026 // Only do this pre-AVX, since vpcmp* is not destructive.
23027 if (Subtarget.hasAVX())
23028 return SDValue();
23029 SDValue ULEOp1 =
23030 incDecVectorConstant(Op1, DAG, /*IsInc*/ false, /*NSW*/ false);
23031 if (!ULEOp1)
23032 return SDValue();
23033 Op1 = ULEOp1;
23034 break;
23036 case ISD::SETUGT: {
23037 // If the comparison is against a constant, we can turn this into a setuge.
23038 // This is beneficial because materializing a constant 0 for the PCMPEQ is
23039 // probably cheaper than XOR+PCMPGT using 2 different vector constants:
23040 // cmpgt (xor X, SignMaskC) CmpC --> cmpeq (usubsat (CmpC+1), X), 0
23041 SDValue UGEOp1 =
23042 incDecVectorConstant(Op1, DAG, /*IsInc*/ true, /*NSW*/ false);
23043 if (!UGEOp1)
23044 return SDValue();
23045 Op1 = Op0;
23046 Op0 = UGEOp1;
23047 break;
23049 // Psubus is better than flip-sign because it requires no inversion.
23050 case ISD::SETUGE:
23051 std::swap(Op0, Op1);
23052 break;
23053 case ISD::SETULE:
23054 break;
23057 SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
23058 return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
23059 DAG.getConstant(0, dl, VT));
23062 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
23063 SelectionDAG &DAG) {
23064 bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
23065 Op.getOpcode() == ISD::STRICT_FSETCCS;
23066 SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
23067 SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
23068 SDValue CC = Op.getOperand(IsStrict ? 3 : 2);
23069 MVT VT = Op->getSimpleValueType(0);
23070 ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
23071 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
23072 SDLoc dl(Op);
23074 if (isFP) {
23075 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
23076 assert(EltVT == MVT::f16 || EltVT == MVT::f32 || EltVT == MVT::f64);
23077 if (isSoftF16(EltVT, Subtarget))
23078 return SDValue();
23080 bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
23081 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
23083 // If we have a strict compare with a vXi1 result and the input is 128/256
23084 // bits we can't use a masked compare unless we have VLX. If we use a wider
23085 // compare like we do for non-strict, we might trigger spurious exceptions
23086 // from the upper elements. Instead emit an AVX compare and convert to mask.
23087 unsigned Opc;
23088 if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1 &&
23089 (!IsStrict || Subtarget.hasVLX() ||
23090 Op0.getSimpleValueType().is512BitVector())) {
23091 #ifndef NDEBUG
23092 unsigned Num = VT.getVectorNumElements();
23093 assert(Num <= 16 || (Num == 32 && EltVT == MVT::f16));
23094 #endif
23095 Opc = IsStrict ? X86ISD::STRICT_CMPM : X86ISD::CMPM;
23096 } else {
23097 Opc = IsStrict ? X86ISD::STRICT_CMPP : X86ISD::CMPP;
23098 // The SSE/AVX packed FP comparison nodes are defined with a
23099 // floating-point vector result that matches the operand type. This allows
23100 // them to work with an SSE1 target (integer vector types are not legal).
23101 VT = Op0.getSimpleValueType();
23104 SDValue Cmp;
23105 bool IsAlwaysSignaling;
23106 unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1, IsAlwaysSignaling);
23107 if (!Subtarget.hasAVX()) {
23108 // TODO: We could use the following steps to handle a quiet compare with
23109 // signaling encodings:
23110 // 1. Get ordered masks from a quiet ISD::SETO
23111 // 2. Use the masks to mask potential unordered elements in operands A and B
23112 // 3. Get the compare results of the masked A and B
23113 // 4. Calculate the final result using the mask and the result from step 3
23114 // But currently, we just fall back to scalar operations.
23115 if (IsStrict && IsAlwaysSignaling && !IsSignaling)
23116 return SDValue();
23118 // Insert an extra signaling instruction to raise exception.
23119 if (IsStrict && !IsAlwaysSignaling && IsSignaling) {
23120 SDValue SignalCmp = DAG.getNode(
23121 Opc, dl, {VT, MVT::Other},
23122 {Chain, Op0, Op1, DAG.getTargetConstant(1, dl, MVT::i8)}); // LT_OS
23123 // FIXME: It seems we need to update the flags of all new strict nodes.
23124 // Otherwise, mayRaiseFPException in MI will return false because
23125 // NoFPExcept is false by default. However, this doesn't appear to be done
23126 // in other patches.
23127 SignalCmp->setFlags(Op->getFlags());
23128 Chain = SignalCmp.getValue(1);
23131 // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
23132 // emit two comparisons and a logic op to tie them together.
23133 if (!cheapX86FSETCC_SSE(Cond)) {
23134 // LLVM predicate is SETUEQ or SETONE.
23135 unsigned CC0, CC1;
23136 unsigned CombineOpc;
23137 if (Cond == ISD::SETUEQ) {
23138 CC0 = 3; // UNORD
23139 CC1 = 0; // EQ
23140 CombineOpc = X86ISD::FOR;
23141 } else {
23142 assert(Cond == ISD::SETONE);
23143 CC0 = 7; // ORD
23144 CC1 = 4; // NEQ
23145 CombineOpc = X86ISD::FAND;
23148 SDValue Cmp0, Cmp1;
23149 if (IsStrict) {
23150 Cmp0 = DAG.getNode(
23151 Opc, dl, {VT, MVT::Other},
23152 {Chain, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8)});
23153 Cmp1 = DAG.getNode(
23154 Opc, dl, {VT, MVT::Other},
23155 {Chain, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8)});
23156 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Cmp0.getValue(1),
23157 Cmp1.getValue(1));
23158 } else {
23159 Cmp0 = DAG.getNode(
23160 Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8));
23161 Cmp1 = DAG.getNode(
23162 Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8));
23164 Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
23165 } else {
23166 if (IsStrict) {
23167 Cmp = DAG.getNode(
23168 Opc, dl, {VT, MVT::Other},
23169 {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
23170 Chain = Cmp.getValue(1);
23171 } else
23172 Cmp = DAG.getNode(
23173 Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
23175 } else {
23176 // Handle all other FP comparisons here.
23177 if (IsStrict) {
23178 // Make a flip on already signaling CCs before setting bit 4 of AVX CC.
23179 SSECC |= (IsAlwaysSignaling ^ IsSignaling) << 4;
23180 Cmp = DAG.getNode(
23181 Opc, dl, {VT, MVT::Other},
23182 {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
23183 Chain = Cmp.getValue(1);
23184 } else
23185 Cmp = DAG.getNode(
23186 Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
23189 if (VT.getFixedSizeInBits() >
23190 Op.getSimpleValueType().getFixedSizeInBits()) {
23191 // We emitted a compare with an XMM/YMM result. Finish converting to a
23192 // mask register using a vptestm.
23193 EVT CastVT = EVT(VT).changeVectorElementTypeToInteger();
23194 Cmp = DAG.getBitcast(CastVT, Cmp);
23195 Cmp = DAG.getSetCC(dl, Op.getSimpleValueType(), Cmp,
23196 DAG.getConstant(0, dl, CastVT), ISD::SETNE);
23197 } else {
23198 // If this is SSE/AVX CMPP, bitcast the result back to integer to match
23199 // the result type of SETCC. The bitcast is expected to be optimized
23200 // away during combining/isel.
23201 Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
23204 if (IsStrict)
23205 return DAG.getMergeValues({Cmp, Chain}, dl);
23207 return Cmp;
23210 assert(!IsStrict && "Strict SETCC only handles FP operands.");
23212 MVT VTOp0 = Op0.getSimpleValueType();
23213 (void)VTOp0;
23214 assert(VTOp0 == Op1.getSimpleValueType() &&
23215 "Expected operands with same type!");
23216 assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
23217 "Invalid number of packed elements for source and destination!");
23219 // The non-AVX512 code below works under the assumption that source and
23220 // destination types are the same.
23221 assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
23222 "Value types for source and destination must be the same!");
23224 // The result is boolean, but operands are int/float
23225 if (VT.getVectorElementType() == MVT::i1) {
23226 // In the AVX-512 architecture, setcc returns a mask with i1 elements,
23227 // but there is no compare instruction for i8 and i16 elements in KNL.
23228 assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
23229 "Unexpected operand type");
23230 return LowerIntVSETCC_AVX512(Op, DAG);
23233 // Lower using XOP integer comparisons.
23234 if (VT.is128BitVector() && Subtarget.hasXOP()) {
23235 // Translate compare code to XOP PCOM compare mode.
23236 unsigned CmpMode = 0;
23237 switch (Cond) {
23238 default: llvm_unreachable("Unexpected SETCC condition");
23239 case ISD::SETULT:
23240 case ISD::SETLT: CmpMode = 0x00; break;
23241 case ISD::SETULE:
23242 case ISD::SETLE: CmpMode = 0x01; break;
23243 case ISD::SETUGT:
23244 case ISD::SETGT: CmpMode = 0x02; break;
23245 case ISD::SETUGE:
23246 case ISD::SETGE: CmpMode = 0x03; break;
23247 case ISD::SETEQ: CmpMode = 0x04; break;
23248 case ISD::SETNE: CmpMode = 0x05; break;
23251 // Are we comparing unsigned or signed integers?
23252 unsigned Opc =
23253 ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;
23255 return DAG.getNode(Opc, dl, VT, Op0, Op1,
23256 DAG.getTargetConstant(CmpMode, dl, MVT::i8));
23259 // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
23260 // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
23261 if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
23262 SDValue BC0 = peekThroughBitcasts(Op0);
23263 if (BC0.getOpcode() == ISD::AND) {
23264 APInt UndefElts;
23265 SmallVector<APInt, 64> EltBits;
23266 if (getTargetConstantBitsFromNode(BC0.getOperand(1),
23267 VT.getScalarSizeInBits(), UndefElts,
23268 EltBits, false, false)) {
23269 if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
23270 Cond = ISD::SETEQ;
23271 Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
23277 // ICMP_EQ(AND(X,C),C) -> SRA(SHL(X,LOG2(C)),BW-1) iff C is power-of-2.
23278 if (Cond == ISD::SETEQ && Op0.getOpcode() == ISD::AND &&
23279 Op0.getOperand(1) == Op1 && Op0.hasOneUse()) {
23280 ConstantSDNode *C1 = isConstOrConstSplat(Op1);
23281 if (C1 && C1->getAPIntValue().isPowerOf2()) {
23282 unsigned BitWidth = VT.getScalarSizeInBits();
23283 unsigned ShiftAmt = BitWidth - C1->getAPIntValue().logBase2() - 1;
23285 SDValue Result = Op0.getOperand(0);
23286 Result = DAG.getNode(ISD::SHL, dl, VT, Result,
23287 DAG.getConstant(ShiftAmt, dl, VT));
23288 Result = DAG.getNode(ISD::SRA, dl, VT, Result,
23289 DAG.getConstant(BitWidth - 1, dl, VT));
23290 return Result;
23294 // Break 256-bit integer vector compare into smaller ones.
23295 if (VT.is256BitVector() && !Subtarget.hasInt256())
23296 return splitIntVSETCC(VT, Op0, Op1, Cond, DAG, dl);
23298 // Break 512-bit integer vector compare into smaller ones.
23299 // TODO: Try harder to use VPCMPx + VPMOV2x?
23300 if (VT.is512BitVector())
23301 return splitIntVSETCC(VT, Op0, Op1, Cond, DAG, dl);
23303 // If we have a limit constant, try to form PCMPGT (signed cmp) to avoid
23304 // not-of-PCMPEQ:
23305 // X != INT_MIN --> X >s INT_MIN
23306 // X != INT_MAX --> X <s INT_MAX --> INT_MAX >s X
23307 // +X != 0 --> +X >s 0
23308 APInt ConstValue;
23309 if (Cond == ISD::SETNE &&
23310 ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
23311 if (ConstValue.isMinSignedValue())
23312 Cond = ISD::SETGT;
23313 else if (ConstValue.isMaxSignedValue())
23314 Cond = ISD::SETLT;
23315 else if (ConstValue.isZero() && DAG.SignBitIsZero(Op0))
23316 Cond = ISD::SETGT;
23319 // If both operands are known non-negative, then an unsigned compare is the
23320 // same as a signed compare and there's no need to flip signbits.
23321 // TODO: We could check for more general simplifications here since we're
23322 // computing known bits.
23323 bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
23324 !(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));
23326 // Special case: Use min/max operations for unsigned compares.
23327 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23328 if (ISD::isUnsignedIntSetCC(Cond) &&
23329 (FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
23330 TLI.isOperationLegal(ISD::UMIN, VT)) {
23331 // If we have a constant operand, increment/decrement it and change the
23332 // condition to avoid an invert.
23333 if (Cond == ISD::SETUGT) {
23334 // X > C --> X >= (C+1) --> X == umax(X, C+1)
23335 if (SDValue UGTOp1 =
23336 incDecVectorConstant(Op1, DAG, /*IsInc*/ true, /*NSW*/ false)) {
23337 Op1 = UGTOp1;
23338 Cond = ISD::SETUGE;
23341 if (Cond == ISD::SETULT) {
23342 // X < C --> X <= (C-1) --> X == umin(X, C-1)
23343 if (SDValue ULTOp1 =
23344 incDecVectorConstant(Op1, DAG, /*IsInc*/ false, /*NSW*/ false)) {
23345 Op1 = ULTOp1;
23346 Cond = ISD::SETULE;
23349 bool Invert = false;
23350 unsigned Opc;
23351 switch (Cond) {
23352 default: llvm_unreachable("Unexpected condition code");
23353 case ISD::SETUGT: Invert = true; [[fallthrough]];
23354 case ISD::SETULE: Opc = ISD::UMIN; break;
23355 case ISD::SETULT: Invert = true; [[fallthrough]];
23356 case ISD::SETUGE: Opc = ISD::UMAX; break;
23359 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
23360 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
23362 // If the logical-not of the result is required, perform that now.
23363 if (Invert)
23364 Result = DAG.getNOT(dl, Result, VT);
23366 return Result;
23369 // Try to use SUBUS and PCMPEQ.
23370 if (FlipSigns)
23371 if (SDValue V =
23372 LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
23373 return V;
23375 // We are handling one of the integer comparisons here. Since SSE only has
23376 // GT and EQ comparisons for integers, swapping operands and multiple
23377 // operations may be required for some comparisons.
23378 unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
23379 : X86ISD::PCMPGT;
23380 bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
23381 Cond == ISD::SETGE || Cond == ISD::SETUGE;
23382 bool Invert = Cond == ISD::SETNE ||
23383 (Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));
23385 if (Swap)
23386 std::swap(Op0, Op1);
23388 // Check that the operation in question is available (most are plain SSE2,
23389 // but PCMPGTQ and PCMPEQQ have different requirements).
23390 if (VT == MVT::v2i64) {
23391 if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
23392 assert(Subtarget.hasSSE2() && "Don't know how to lower!");
23394 // Special case for sign bit test. We can use a v4i32 PCMPGT and shuffle
23395 // the odd elements over the even elements.
23396 if (!FlipSigns && !Invert && ISD::isBuildVectorAllZeros(Op0.getNode())) {
23397 Op0 = DAG.getConstant(0, dl, MVT::v4i32);
23398 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
23400 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
23401 static const int MaskHi[] = { 1, 1, 3, 3 };
23402 SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
23404 return DAG.getBitcast(VT, Result);
23407 if (!FlipSigns && !Invert && ISD::isBuildVectorAllOnes(Op1.getNode())) {
23408 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
23409 Op1 = DAG.getConstant(-1, dl, MVT::v4i32);
23411 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
23412 static const int MaskHi[] = { 1, 1, 3, 3 };
23413 SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
23415 return DAG.getBitcast(VT, Result);
23418 // Since SSE has no unsigned integer comparisons, we need to flip the sign
23419 // bits of the inputs before performing those operations. The lower
23420 // compare is always unsigned.
23421 SDValue SB = DAG.getConstant(FlipSigns ? 0x8000000080000000ULL
23422 : 0x0000000080000000ULL,
23423 dl, MVT::v2i64);
23425 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op0, SB);
23426 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op1, SB);
23428 // Cast everything to the right type.
23429 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
23430 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
23432 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
23433 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
23434 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
23436 // Create masks for only the low parts/high parts of the 64-bit integers.
23437 static const int MaskHi[] = { 1, 1, 3, 3 };
23438 static const int MaskLo[] = { 0, 0, 2, 2 };
23439 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
23440 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
23441 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
23443 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
23444 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
23446 if (Invert)
23447 Result = DAG.getNOT(dl, Result, MVT::v4i32);
23449 return DAG.getBitcast(VT, Result);
23452 if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
23453 // If pcmpeqq is missing but pcmpeqd is available, synthesize pcmpeqq with
23454 // pcmpeqd + pshufd + pand.
23455 assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
23457 // First cast everything to the right type.
23458 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
23459 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
23461 // Do the compare.
23462 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
23464 // Make sure the lower and upper halves are both all-ones.
23465 static const int Mask[] = { 1, 0, 3, 2 };
23466 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
23467 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
23469 if (Invert)
23470 Result = DAG.getNOT(dl, Result, MVT::v4i32);
23472 return DAG.getBitcast(VT, Result);
23476 // Since SSE has no unsigned integer comparisons, we need to flip the sign
23477 // bits of the inputs before performing those operations.
23478 if (FlipSigns) {
23479 MVT EltVT = VT.getVectorElementType();
23480 SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
23481 VT);
23482 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
23483 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
23486 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
23488 // If the logical-not of the result is required, perform that now.
23489 if (Invert)
23490 Result = DAG.getNOT(dl, Result, VT);
23492 return Result;
23495 // Try to select this as a KORTEST+SETCC or KTEST+SETCC if possible.
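// For example, (bitcast (v16i1 K) to i16) == 0 becomes KORTEST K, K with
// X86::COND_E, and == -1 becomes KORTEST K, K with X86::COND_B, since
// KORTEST sets ZF when the OR of the masks is all zeros and CF when it is
// all ones.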
23496 static SDValue EmitAVX512Test(SDValue Op0, SDValue Op1, ISD::CondCode CC,
23497 const SDLoc &dl, SelectionDAG &DAG,
23498 const X86Subtarget &Subtarget,
23499 SDValue &X86CC) {
23500 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");
23502 // Must be a bitcast from vXi1.
23503 if (Op0.getOpcode() != ISD::BITCAST)
23504 return SDValue();
23506 Op0 = Op0.getOperand(0);
23507 MVT VT = Op0.getSimpleValueType();
23508 if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
23509 !(Subtarget.hasDQI() && VT == MVT::v8i1) &&
23510 !(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
23511 return SDValue();
23513 X86::CondCode X86Cond;
23514 if (isNullConstant(Op1)) {
23515 X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
23516 } else if (isAllOnesConstant(Op1)) {
23517 // C flag is set for all ones.
23518 X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
23519 } else
23520 return SDValue();
23522 // If the input is an AND, we can combine its operands into the KTEST.
23523 bool KTestable = false;
23524 if (Subtarget.hasDQI() && (VT == MVT::v8i1 || VT == MVT::v16i1))
23525 KTestable = true;
23526 if (Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1))
23527 KTestable = true;
23528 if (!isNullConstant(Op1))
23529 KTestable = false;
23530 if (KTestable && Op0.getOpcode() == ISD::AND && Op0.hasOneUse()) {
23531 SDValue LHS = Op0.getOperand(0);
23532 SDValue RHS = Op0.getOperand(1);
23533 X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
23534 return DAG.getNode(X86ISD::KTEST, dl, MVT::i32, LHS, RHS);
23537 // If the input is an OR, we can combine its operands into the KORTEST.
23538 SDValue LHS = Op0;
23539 SDValue RHS = Op0;
23540 if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
23541 LHS = Op0.getOperand(0);
23542 RHS = Op0.getOperand(1);
23545 X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
23546 return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
23549 /// Emit flags for the given setcc condition and operands. Also returns the
23550 /// corresponding X86 condition code constant in X86CC.
23551 SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
23552 ISD::CondCode CC, const SDLoc &dl,
23553 SelectionDAG &DAG,
23554 SDValue &X86CC) const {
23555 // Equality Combines.
23556 if (CC == ISD::SETEQ || CC == ISD::SETNE) {
23557 X86::CondCode X86CondCode;
23559 // Optimize to BT if possible.
23560 // Lower (X & (1 << N)) == 0 to BT(X, N).
23561 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
23562 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
23563 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1)) {
23564 if (SDValue BT = LowerAndToBT(Op0, CC, dl, DAG, X86CondCode)) {
23565 X86CC = DAG.getTargetConstant(X86CondCode, dl, MVT::i8);
23566 return BT;
23570 // Try to use PTEST/PMOVMSKB for a tree of ANDs/ORs equality-compared with -1/0.
23571 if (SDValue CmpZ = MatchVectorAllEqualTest(Op0, Op1, CC, dl, Subtarget, DAG,
23572 X86CondCode)) {
23573 X86CC = DAG.getTargetConstant(X86CondCode, dl, MVT::i8);
23574 return CmpZ;
23577 // Try to lower using KORTEST or KTEST.
23578 if (SDValue Test = EmitAVX512Test(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
23579 return Test;
23581 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms
23582 // of these.
23583 if (isOneConstant(Op1) || isNullConstant(Op1)) {
23584 // If the input is a setcc, then reuse the input setcc or use a new one
23585 // with the inverted condition.
23586 if (Op0.getOpcode() == X86ISD::SETCC) {
23587 bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
23589 X86CC = Op0.getOperand(0);
23590 if (Invert) {
23591 X86CondCode = (X86::CondCode)Op0.getConstantOperandVal(0);
23592 X86CondCode = X86::GetOppositeBranchCondition(X86CondCode);
23593 X86CC = DAG.getTargetConstant(X86CondCode, dl, MVT::i8);
23596 return Op0.getOperand(1);
23600 // Try to use the carry flag from the add in place of a separate CMP for:
23601 // (seteq (add X, -1), -1). Similar for setne.
23602 if (isAllOnesConstant(Op1) && Op0.getOpcode() == ISD::ADD &&
23603 Op0.getOperand(1) == Op1) {
23604 if (isProfitableToUseFlagOp(Op0)) {
23605 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
23607 SDValue New = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(0),
23608 Op0.getOperand(1));
23609 DAG.ReplaceAllUsesOfValueWith(SDValue(Op0.getNode(), 0), New);
23610 X86CondCode = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
23611 X86CC = DAG.getTargetConstant(X86CondCode, dl, MVT::i8);
23612 return SDValue(New.getNode(), 1);
23617 X86::CondCode CondCode =
23618 TranslateX86CC(CC, dl, /*IsFP*/ false, Op0, Op1, DAG);
23619 assert(CondCode != X86::COND_INVALID && "Unexpected condition code!");
23621 SDValue EFLAGS = EmitCmp(Op0, Op1, CondCode, dl, DAG, Subtarget);
23622 X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
23623 return EFLAGS;
23626 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
23628 bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
23629 Op.getOpcode() == ISD::STRICT_FSETCCS;
23630 MVT VT = Op->getSimpleValueType(0);
23632 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
23634 assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
23635 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
23636 SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
23637 SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
23638 SDLoc dl(Op);
23639 ISD::CondCode CC =
23640 cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();
23642 if (isSoftF16(Op0.getValueType(), Subtarget))
23643 return SDValue();
23645 // Handle f128 first, since one possible outcome is a normal integer
23646 // comparison which gets handled by emitFlagsForSetcc.
23647 if (Op0.getValueType() == MVT::f128) {
23648 softenSetCCOperands(DAG, MVT::f128, Op0, Op1, CC, dl, Op0, Op1, Chain,
23649 Op.getOpcode() == ISD::STRICT_FSETCCS);
23651 // If softenSetCCOperands returned a scalar, use it.
23652 if (!Op1.getNode()) {
23653 assert(Op0.getValueType() == Op.getValueType() &&
23654 "Unexpected setcc expansion!");
23655 if (IsStrict)
23656 return DAG.getMergeValues({Op0, Chain}, dl);
23657 return Op0;
23661 if (Op0.getSimpleValueType().isInteger()) {
23662 // Attempt to canonicalize SGT/UGT -> SGE/UGE compares with a constant, which
23663 // reduces the number of EFLAGS bit reads (the GE conditions don't read ZF);
23664 // this may translate to fewer uops depending on the uarch implementation. The
23665 // equivalent for SLE/ULE -> SLT/ULT isn't likely to happen as we already
23666 // canonicalize to that CondCode.
23667 // NOTE: Only do this if incrementing the constant doesn't increase the bit
23668 // encoding size - so it must either already be an i8 or i32 immediate, or it
23669 // must shrink down to that. We don't do this for any i64's to avoid additional
23670 // constant materializations.
23671 // TODO: Can we move this to TranslateX86CC to handle jumps/branches too?
23672 if (auto *Op1C = dyn_cast<ConstantSDNode>(Op1)) {
23673 const APInt &Op1Val = Op1C->getAPIntValue();
23674 if (!Op1Val.isZero()) {
23675 // Ensure the constant+1 doesn't overflow.
23676 if ((CC == ISD::CondCode::SETGT && !Op1Val.isMaxSignedValue()) ||
23677 (CC == ISD::CondCode::SETUGT && !Op1Val.isMaxValue())) {
23678 APInt Op1ValPlusOne = Op1Val + 1;
23679 if (Op1ValPlusOne.isSignedIntN(32) &&
23680 (!Op1Val.isSignedIntN(8) || Op1ValPlusOne.isSignedIntN(8))) {
23681 Op1 = DAG.getConstant(Op1ValPlusOne, dl, Op0.getValueType());
23682 CC = CC == ISD::CondCode::SETGT ? ISD::CondCode::SETGE
23683 : ISD::CondCode::SETUGE;
23689 SDValue X86CC;
23690 SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC);
23691 SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
23692 return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
23695 // Handle floating point.
23696 X86::CondCode CondCode = TranslateX86CC(CC, dl, /*IsFP*/ true, Op0, Op1, DAG);
23697 if (CondCode == X86::COND_INVALID)
23698 return SDValue();
23700 SDValue EFLAGS;
23701 if (IsStrict) {
23702 bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
23703 EFLAGS =
23704 DAG.getNode(IsSignaling ? X86ISD::STRICT_FCMPS : X86ISD::STRICT_FCMP,
23705 dl, {MVT::i32, MVT::Other}, {Chain, Op0, Op1});
23706 Chain = EFLAGS.getValue(1);
23707 } else {
23708 EFLAGS = DAG.getNode(X86ISD::FCMP, dl, MVT::i32, Op0, Op1);
23711 SDValue X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
23712 SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
23713 return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
23716 SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
23717 SDValue LHS = Op.getOperand(0);
23718 SDValue RHS = Op.getOperand(1);
23719 SDValue Carry = Op.getOperand(2);
23720 SDValue Cond = Op.getOperand(3);
23721 SDLoc DL(Op);
23723 assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
23724 X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
23726 // Recreate the carry if needed.
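// Adding all-ones (-1) sets CF exactly when Carry is nonzero, so the flag
// result consumed by the SBB below reflects the incoming carry.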
23727 EVT CarryVT = Carry.getValueType();
23728 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
23729 Carry, DAG.getAllOnesConstant(DL, CarryVT));
23731 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
23732 SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
23733 return getSETCC(CC, Cmp.getValue(1), DL, DAG);
23736 // This function produces three things: the arithmetic computation itself
23737 // (Value) and an EFLAGS result (Overflow), returned as a pair, plus a
23738 // condition code (Cond) returned through the reference parameter. The flag
23739 // and the condition code define the case in which the arithmetic overflows.
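// For example, ISD::SADDO maps to X86ISD::ADD with X86::COND_O, while
// ISD::UADDO maps to X86ISD::ADD with X86::COND_B (or X86::COND_E when the
// RHS is 1, presumably so the add can be selected as an INC, which does not
// update CF).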
23740 static std::pair<SDValue, SDValue>
23741 getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
23742 assert(Op.getResNo() == 0 && "Unexpected result number!");
23743 SDValue Value, Overflow;
23744 SDValue LHS = Op.getOperand(0);
23745 SDValue RHS = Op.getOperand(1);
23746 unsigned BaseOp = 0;
23747 SDLoc DL(Op);
23748 switch (Op.getOpcode()) {
23749 default: llvm_unreachable("Unknown ovf instruction!");
23750 case ISD::SADDO:
23751 BaseOp = X86ISD::ADD;
23752 Cond = X86::COND_O;
23753 break;
23754 case ISD::UADDO:
23755 BaseOp = X86ISD::ADD;
23756 Cond = isOneConstant(RHS) ? X86::COND_E : X86::COND_B;
23757 break;
23758 case ISD::SSUBO:
23759 BaseOp = X86ISD::SUB;
23760 Cond = X86::COND_O;
23761 break;
23762 case ISD::USUBO:
23763 BaseOp = X86ISD::SUB;
23764 Cond = X86::COND_B;
23765 break;
23766 case ISD::SMULO:
23767 BaseOp = X86ISD::SMUL;
23768 Cond = X86::COND_O;
23769 break;
23770 case ISD::UMULO:
23771 BaseOp = X86ISD::UMUL;
23772 Cond = X86::COND_O;
23773 break;
23776 if (BaseOp) {
23777 // Also sets EFLAGS.
23778 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
23779 Value = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
23780 Overflow = Value.getValue(1);
23783 return std::make_pair(Value, Overflow);
23786 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
23787 // Lower the "add/sub/mul with overflow" instruction into a regular instruction
23788 // plus a "setcc" instruction that checks the overflow flag. The "brcond"
23789 // lowering looks for this combo and may remove the "setcc" instruction if the
23790 // "setcc" has only one use.
23791 SDLoc DL(Op);
23792 X86::CondCode Cond;
23793 SDValue Value, Overflow;
23794 std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG);
23796 SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG);
23797 assert(Op->getValueType(1) == MVT::i8 && "Unexpected VT!");
23798 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC);
23801 /// Return true if opcode is a X86 logical comparison.
23802 static bool isX86LogicalCmp(SDValue Op) {
23803 unsigned Opc = Op.getOpcode();
23804 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
23805 Opc == X86ISD::FCMP)
23806 return true;
23807 if (Op.getResNo() == 1 &&
23808 (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
23809 Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
23810 Opc == X86ISD::OR || Opc == X86ISD::XOR || Opc == X86ISD::AND))
23811 return true;
23813 return false;
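/// Return true if V is a truncate whose discarded high bits are known to be
/// zero, so the truncate can be looked through when emitting a test.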
23816 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
23817 if (V.getOpcode() != ISD::TRUNCATE)
23818 return false;
23820 SDValue VOp0 = V.getOperand(0);
23821 unsigned InBits = VOp0.getValueSizeInBits();
23822 unsigned Bits = V.getValueSizeInBits();
23823 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits, InBits - Bits));
23826 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
23827 bool AddTest = true;
23828 SDValue Cond = Op.getOperand(0);
23829 SDValue Op1 = Op.getOperand(1);
23830 SDValue Op2 = Op.getOperand(2);
23831 SDLoc DL(Op);
23832 MVT VT = Op1.getSimpleValueType();
23833 SDValue CC;
23835 if (isSoftF16(VT, Subtarget)) {
23836 MVT NVT = VT.changeTypeToInteger();
23837 return DAG.getBitcast(VT, DAG.getNode(ISD::SELECT, DL, NVT, Cond,
23838 DAG.getBitcast(NVT, Op1),
23839 DAG.getBitcast(NVT, Op2)));
23842 // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
23843 // are available, or into a VBLENDV if AVX is available.
23844 // Otherwise FP cmovs get lowered into a less efficient branch sequence later.
23845 if (Cond.getOpcode() == ISD::SETCC && isScalarFPTypeInSSEReg(VT) &&
23846 VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
23847 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
23848 bool IsAlwaysSignaling;
23849 unsigned SSECC =
23850 translateX86FSETCC(cast<CondCodeSDNode>(Cond.getOperand(2))->get(),
23851 CondOp0, CondOp1, IsAlwaysSignaling);
23853 if (Subtarget.hasAVX512()) {
23854 SDValue Cmp =
23855 DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0, CondOp1,
23856 DAG.getTargetConstant(SSECC, DL, MVT::i8));
23857 assert(!VT.isVector() && "Not a scalar type?");
23858 return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
23861 if (SSECC < 8 || Subtarget.hasAVX()) {
23862 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
23863 DAG.getTargetConstant(SSECC, DL, MVT::i8));
23865 // If we have AVX, we can use a variable vector select (VBLENDV) instead
23866 // of 3 logic instructions for size savings and potentially speed.
23867 // Unfortunately, there is no scalar form of VBLENDV.
23869 // If either operand is a +0.0 constant, don't try this. We can expect to
23870 // optimize away at least one of the logic instructions later in that
23871 // case, so that sequence would be faster than a variable blend.
23873 // BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
23874 // uses XMM0 as the selection register. That may need just as many
23875 // instructions as the AND/ANDN/OR sequence due to register moves, so
23876 // don't bother.
23877 if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
23878 !isNullFPConstant(Op2)) {
23879 // Convert to vectors, do a VSELECT, and convert back to scalar.
23880 // All of the conversions should be optimized away.
23881 MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
23882 SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
23883 SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
23884 SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
23886 MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
23887 VCmp = DAG.getBitcast(VCmpVT, VCmp);
23889 SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);
23891 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
23892 VSel, DAG.getIntPtrConstant(0, DL));
23894 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
23895 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
23896 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
23900 // AVX512 fallback is to lower selects of scalar floats to masked moves.
23901 if (isScalarFPTypeInSSEReg(VT) && Subtarget.hasAVX512()) {
23902 SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
23903 return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
23906 if (Cond.getOpcode() == ISD::SETCC &&
23907 !isSoftF16(Cond.getOperand(0).getSimpleValueType(), Subtarget)) {
23908 if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
23909 Cond = NewCond;
23910 // If the condition was updated, it's possible that the operands of the
23911 // select were also updated (for example, EmitTest has a RAUW). Refresh
23912 // the local references to the select operands in case they got stale.
23913 Op1 = Op.getOperand(1);
23914 Op2 = Op.getOperand(2);
23918 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
23919 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
23920 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
23921 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
23922 // (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
23923 // (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
23924 // (select (x > 0), x, 0) -> (~(x >> (size_in_bits(x)-1))) & x
23925 // (select (x < 0), x, 0) -> ((x >> (size_in_bits(x)-1))) & x
23926 if (Cond.getOpcode() == X86ISD::SETCC &&
23927 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
23928 isNullConstant(Cond.getOperand(1).getOperand(1))) {
23929 SDValue Cmp = Cond.getOperand(1);
23930 SDValue CmpOp0 = Cmp.getOperand(0);
23931 unsigned CondCode = Cond.getConstantOperandVal(0);
23933 // Special handling for the __builtin_ffs(X) - 1 pattern, which looks like
23934 // (select (seteq X, 0), -1, (cttz_zero_undef X)). Disable the special
23935 // handling to keep the CMP with 0. The CMP should later be removed by
23936 // optimizeCompareInst by reusing the flags from the BSR/TZCNT emitted for the
23937 // cttz_zero_undef.
23938 auto MatchFFSMinus1 = [&](SDValue Op1, SDValue Op2) {
23939 return (Op1.getOpcode() == ISD::CTTZ_ZERO_UNDEF && Op1.hasOneUse() &&
23940 Op1.getOperand(0) == CmpOp0 && isAllOnesConstant(Op2));
23942 if (Subtarget.canUseCMOV() && (VT == MVT::i32 || VT == MVT::i64) &&
23943 ((CondCode == X86::COND_NE && MatchFFSMinus1(Op1, Op2)) ||
23944 (CondCode == X86::COND_E && MatchFFSMinus1(Op2, Op1)))) {
23945 // Keep Cmp.
23946 } else if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
23947 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
23948 SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
23949 SDVTList CmpVTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
23951 // 'X - 1' sets the carry flag if X == 0.
23952 // '0 - X' sets the carry flag if X != 0.
23953 // Convert the carry flag to a -1/0 mask with sbb:
23954 // select (X != 0), -1, Y --> 0 - X; or (sbb), Y
23955 // select (X == 0), Y, -1 --> 0 - X; or (sbb), Y
23956 // select (X != 0), Y, -1 --> X - 1; or (sbb), Y
23957 // select (X == 0), -1, Y --> X - 1; or (sbb), Y
23958 SDValue Sub;
23959 if (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE)) {
23960 SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
23961 Sub = DAG.getNode(X86ISD::SUB, DL, CmpVTs, Zero, CmpOp0);
23962 } else {
23963 SDValue One = DAG.getConstant(1, DL, CmpOp0.getValueType());
23964 Sub = DAG.getNode(X86ISD::SUB, DL, CmpVTs, CmpOp0, One);
23966 SDValue SBB = DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
23967 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
23968 Sub.getValue(1));
23969 return DAG.getNode(ISD::OR, DL, VT, SBB, Y);
23970 } else if (!Subtarget.canUseCMOV() && CondCode == X86::COND_E &&
23971 CmpOp0.getOpcode() == ISD::AND &&
23972 isOneConstant(CmpOp0.getOperand(1))) {
23973 SDValue Src1, Src2;
23974 // Returns true if Op2 is an XOR or OR operation and one of its operands
23975 // is equal to Op1, i.e. the pair is:
23976 // (a, a op b) or (b, a op b)
23977 auto isOrXorPattern = [&]() {
23978 if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
23979 (Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
23980 Src1 =
23981 Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
23982 Src2 = Op1;
23983 return true;
23985 return false;
23988 if (isOrXorPattern()) {
23989 SDValue Neg;
23990 unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
23991 // We need a mask of all zeros or all ones with the same size as the other
23992 // operands.
23993 if (CmpSz > VT.getSizeInBits())
23994 Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
23995 else if (CmpSz < VT.getSizeInBits())
23996 Neg = DAG.getNode(ISD::AND, DL, VT,
23997 DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
23998 DAG.getConstant(1, DL, VT));
23999 else
24000 Neg = CmpOp0;
24001 SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
24002 Neg); // -(and (x, 0x1))
24003 SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
24004 return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2); // And Op y
24006 } else if ((VT == MVT::i32 || VT == MVT::i64) && isNullConstant(Op2) &&
24007 Cmp.getNode()->hasOneUse() && (CmpOp0 == Op1) &&
24008 ((CondCode == X86::COND_S) || // smin(x, 0)
24009 (CondCode == X86::COND_G && hasAndNot(Op1)))) { // smax(x, 0)
24010 // (select (x < 0), x, 0) -> ((x >> (size_in_bits(x)-1))) & x
24012 // If the comparison is testing for a positive value, we have to invert
24013 // the sign bit mask, so only do that transform if the target has a
24014 // bitwise 'and not' instruction (the invert is free).
24015 // (select (x > 0), x, 0) -> (~(x >> (size_in_bits(x)-1))) & x
24016 unsigned ShCt = VT.getSizeInBits() - 1;
24017 SDValue ShiftAmt = DAG.getConstant(ShCt, DL, VT);
24018 SDValue Shift = DAG.getNode(ISD::SRA, DL, VT, Op1, ShiftAmt);
24019 if (CondCode == X86::COND_G)
24020 Shift = DAG.getNOT(DL, Shift, VT);
24021 return DAG.getNode(ISD::AND, DL, VT, Shift, Op1);
24025 // Look past (and (setcc_carry (cmp ...)), 1).
24026 if (Cond.getOpcode() == ISD::AND &&
24027 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
24028 isOneConstant(Cond.getOperand(1)))
24029 Cond = Cond.getOperand(0);
24031 // If the condition flag is set by an X86ISD::CMP, then use it as the
24032 // condition-setting operand in place of the X86ISD::SETCC.
24033 unsigned CondOpcode = Cond.getOpcode();
24034 if (CondOpcode == X86ISD::SETCC ||
24035 CondOpcode == X86ISD::SETCC_CARRY) {
24036 CC = Cond.getOperand(0);
24038 SDValue Cmp = Cond.getOperand(1);
24039 bool IllegalFPCMov = false;
24040 if (VT.isFloatingPoint() && !VT.isVector() &&
24041 !isScalarFPTypeInSSEReg(VT) && Subtarget.canUseCMOV()) // FPStack?
24042 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
24044 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
24045 Cmp.getOpcode() == X86ISD::BT) { // FIXME
24046 Cond = Cmp;
24047 AddTest = false;
24049 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
24050 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
24051 CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
24052 SDValue Value;
24053 X86::CondCode X86Cond;
24054 std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
24056 CC = DAG.getTargetConstant(X86Cond, DL, MVT::i8);
24057 AddTest = false;
24060 if (AddTest) {
24061 // Look past the truncate if the high bits are known zero.
24062 if (isTruncWithZeroHighBitsInput(Cond, DAG))
24063 Cond = Cond.getOperand(0);
24065 // We know the result of AND is compared against zero. Try to match
24066 // it to BT.
24067 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
24068 X86::CondCode X86CondCode;
24069 if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, DL, DAG, X86CondCode)) {
24070 CC = DAG.getTargetConstant(X86CondCode, DL, MVT::i8);
24071 Cond = BT;
24072 AddTest = false;
24077 if (AddTest) {
24078 CC = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
24079 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG, Subtarget);
24082 // a < b ? -1 : 0 -> RES = ~setcc_carry
24083 // a < b ? 0 : -1 -> RES = setcc_carry
24084 // a >= b ? -1 : 0 -> RES = setcc_carry
24085 // a >= b ? 0 : -1 -> RES = ~setcc_carry
24086 if (Cond.getOpcode() == X86ISD::SUB) {
24087 unsigned CondCode = CC->getAsZExtVal();
24089 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
24090 (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
24091 (isNullConstant(Op1) || isNullConstant(Op2))) {
24092 SDValue Res =
24093 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
24094 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cond);
24095 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
24096 return DAG.getNOT(DL, Res, Res.getValueType());
24097 return Res;
24101 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
24102 // widen the cmov and push the truncate through. This avoids introducing a new
24103 // branch during isel and doesn't add any extensions.
24104 if (Op.getValueType() == MVT::i8 &&
24105 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
24106 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
24107 if (T1.getValueType() == T2.getValueType() &&
24108 // Exclude CopyFromReg to avoid partial register stalls.
24109 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode() != ISD::CopyFromReg) {
24110 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
24111 CC, Cond);
24112 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
24116 // Or finally, promote i8 cmovs if we have CMOV,
24117 // or i16 cmovs if it won't prevent folding a load.
24118 // FIXME: we should not limit promotion of the i8 case to only when CMOV is
24119 // legal, but EmitLoweredSelect() cannot deal with these extensions
24120 // being inserted between two CMOVs (this applies to the i16 case too).
24121 // https://bugs.llvm.org/show_bug.cgi?id=40974
24122 if ((Op.getValueType() == MVT::i8 && Subtarget.canUseCMOV()) ||
24123 (Op.getValueType() == MVT::i16 && !X86::mayFoldLoad(Op1, Subtarget) &&
24124 !X86::mayFoldLoad(Op2, Subtarget))) {
24125 Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
24126 Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
24127 SDValue Ops[] = { Op2, Op1, CC, Cond };
24128 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
24129 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
24132 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
24133 // condition is true.
24134 SDValue Ops[] = { Op2, Op1, CC, Cond };
24135 return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops, Op->getFlags());
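// Lower a sign/any extension of a vXi1 mask by performing the extend in a
// wider legal vector type (widening to 512 bits when VLX is unavailable, or
// selecting between -1 and 0 when no direct vXi1 extend exists), and then
// truncating and/or extracting a subvector back to the requested type.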
24138 static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
24139 const X86Subtarget &Subtarget,
24140 SelectionDAG &DAG) {
24141 MVT VT = Op->getSimpleValueType(0);
24142 SDValue In = Op->getOperand(0);
24143 MVT InVT = In.getSimpleValueType();
24144 assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
24145 MVT VTElt = VT.getVectorElementType();
24146 SDLoc dl(Op);
24148 unsigned NumElts = VT.getVectorNumElements();
24150 // Extend VT if the scalar type is i8/i16 and BWI is not supported.
24151 MVT ExtVT = VT;
24152 if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
24153 // If v16i32 is to be avoided, we'll need to split and concatenate.
24154 if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
24155 return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);
24157 ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
24160 // Widen to 512-bits if VLX is not supported.
24161 MVT WideVT = ExtVT;
24162 if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
24163 NumElts *= 512 / ExtVT.getSizeInBits();
24164 InVT = MVT::getVectorVT(MVT::i1, NumElts);
24165 In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
24166 In, DAG.getIntPtrConstant(0, dl));
24167 WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
24170 SDValue V;
24171 MVT WideEltVT = WideVT.getVectorElementType();
24172 if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
24173 (Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
24174 V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
24175 } else {
24176 SDValue NegOne = DAG.getConstant(-1, dl, WideVT);
24177 SDValue Zero = DAG.getConstant(0, dl, WideVT);
24178 V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
24181 // Truncate if we had to extend i16/i8 above.
24182 if (VT != ExtVT) {
24183 WideVT = MVT::getVectorVT(VTElt, NumElts);
24184 V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
24187 // Extract back to 128/256-bit if we widened.
24188 if (WideVT != VT)
24189 V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
24190 DAG.getIntPtrConstant(0, dl));
24192 return V;
24195 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
24196 SelectionDAG &DAG) {
24197 SDValue In = Op->getOperand(0);
24198 MVT InVT = In.getSimpleValueType();
24200 if (InVT.getVectorElementType() == MVT::i1)
24201 return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
24203 assert(Subtarget.hasAVX() && "Expected AVX support");
24204 return LowerAVXExtend(Op, DAG, Subtarget);
24207 // Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
24208 // For sign extend this needs to handle all vector sizes and SSE4.1 and
24209 // non-SSE4.1 targets. For zero extend this should only handle inputs of
24210 // MVT::v64i8 when BWI is not supported, but AVX512 is.
24211 static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
24212 const X86Subtarget &Subtarget,
24213 SelectionDAG &DAG) {
24214 SDValue In = Op->getOperand(0);
24215 MVT VT = Op->getSimpleValueType(0);
24216 MVT InVT = In.getSimpleValueType();
24218 MVT SVT = VT.getVectorElementType();
24219 MVT InSVT = InVT.getVectorElementType();
24220 assert(SVT.getFixedSizeInBits() > InSVT.getFixedSizeInBits());
24222 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
24223 return SDValue();
24224 if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
24225 return SDValue();
24226 if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
24227 !(VT.is256BitVector() && Subtarget.hasAVX()) &&
24228 !(VT.is512BitVector() && Subtarget.hasAVX512()))
24229 return SDValue();
24231 SDLoc dl(Op);
24232 unsigned Opc = Op.getOpcode();
24233 unsigned NumElts = VT.getVectorNumElements();
24235 // For 256-bit vectors, we only need the lower (128-bit) half of the input.
24236 // For 512-bit vectors, we need 128-bits or 256-bits.
24237 if (InVT.getSizeInBits() > 128) {
24238 // Input needs to be at least the same number of elements as output, and
24239 // at least 128-bits.
24240 int InSize = InSVT.getSizeInBits() * NumElts;
24241 In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
24242 InVT = In.getSimpleValueType();
24245 // SSE41 targets can use the pmov[sz]x* instructions directly for 128-bit
24246 // results, so those are legal and shouldn't occur here. AVX2/AVX512 pmovsx*
24247 // instructions still need to be handled here for 256/512-bit results.
24248 if (Subtarget.hasInt256()) {
24249 assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");
24251 if (InVT.getVectorNumElements() != NumElts)
24252 return DAG.getNode(Op.getOpcode(), dl, VT, In);
24254 // FIXME: Apparently we create inreg operations that could be regular
24255 // extends.
24256 unsigned ExtOpc =
24257 Opc == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SIGN_EXTEND
24258 : ISD::ZERO_EXTEND;
24259 return DAG.getNode(ExtOpc, dl, VT, In);
24262 // pre-AVX2 256-bit extensions need to be split into 128-bit instructions.
24263 if (Subtarget.hasAVX()) {
24264 assert(VT.is256BitVector() && "256-bit vector expected");
24265 MVT HalfVT = VT.getHalfNumVectorElementsVT();
24266 int HalfNumElts = HalfVT.getVectorNumElements();
24268 unsigned NumSrcElts = InVT.getVectorNumElements();
24269 SmallVector<int, 16> HiMask(NumSrcElts, SM_SentinelUndef);
24270 for (int i = 0; i != HalfNumElts; ++i)
24271 HiMask[i] = HalfNumElts + i;
24273 SDValue Lo = DAG.getNode(Opc, dl, HalfVT, In);
24274 SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, DAG.getUNDEF(InVT), HiMask);
24275 Hi = DAG.getNode(Opc, dl, HalfVT, Hi);
24276 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
24279 // We should only get here for sign extend.
24280 assert(Opc == ISD::SIGN_EXTEND_VECTOR_INREG && "Unexpected opcode!");
24281 assert(VT.is128BitVector() && InVT.is128BitVector() && "Unexpected VTs");
24282 unsigned InNumElts = InVT.getVectorNumElements();
24284 // If the source elements are already all-signbits, we don't need to extend,
24285 // just splat the elements.
24286 APInt DemandedElts = APInt::getLowBitsSet(InNumElts, NumElts);
24287 if (DAG.ComputeNumSignBits(In, DemandedElts) == InVT.getScalarSizeInBits()) {
24288 unsigned Scale = InNumElts / NumElts;
24289 SmallVector<int, 16> ShuffleMask;
24290 for (unsigned I = 0; I != NumElts; ++I)
24291 ShuffleMask.append(Scale, I);
24292 return DAG.getBitcast(VT,
24293 DAG.getVectorShuffle(InVT, dl, In, In, ShuffleMask));
24296 // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
24297 SDValue Curr = In;
24298 SDValue SignExt = Curr;
24300 // As SRAI is only available on i16/i32 types, we expand only up to i32
24301 // and handle i64 separately.
24302 if (InVT != MVT::v4i32) {
24303 MVT DestVT = VT == MVT::v2i64 ? MVT::v4i32 : VT;
24305 unsigned DestWidth = DestVT.getScalarSizeInBits();
24306 unsigned Scale = DestWidth / InSVT.getSizeInBits();
24307 unsigned DestElts = DestVT.getVectorNumElements();
24309 // Build a shuffle mask that takes each input element and places it in the
24310 // MSBs of the new element size.
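// Illustrative example: for v4i32 from v16i8, Scale is 4 and the mask is
//   {-1,-1,-1,0, -1,-1,-1,1, -1,-1,-1,2, -1,-1,-1,3}
// which places each byte in the top byte of its i32 lane before the VSRAI
// by 24 below.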
24311 SmallVector<int, 16> Mask(InNumElts, SM_SentinelUndef);
24312 for (unsigned i = 0; i != DestElts; ++i)
24313 Mask[i * Scale + (Scale - 1)] = i;
24315 Curr = DAG.getVectorShuffle(InVT, dl, In, In, Mask);
24316 Curr = DAG.getBitcast(DestVT, Curr);
24318 unsigned SignExtShift = DestWidth - InSVT.getSizeInBits();
24319 SignExt = DAG.getNode(X86ISD::VSRAI, dl, DestVT, Curr,
24320 DAG.getTargetConstant(SignExtShift, dl, MVT::i8));
24323 if (VT == MVT::v2i64) {
24324 assert(Curr.getValueType() == MVT::v4i32 && "Unexpected input VT");
24325 SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
24326 SDValue Sign = DAG.getSetCC(dl, MVT::v4i32, Zero, Curr, ISD::SETGT);
24327 SignExt = DAG.getVectorShuffle(MVT::v4i32, dl, SignExt, Sign, {0, 4, 1, 5});
24328 SignExt = DAG.getBitcast(VT, SignExt);
24331 return SignExt;
24334 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
24335 SelectionDAG &DAG) {
24336 MVT VT = Op->getSimpleValueType(0);
24337 SDValue In = Op->getOperand(0);
24338 MVT InVT = In.getSimpleValueType();
24339 SDLoc dl(Op);
24341 if (InVT.getVectorElementType() == MVT::i1)
24342 return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
24344 assert(VT.isVector() && InVT.isVector() && "Expected vector type");
24345 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
24346 "Expected same number of elements");
24347 assert((VT.getVectorElementType() == MVT::i16 ||
24348 VT.getVectorElementType() == MVT::i32 ||
24349 VT.getVectorElementType() == MVT::i64) &&
24350 "Unexpected element type");
24351 assert((InVT.getVectorElementType() == MVT::i8 ||
24352 InVT.getVectorElementType() == MVT::i16 ||
24353 InVT.getVectorElementType() == MVT::i32) &&
24354 "Unexpected element type");
24356 if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
24357 assert(InVT == MVT::v32i8 && "Unexpected VT!");
24358 return splitVectorIntUnary(Op, DAG);
24361 if (Subtarget.hasInt256())
24362 return Op;
24364 // Optimize vectors in AVX mode:
24365 // Sign extend v8i16 to v8i32 and
24366 // v4i32 to v4i64.
24368 // Divide the input vector into two parts;
24369 // for v4i32 the high shuffle mask will be {2, 3, -1, -1}.
24370 // Use the vpmovsx instruction to extend v4i32 -> v2i64 and v8i16 -> v4i32,
24371 // then concat the vectors back to the original VT.
24372 MVT HalfVT = VT.getHalfNumVectorElementsVT();
24373 SDValue OpLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, In);
24375 unsigned NumElems = InVT.getVectorNumElements();
24376 SmallVector<int,8> ShufMask(NumElems, -1);
24377 for (unsigned i = 0; i != NumElems/2; ++i)
24378 ShufMask[i] = i + NumElems/2;
24380 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
24381 OpHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, OpHi);
24383 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
24386 /// Change a vector store into a pair of half-size vector stores.
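/// For example (illustrative), a 256-bit store to Ptr becomes two 128-bit
/// stores, one to Ptr and one to Ptr + 16.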
24387 static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {
24388 SDValue StoredVal = Store->getValue();
24389 assert((StoredVal.getValueType().is256BitVector() ||
24390 StoredVal.getValueType().is512BitVector()) &&
24391 "Expecting 256/512-bit op");
24393 // Splitting volatile memory ops is not allowed unless the operation was not
24394 // legal to begin with. Assume the input store is legal (this transform is
24395 // only used for targets with AVX). Note: It is possible that we have an
24396 // illegal type like v2i128, and so we could allow splitting a volatile store
24397 // in that case if that is important.
24398 if (!Store->isSimple())
24399 return SDValue();
24401 SDLoc DL(Store);
24402 SDValue Value0, Value1;
24403 std::tie(Value0, Value1) = splitVector(StoredVal, DAG, DL);
24404 unsigned HalfOffset = Value0.getValueType().getStoreSize();
24405 SDValue Ptr0 = Store->getBasePtr();
24406 SDValue Ptr1 =
24407 DAG.getMemBasePlusOffset(Ptr0, TypeSize::getFixed(HalfOffset), DL);
24408 SDValue Ch0 =
24409 DAG.getStore(Store->getChain(), DL, Value0, Ptr0, Store->getPointerInfo(),
24410 Store->getOriginalAlign(),
24411 Store->getMemOperand()->getFlags());
24412 SDValue Ch1 = DAG.getStore(Store->getChain(), DL, Value1, Ptr1,
24413 Store->getPointerInfo().getWithOffset(HalfOffset),
24414 Store->getOriginalAlign(),
24415 Store->getMemOperand()->getFlags());
24416 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch0, Ch1);
24419 /// Scalarize a vector store, bitcasting to \p StoreVT to determine the scalar
24420 /// type.
24421 static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
24422 SelectionDAG &DAG) {
24423 SDValue StoredVal = Store->getValue();
24424 assert(StoreVT.is128BitVector() &&
24425 StoredVal.getValueType().is128BitVector() && "Expecting 128-bit op");
24426 StoredVal = DAG.getBitcast(StoreVT, StoredVal);
24428 // Splitting volatile memory ops is not allowed unless the operation was not
24429 // legal to begin with. We are assuming the input op is legal (this transform
24430 // is only used for targets with AVX).
24431 if (!Store->isSimple())
24432 return SDValue();
24434 MVT StoreSVT = StoreVT.getScalarType();
24435 unsigned NumElems = StoreVT.getVectorNumElements();
24436 unsigned ScalarSize = StoreSVT.getStoreSize();
24438 SDLoc DL(Store);
24439 SmallVector<SDValue, 4> Stores;
24440 for (unsigned i = 0; i != NumElems; ++i) {
24441 unsigned Offset = i * ScalarSize;
24442 SDValue Ptr = DAG.getMemBasePlusOffset(Store->getBasePtr(),
24443 TypeSize::getFixed(Offset), DL);
24444 SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreSVT, StoredVal,
24445 DAG.getIntPtrConstant(i, DL));
24446 SDValue Ch = DAG.getStore(Store->getChain(), DL, Scl, Ptr,
24447 Store->getPointerInfo().getWithOffset(Offset),
24448 Store->getOriginalAlign(),
24449 Store->getMemOperand()->getFlags());
24450 Stores.push_back(Ch);
24452 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
24455 static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
24456 SelectionDAG &DAG) {
24457 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
24458 SDLoc dl(St);
24459 SDValue StoredVal = St->getValue();
24461 // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 stores.
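// Illustrative example: a v4i1 store is widened to v16i1, bitcast to i16,
// truncated to i8, has its upper 4 bits cleared, and is stored as an i8.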
24462 if (StoredVal.getValueType().isVector() &&
24463 StoredVal.getValueType().getVectorElementType() == MVT::i1) {
24464 unsigned NumElts = StoredVal.getValueType().getVectorNumElements();
24465 assert(NumElts <= 8 && "Unexpected VT");
24466 assert(!St->isTruncatingStore() && "Expected non-truncating store");
24467 assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
24468 "Expected AVX512F without AVX512DQI");
24470 // We must pad with zeros to ensure we store zeros in any unused bits.
24471 StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
24472 DAG.getUNDEF(MVT::v16i1), StoredVal,
24473 DAG.getIntPtrConstant(0, dl));
24474 StoredVal = DAG.getBitcast(MVT::i16, StoredVal);
24475 StoredVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, StoredVal);
24476 // Make sure we store zeros in the extra bits.
24477 if (NumElts < 8)
24478 StoredVal = DAG.getZeroExtendInReg(
24479 StoredVal, dl, EVT::getIntegerVT(*DAG.getContext(), NumElts));
24481 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
24482 St->getPointerInfo(), St->getOriginalAlign(),
24483 St->getMemOperand()->getFlags());
24486 if (St->isTruncatingStore())
24487 return SDValue();
24489 // If this is a 256-bit store of concatenated ops, we are better off splitting
24490 // that store into two 128-bit stores. This avoids spurious use of 256-bit ops
24491 // and each half can execute independently. Some cores would split the op into
24492 // halves anyway, so the concat (vinsertf128) is purely an extra op.
24493 MVT StoreVT = StoredVal.getSimpleValueType();
24494 if (StoreVT.is256BitVector() ||
24495 ((StoreVT == MVT::v32i16 || StoreVT == MVT::v64i8) &&
24496 !Subtarget.hasBWI())) {
24497 if (StoredVal.hasOneUse() && isFreeToSplitVector(StoredVal.getNode(), DAG))
24498 return splitVectorStore(St, DAG);
24499 return SDValue();
24502 if (StoreVT.is32BitVector())
24503 return SDValue();
24505 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24506 assert(StoreVT.is64BitVector() && "Unexpected VT");
24507 assert(TLI.getTypeAction(*DAG.getContext(), StoreVT) ==
24508 TargetLowering::TypeWidenVector &&
24509 "Unexpected type action!");
24511 EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), StoreVT);
24512 StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, StoredVal,
24513 DAG.getUNDEF(StoreVT));
24515 if (Subtarget.hasSSE2()) {
24516 // Widen the vector, cast to a v2x64 type, extract the single 64-bit element
24517 // and store it.
24518 MVT StVT = Subtarget.is64Bit() && StoreVT.isInteger() ? MVT::i64 : MVT::f64;
24519 MVT CastVT = MVT::getVectorVT(StVT, 2);
24520 StoredVal = DAG.getBitcast(CastVT, StoredVal);
24521 StoredVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StVT, StoredVal,
24522 DAG.getIntPtrConstant(0, dl));
24524 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
24525 St->getPointerInfo(), St->getOriginalAlign(),
24526 St->getMemOperand()->getFlags());
24528 assert(Subtarget.hasSSE1() && "Expected SSE");
24529 SDVTList Tys = DAG.getVTList(MVT::Other);
24530 SDValue Ops[] = {St->getChain(), StoredVal, St->getBasePtr()};
24531 return DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops, MVT::i64,
24532 St->getMemOperand());
24535 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
24536 // may emit an illegal shuffle but the expansion is still better than scalar
24537 // code. We generate sext/sext_invec for SEXTLOADs if it's available, otherwise
24538 // we'll emit a shuffle and an arithmetic shift.
24539 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
24540 // TODO: It is possible to support ZExt by zeroing the undef values during
24541 // the shuffle phase or after the shuffle.
24542 static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
24543 SelectionDAG &DAG) {
24544 MVT RegVT = Op.getSimpleValueType();
24545 assert(RegVT.isVector() && "We only custom lower vector loads.");
24546 assert(RegVT.isInteger() &&
24547 "We only custom lower integer vector loads.");
24549 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
24550 SDLoc dl(Ld);
24552 // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
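// Illustrative example: a v8i1 load becomes an i8 load, an any_extend to i16,
// a bitcast to v16i1, and an extract of the low v8i1.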
24553 if (RegVT.getVectorElementType() == MVT::i1) {
24554 assert(EVT(RegVT) == Ld->getMemoryVT() && "Expected non-extending load");
24555 assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT");
24556 assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
24557 "Expected AVX512F without AVX512DQI");
24559 SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(),
24560 Ld->getPointerInfo(), Ld->getOriginalAlign(),
24561 Ld->getMemOperand()->getFlags());
24563 // Replace chain users with the new chain.
24564 assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");
24566 SDValue Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, NewLd);
24567 Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
24568 DAG.getBitcast(MVT::v16i1, Val),
24569 DAG.getIntPtrConstant(0, dl));
24570 return DAG.getMergeValues({Val, NewLd.getValue(1)}, dl);
24573 return SDValue();
24576 /// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
24577 /// each of which has no other use apart from the AND / OR.
24578 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
24579 Opc = Op.getOpcode();
24580 if (Opc != ISD::OR && Opc != ISD::AND)
24581 return false;
24582 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
24583 Op.getOperand(0).hasOneUse() &&
24584 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
24585 Op.getOperand(1).hasOneUse());
24588 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
24589 SDValue Chain = Op.getOperand(0);
24590 SDValue Cond = Op.getOperand(1);
24591 SDValue Dest = Op.getOperand(2);
24592 SDLoc dl(Op);
24594 // Bail out when we don't have native compare instructions.
24595 if (Cond.getOpcode() == ISD::SETCC &&
24596 Cond.getOperand(0).getValueType() != MVT::f128 &&
24597 !isSoftF16(Cond.getOperand(0).getValueType(), Subtarget)) {
24598 SDValue LHS = Cond.getOperand(0);
24599 SDValue RHS = Cond.getOperand(1);
24600 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
24602 // Special case for
24603 // setcc([su]{add,sub,mul}o == 0)
24604 // setcc([su]{add,sub,mul}o != 1)
24605 if (ISD::isOverflowIntrOpRes(LHS) &&
24606 (CC == ISD::SETEQ || CC == ISD::SETNE) &&
24607 (isNullConstant(RHS) || isOneConstant(RHS))) {
24608 SDValue Value, Overflow;
24609 X86::CondCode X86Cond;
24610 std::tie(Value, Overflow) = getX86XALUOOp(X86Cond, LHS.getValue(0), DAG);
24612 if ((CC == ISD::SETEQ) == isNullConstant(RHS))
24613 X86Cond = X86::GetOppositeBranchCondition(X86Cond);
24615 SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
24616 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24617 Overflow);
24620 if (LHS.getSimpleValueType().isInteger()) {
24621 SDValue CCVal;
24622 SDValue EFLAGS = emitFlagsForSetcc(LHS, RHS, CC, SDLoc(Cond), DAG, CCVal);
24623 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24624 EFLAGS);
24627 if (CC == ISD::SETOEQ) {
24628 // For FCMP_OEQ, we can emit
24629 // two branches instead of an explicit AND instruction with a
24630 // separate test. However, we only do this if this block doesn't
24631 // have a fall-through edge, because this requires an explicit
24632 // jmp when the condition is false.
24633 if (Op.getNode()->hasOneUse()) {
24634 SDNode *User = *Op.getNode()->use_begin();
24635 // Look for an unconditional branch following this conditional branch.
24636 // We need this because we need to reverse the successors in order
24637 // to implement FCMP_OEQ.
24638 if (User->getOpcode() == ISD::BR) {
24639 SDValue FalseBB = User->getOperand(1);
24640 SDNode *NewBR =
24641 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
24642 assert(NewBR == User);
24643 (void)NewBR;
24644 Dest = FalseBB;
24646 SDValue Cmp =
24647 DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
24648 SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
24649 Chain = DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest,
24650 CCVal, Cmp);
24651 CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
24652 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24653 Cmp);
24656 } else if (CC == ISD::SETUNE) {
24657 // For FCMP_UNE, we can emit
24658 // two branches instead of an explicit OR instruction with a
24659 // separate test.
24660 SDValue Cmp = DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
24661 SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
24662 Chain =
24663 DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal, Cmp);
24664 CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
24665 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24666 Cmp);
24667 } else {
24668 X86::CondCode X86Cond =
24669 TranslateX86CC(CC, dl, /*IsFP*/ true, LHS, RHS, DAG);
24670 SDValue Cmp = DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
24671 SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
24672 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24673 Cmp);
24677 if (ISD::isOverflowIntrOpRes(Cond)) {
24678 SDValue Value, Overflow;
24679 X86::CondCode X86Cond;
24680 std::tie(Value, Overflow) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
24682 SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
24683 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24684 Overflow);
24687 // Look past the truncate if the high bits are known zero.
24688 if (isTruncWithZeroHighBitsInput(Cond, DAG))
24689 Cond = Cond.getOperand(0);
24691 EVT CondVT = Cond.getValueType();
24693 // Add an AND with 1 if we don't already have one.
24694 if (!(Cond.getOpcode() == ISD::AND && isOneConstant(Cond.getOperand(1))))
24695 Cond =
24696 DAG.getNode(ISD::AND, dl, CondVT, Cond, DAG.getConstant(1, dl, CondVT));
24698 SDValue LHS = Cond;
24699 SDValue RHS = DAG.getConstant(0, dl, CondVT);
24701 SDValue CCVal;
24702 SDValue EFLAGS = emitFlagsForSetcc(LHS, RHS, ISD::SETNE, dl, DAG, CCVal);
24703 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24704 EFLAGS);
24707 // Lower dynamic stack allocation to an _alloca call for Cygwin/MinGW targets.
24708 // Calls to _alloca are needed to probe the stack when allocating more than 4k
24709 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
24710 // that the guard pages used by the OS virtual memory manager are allocated in
24711 // the correct sequence.
24712 SDValue
24713 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
24714 SelectionDAG &DAG) const {
24715 MachineFunction &MF = DAG.getMachineFunction();
24716 bool SplitStack = MF.shouldSplitStack();
24717 bool EmitStackProbeCall = hasStackProbeSymbol(MF);
24718 bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
24719 SplitStack || EmitStackProbeCall;
24720 SDLoc dl(Op);
24722 // Get the inputs.
24723 SDNode *Node = Op.getNode();
24724 SDValue Chain = Op.getOperand(0);
24725 SDValue Size = Op.getOperand(1);
24726 MaybeAlign Alignment(Op.getConstantOperandVal(2));
24727 EVT VT = Node->getValueType(0);
24729 // Chain the dynamic stack allocation so that it doesn't modify the stack
24730 // pointer when other instructions are using the stack.
24731 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
24733 bool Is64Bit = Subtarget.is64Bit();
24734 MVT SPTy = getPointerTy(DAG.getDataLayout());
24736 SDValue Result;
24737 if (!Lower) {
24738 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24739 Register SPReg = TLI.getStackPointerRegisterToSaveRestore();
24740 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
24741 " not tell us which reg is the stack pointer!");
24743 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
24744 const Align StackAlign = TFI.getStackAlign();
24745 if (hasInlineStackProbe(MF)) {
24746 MachineRegisterInfo &MRI = MF.getRegInfo();
24748 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
24749 Register Vreg = MRI.createVirtualRegister(AddrRegClass);
24750 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
24751 Result = DAG.getNode(X86ISD::PROBED_ALLOCA, dl, SPTy, Chain,
24752 DAG.getRegister(Vreg, SPTy));
24753 } else {
24754 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
24755 Chain = SP.getValue(1);
24756 Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
24758 if (Alignment && *Alignment > StackAlign)
24759 Result =
24760 DAG.getNode(ISD::AND, dl, VT, Result,
24761 DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
24762 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
24763 } else if (SplitStack) {
24764 MachineRegisterInfo &MRI = MF.getRegInfo();
24766 if (Is64Bit) {
24767 // The 64-bit implementation of segmented stacks needs to clobber both r10
24768 // and r11. This makes it impossible to use it along with nested parameters.
24769 const Function &F = MF.getFunction();
24770 for (const auto &A : F.args()) {
24771 if (A.hasNestAttr())
24772 report_fatal_error("Cannot use segmented stacks with functions that "
24773 "have nested arguments.");
24777 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
24778 Register Vreg = MRI.createVirtualRegister(AddrRegClass);
24779 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
24780 Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
24781 DAG.getRegister(Vreg, SPTy));
24782 } else {
24783 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
24784 Chain = DAG.getNode(X86ISD::DYN_ALLOCA, dl, NodeTys, Chain, Size);
24785 MF.getInfo<X86MachineFunctionInfo>()->setHasDynAlloca(true);
24787 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
24788 Register SPReg = RegInfo->getStackRegister();
24789 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
24790 Chain = SP.getValue(1);
24792 if (Alignment) {
24793 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
24794 DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
24795 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
24798 Result = SP;
24801 Chain = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), dl);
24803 SDValue Ops[2] = {Result, Chain};
24804 return DAG.getMergeValues(Ops, dl);
24807 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
24808 MachineFunction &MF = DAG.getMachineFunction();
24809 auto PtrVT = getPointerTy(MF.getDataLayout());
24810 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
24812 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
24813 SDLoc DL(Op);
24815 if (!Subtarget.is64Bit() ||
24816 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
24817 // vastart just stores the address of the VarArgsFrameIndex slot into the
24818 // memory location argument.
24819 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
24820 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
24821 MachinePointerInfo(SV));
24824 // __va_list_tag:
24825 // gp_offset (ranges from 0 to 6 * 8)
24826 // fp_offset (ranges from 48 to 48 + 8 * 16)
24827 // overflow_arg_area (points to parameters passed in memory).
24828 // reg_save_area
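// In C terms (illustrative sketch of the SysV AMD64 layout used here):
//   struct __va_list_tag {
//     unsigned gp_offset;         // byte offset 0
//     unsigned fp_offset;         // byte offset 4
//     void    *overflow_arg_area; // byte offset 8
//     void    *reg_save_area;     // byte offset 16 (LP64)
//   };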
24829 SmallVector<SDValue, 8> MemOps;
24830 SDValue FIN = Op.getOperand(1);
24831 // Store gp_offset
24832 SDValue Store = DAG.getStore(
24833 Op.getOperand(0), DL,
24834 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
24835 MachinePointerInfo(SV));
24836 MemOps.push_back(Store);
24838 // Store fp_offset
24839 FIN = DAG.getMemBasePlusOffset(FIN, TypeSize::getFixed(4), DL);
24840 Store = DAG.getStore(
24841 Op.getOperand(0), DL,
24842 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
24843 MachinePointerInfo(SV, 4));
24844 MemOps.push_back(Store);
24846 // Store ptr to overflow_arg_area
24847 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
24848 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
24849 Store =
24850 DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
24851 MemOps.push_back(Store);
24853 // Store ptr to reg_save_area.
24854 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
24855 Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
24856 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
24857 Store = DAG.getStore(
24858 Op.getOperand(0), DL, RSFIN, FIN,
24859 MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
24860 MemOps.push_back(Store);
24861 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
24864 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
24865 assert(Subtarget.is64Bit() &&
24866 "LowerVAARG only handles 64-bit va_arg!");
24867 assert(Op.getNumOperands() == 4);
24869 MachineFunction &MF = DAG.getMachineFunction();
24870 if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
24871 // The Win64 ABI uses char* instead of a structure.
24872 return DAG.expandVAArg(Op.getNode());
24874 SDValue Chain = Op.getOperand(0);
24875 SDValue SrcPtr = Op.getOperand(1);
24876 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
24877 unsigned Align = Op.getConstantOperandVal(3);
24878 SDLoc dl(Op);
24880 EVT ArgVT = Op.getNode()->getValueType(0);
24881 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
24882 uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
24883 uint8_t ArgMode;
24885 // Decide which area this value should be read from.
24886 // TODO: Implement the AMD64 ABI in its entirety. This simple
24887 // selection mechanism works only for the basic types.
24888 assert(ArgVT != MVT::f80 && "va_arg for f80 not yet implemented");
24889 if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
24890 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
24891 } else {
24892 assert(ArgVT.isInteger() && ArgSize <= 32 /*bytes*/ &&
24893 "Unhandled argument type in LowerVAARG");
24894 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
24897 if (ArgMode == 2) {
24898 // Make sure using fp_offset makes sense.
24899 assert(!Subtarget.useSoftFloat() &&
24900 !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
24901 Subtarget.hasSSE1());
24904 // Insert a VAARG node into the DAG.
24905 // VAARG returns two values: the variable argument address and the chain.
24906 SDValue InstOps[] = {Chain, SrcPtr,
24907 DAG.getTargetConstant(ArgSize, dl, MVT::i32),
24908 DAG.getTargetConstant(ArgMode, dl, MVT::i8),
24909 DAG.getTargetConstant(Align, dl, MVT::i32)};
24910 SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
24911 SDValue VAARG = DAG.getMemIntrinsicNode(
24912 Subtarget.isTarget64BitLP64() ? X86ISD::VAARG_64 : X86ISD::VAARG_X32, dl,
24913 VTs, InstOps, MVT::i64, MachinePointerInfo(SV),
24914 /*Alignment=*/std::nullopt,
24915 MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
24916 Chain = VAARG.getValue(1);
24918 // Load the next argument and return it
24919 return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
24922 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
24923 SelectionDAG &DAG) {
24924 // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
24925 // where a va_list is still an i8*.
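// On LP64 that struct is 4 + 4 + 8 + 8 = 24 bytes, and 16 bytes with 32-bit
// pointers on x32, which is why the memcpy below copies 24 or 16 bytes.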
24926 assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
24927 if (Subtarget.isCallingConvWin64(
24928 DAG.getMachineFunction().getFunction().getCallingConv()))
24929 // Probably a Win64 va_copy.
24930 return DAG.expandVACopy(Op.getNode());
24932 SDValue Chain = Op.getOperand(0);
24933 SDValue DstPtr = Op.getOperand(1);
24934 SDValue SrcPtr = Op.getOperand(2);
24935 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
24936 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
24937 SDLoc DL(Op);
24939 return DAG.getMemcpy(
24940 Chain, DL, DstPtr, SrcPtr,
24941 DAG.getIntPtrConstant(Subtarget.isTarget64BitLP64() ? 24 : 16, DL),
24942 Align(Subtarget.isTarget64BitLP64() ? 8 : 4), /*isVolatile*/ false, false,
24943 false, MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
24946 // Helper to get immediate/variable SSE shift opcode from other shift opcodes.
24947 static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) {
24948 switch (Opc) {
24949 case ISD::SHL:
24950 case X86ISD::VSHL:
24951 case X86ISD::VSHLI:
24952 return IsVariable ? X86ISD::VSHL : X86ISD::VSHLI;
24953 case ISD::SRL:
24954 case X86ISD::VSRL:
24955 case X86ISD::VSRLI:
24956 return IsVariable ? X86ISD::VSRL : X86ISD::VSRLI;
24957 case ISD::SRA:
24958 case X86ISD::VSRA:
24959 case X86ISD::VSRAI:
24960 return IsVariable ? X86ISD::VSRA : X86ISD::VSRAI;
24962 llvm_unreachable("Unknown target vector shift node");
24965 /// Handle vector element shifts where the shift amount is a constant.
24966 /// Takes immediate version of shift as input.
24967 static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
24968 SDValue SrcOp, uint64_t ShiftAmt,
24969 SelectionDAG &DAG) {
24970 MVT ElementType = VT.getVectorElementType();
24972 // Bitcast the source vector to the output type; this is mainly necessary for
24973 // vXi8/vXi64 shifts.
24974 if (VT != SrcOp.getSimpleValueType())
24975 SrcOp = DAG.getBitcast(VT, SrcOp);
24977 // Fold this packed shift into its first operand if ShiftAmt is 0.
24978 if (ShiftAmt == 0)
24979 return SrcOp;
24981 // Check for ShiftAmt >= element width
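// Illustrative example: a VSHLI/VSRLI of i32 elements by 32 or more folds to
// zero, while a VSRAI is clamped to a shift by 31 (broadcasting the sign bit).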
24982 if (ShiftAmt >= ElementType.getSizeInBits()) {
24983 if (Opc == X86ISD::VSRAI)
24984 ShiftAmt = ElementType.getSizeInBits() - 1;
24985 else
24986 return DAG.getConstant(0, dl, VT);
24989 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
24990 && "Unknown target vector shift-by-constant node");
24992 // Fold this packed vector shift into a build vector if SrcOp is a
24993 // vector of Constants or UNDEFs.
24994 if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
24995 unsigned ShiftOpc;
24996 switch (Opc) {
24997 default: llvm_unreachable("Unknown opcode!");
24998 case X86ISD::VSHLI:
24999 ShiftOpc = ISD::SHL;
25000 break;
25001 case X86ISD::VSRLI:
25002 ShiftOpc = ISD::SRL;
25003 break;
25004 case X86ISD::VSRAI:
25005 ShiftOpc = ISD::SRA;
25006 break;
25009 SDValue Amt = DAG.getConstant(ShiftAmt, dl, VT);
25010 if (SDValue C = DAG.FoldConstantArithmetic(ShiftOpc, dl, VT, {SrcOp, Amt}))
25011 return C;
25014 return DAG.getNode(Opc, dl, VT, SrcOp,
25015 DAG.getTargetConstant(ShiftAmt, dl, MVT::i8));
25018 /// Handle vector element shifts by a splat shift amount
25019 static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
25020 SDValue SrcOp, SDValue ShAmt, int ShAmtIdx,
25021 const X86Subtarget &Subtarget,
25022 SelectionDAG &DAG) {
25023 MVT AmtVT = ShAmt.getSimpleValueType();
25024 assert(AmtVT.isVector() && "Vector shift type mismatch");
25025 assert(0 <= ShAmtIdx && ShAmtIdx < (int)AmtVT.getVectorNumElements() &&
25026 "Illegal vector splat index");
25028 // Move the splat element to the bottom element.
25029 if (ShAmtIdx != 0) {
25030 SmallVector<int> Mask(AmtVT.getVectorNumElements(), -1);
25031 Mask[0] = ShAmtIdx;
25032 ShAmt = DAG.getVectorShuffle(AmtVT, dl, ShAmt, DAG.getUNDEF(AmtVT), Mask);
25035 // Peek through any zext node if we can get back to a 128-bit source.
25036 if (AmtVT.getScalarSizeInBits() == 64 &&
25037 (ShAmt.getOpcode() == ISD::ZERO_EXTEND ||
25038 ShAmt.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) &&
25039 ShAmt.getOperand(0).getValueType().isSimple() &&
25040 ShAmt.getOperand(0).getValueType().is128BitVector()) {
25041 ShAmt = ShAmt.getOperand(0);
25042 AmtVT = ShAmt.getSimpleValueType();
25045 // See if we can mask off the upper elements using the existing source node.
25046 // The shift uses the entire lower 64-bits of the amount vector, so no need to
25047 // do this for vXi64 types.
25048 bool IsMasked = false;
25049 if (AmtVT.getScalarSizeInBits() < 64) {
25050 if (ShAmt.getOpcode() == ISD::BUILD_VECTOR ||
25051 ShAmt.getOpcode() == ISD::SCALAR_TO_VECTOR) {
25052 // If the shift amount has come from a scalar, then zero-extend the scalar
25053 // before moving to the vector.
25054 ShAmt = DAG.getZExtOrTrunc(ShAmt.getOperand(0), dl, MVT::i32);
25055 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, ShAmt);
25056 ShAmt = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, ShAmt);
25057 AmtVT = MVT::v4i32;
25058 IsMasked = true;
25059 } else if (ShAmt.getOpcode() == ISD::AND) {
25060 // If the shift amount is already masked (e.g. for rotation modulo),
25061 // we can zero-extend it by setting all the other mask elements to
25062 // zero.
25063 SmallVector<SDValue> MaskElts(
25064 AmtVT.getVectorNumElements(),
25065 DAG.getConstant(0, dl, AmtVT.getScalarType()));
25066 MaskElts[0] = DAG.getAllOnesConstant(dl, AmtVT.getScalarType());
25067 SDValue Mask = DAG.getBuildVector(AmtVT, dl, MaskElts);
25068 if ((Mask = DAG.FoldConstantArithmetic(ISD::AND, dl, AmtVT,
25069 {ShAmt.getOperand(1), Mask}))) {
25070 ShAmt = DAG.getNode(ISD::AND, dl, AmtVT, ShAmt.getOperand(0), Mask);
25071 IsMasked = true;
25076 // Extract if the shift amount vector is larger than 128-bits.
25077 if (AmtVT.getSizeInBits() > 128) {
25078 ShAmt = extract128BitVector(ShAmt, 0, DAG, dl);
25079 AmtVT = ShAmt.getSimpleValueType();
25082 // Zero-extend bottom element to v2i64 vector type, either by extension or
25083 // shuffle masking.
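// Illustrative example: without SSE41, a v8i16 amount is byte-shifted left and
// then right by (128 - 16) / 8 = 14 bytes, zeroing everything above bit 15.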
25084 if (!IsMasked && AmtVT.getScalarSizeInBits() < 64) {
25085 if (AmtVT == MVT::v4i32 && (ShAmt.getOpcode() == X86ISD::VBROADCAST ||
25086 ShAmt.getOpcode() == X86ISD::VBROADCAST_LOAD)) {
25087 ShAmt = DAG.getNode(X86ISD::VZEXT_MOVL, SDLoc(ShAmt), MVT::v4i32, ShAmt);
25088 } else if (Subtarget.hasSSE41()) {
25089 ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
25090 MVT::v2i64, ShAmt);
25091 } else {
25092 SDValue ByteShift = DAG.getTargetConstant(
25093 (128 - AmtVT.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8);
25094 ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt);
25095 ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
25096 ByteShift);
25097 ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
25098 ByteShift);
25102 // Change opcode to non-immediate version.
25103 Opc = getTargetVShiftUniformOpcode(Opc, true);
25105 // The return type has to be a 128-bit type with the same element
25106 // type as the input type.
25107 MVT EltVT = VT.getVectorElementType();
25108 MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());
25110 ShAmt = DAG.getBitcast(ShVT, ShAmt);
25111 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
25114 /// Return \p Mask with the necessary casting or extending
25115 /// according to \p MaskVT when lowering masking intrinsics.
25116 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
25117 const X86Subtarget &Subtarget, SelectionDAG &DAG,
25118 const SDLoc &dl) {
25120 if (isAllOnesConstant(Mask))
25121 return DAG.getConstant(1, dl, MaskVT);
25122 if (X86::isZeroNode(Mask))
25123 return DAG.getConstant(0, dl, MaskVT);
25125 assert(MaskVT.bitsLE(Mask.getSimpleValueType()) && "Unexpected mask size!");
25127 if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
25128 assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
25129 assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
25130 // In 32-bit mode a bitcast of i64 is illegal; extend/split it instead.
25131 SDValue Lo, Hi;
25132 std::tie(Lo, Hi) = DAG.SplitScalar(Mask, dl, MVT::i32, MVT::i32);
25133 Lo = DAG.getBitcast(MVT::v32i1, Lo);
25134 Hi = DAG.getBitcast(MVT::v32i1, Hi);
25135 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
25136 } else {
25137 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
25138 Mask.getSimpleValueType().getSizeInBits());
25139 // When MaskVT equals v2i1 or v4i1, the low 2 or 4 elements
25140 // are extracted by EXTRACT_SUBVECTOR.
25141 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
25142 DAG.getBitcast(BitcastVT, Mask),
25143 DAG.getIntPtrConstant(0, dl));
25147 /// Return (and \p Op, \p Mask) for compare instructions or
25148 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
25149 /// necessary casting or extending for \p Mask when lowering masking intrinsics
25150 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
25151 SDValue PreservedSrc,
25152 const X86Subtarget &Subtarget,
25153 SelectionDAG &DAG) {
25154 MVT VT = Op.getSimpleValueType();
25155 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
25156 unsigned OpcodeSelect = ISD::VSELECT;
25157 SDLoc dl(Op);
25159 if (isAllOnesConstant(Mask))
25160 return Op;
25162 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25164 if (PreservedSrc.isUndef())
25165 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
25166 return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
25169 /// Creates an SDNode for a predicated scalar operation.
25170 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
25171 /// The mask comes in as MVT::i8 and should be transformed
25172 /// to MVT::v1i1 while lowering masking intrinsics.
25173 /// The main difference between ScalarMaskingNode and VectorMaskingNode is using
25174 /// "X86select" instead of "vselect". We just can't create the "vselect" node
25175 /// for a scalar instruction.
25176 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
25177 SDValue PreservedSrc,
25178 const X86Subtarget &Subtarget,
25179 SelectionDAG &DAG) {
25181 if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
25182 if (MaskConst->getZExtValue() & 0x1)
25183 return Op;
25185 MVT VT = Op.getSimpleValueType();
25186 SDLoc dl(Op);
25188 assert(Mask.getValueType() == MVT::i8 && "Unexpected type");
25189 SDValue IMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i1,
25190 DAG.getBitcast(MVT::v8i1, Mask),
25191 DAG.getIntPtrConstant(0, dl));
25192 if (Op.getOpcode() == X86ISD::FSETCCM ||
25193 Op.getOpcode() == X86ISD::FSETCCM_SAE ||
25194 Op.getOpcode() == X86ISD::VFPCLASSS)
25195 return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
25197 if (PreservedSrc.isUndef())
25198 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
25199 return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
25202 static int getSEHRegistrationNodeSize(const Function *Fn) {
25203 if (!Fn->hasPersonalityFn())
25204 report_fatal_error(
25205 "querying registration node size for function without personality");
25206 // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
25207 // WinEHStatePass for the full struct definition.
25208 switch (classifyEHPersonality(Fn->getPersonalityFn())) {
25209 case EHPersonality::MSVC_X86SEH: return 24;
25210 case EHPersonality::MSVC_CXX: return 16;
25211 default: break;
25213 report_fatal_error(
25214 "can only recover FP for 32-bit MSVC EH personality functions");
25217 /// When the MSVC runtime transfers control to us, either to an outlined
25218 /// function or when returning to a parent frame after catching an exception, we
25219 /// recover the parent frame pointer by doing arithmetic on the incoming EBP.
25220 /// Here's the math:
25221 /// RegNodeBase = EntryEBP - RegNodeSize
25222 /// ParentFP = RegNodeBase - ParentFrameOffset
25223 /// Subtracting RegNodeSize takes us to the offset of the registration node, and
25224 /// subtracting the offset (negative on x86) takes us back to the parent FP.
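/// Worked example (illustrative): with the 32-bit MSVC C++ personality,
/// RegNodeSize is 16, so ParentFP = EntryEBP - 16 - ParentFrameOffset.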
25225 static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
25226 SDValue EntryEBP) {
25227 MachineFunction &MF = DAG.getMachineFunction();
25228 SDLoc dl;
25230 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25231 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
25233 // It's possible that the parent function no longer has a personality function
25234 // if the exceptional code was optimized away, in which case we just return
25235 // the incoming EBP.
25236 if (!Fn->hasPersonalityFn())
25237 return EntryEBP;
25239 // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
25240 // registration, or the .set_setframe offset.
25241 MCSymbol *OffsetSym =
25242 MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
25243 GlobalValue::dropLLVMManglingEscape(Fn->getName()));
25244 SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
25245 SDValue ParentFrameOffset =
25246 DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
25248 // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
25249 // the prologue to RBP in the parent function.
25250 const X86Subtarget &Subtarget = DAG.getSubtarget<X86Subtarget>();
25251 if (Subtarget.is64Bit())
25252 return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
25254 int RegNodeSize = getSEHRegistrationNodeSize(Fn);
25255 // RegNodeBase = EntryEBP - RegNodeSize
25256 // ParentFP = RegNodeBase - ParentFrameOffset
25257 SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
25258 DAG.getConstant(RegNodeSize, dl, PtrVT));
25259 return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
25262 SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
25263 SelectionDAG &DAG) const {
25264 // Helper to detect if the operand is the CUR_DIRECTION rounding mode.
25265 auto isRoundModeCurDirection = [](SDValue Rnd) {
25266 if (auto *C = dyn_cast<ConstantSDNode>(Rnd))
25267 return C->getAPIntValue() == X86::STATIC_ROUNDING::CUR_DIRECTION;
25269 return false;
25271 auto isRoundModeSAE = [](SDValue Rnd) {
25272 if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
25273 unsigned RC = C->getZExtValue();
25274 if (RC & X86::STATIC_ROUNDING::NO_EXC) {
25275 // Clear the NO_EXC bit and check remaining bits.
25276 RC ^= X86::STATIC_ROUNDING::NO_EXC;
25277 // As a convenience we allow either no other bits set or explicitly
25278 // the current direction.
25279 return RC == 0 || RC == X86::STATIC_ROUNDING::CUR_DIRECTION;
25283 return false;
25285 auto isRoundModeSAEToX = [](SDValue Rnd, unsigned &RC) {
25286 if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
25287 RC = C->getZExtValue();
25288 if (RC & X86::STATIC_ROUNDING::NO_EXC) {
25289 // Clear the NO_EXC bit and check remaining bits.
25290 RC ^= X86::STATIC_ROUNDING::NO_EXC;
25291 return RC == X86::STATIC_ROUNDING::TO_NEAREST_INT ||
25292 RC == X86::STATIC_ROUNDING::TO_NEG_INF ||
25293 RC == X86::STATIC_ROUNDING::TO_POS_INF ||
25294 RC == X86::STATIC_ROUNDING::TO_ZERO;
25298 return false;
25301 SDLoc dl(Op);
25302 unsigned IntNo = Op.getConstantOperandVal(0);
25303 MVT VT = Op.getSimpleValueType();
25304 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
25306 // Propagate flags from original node to transformed node(s).
25307 SelectionDAG::FlagInserter FlagsInserter(DAG, Op->getFlags());
25309 if (IntrData) {
25310 switch(IntrData->Type) {
25311 case INTR_TYPE_1OP: {
25312 // We specify 2 possible opcodes for intrinsics with rounding modes.
25313 // First, we check if the intrinsic may have a non-default rounding mode
25314 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
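// Illustrative example: if the rounding operand requests embedded rounding
// (e.g. TO_ZERO with NO_EXC set) we emit Opc1 with the rounding control as an
// explicit i32 constant; with CUR_DIRECTION we fall back to the plain Opc0.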
25315 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
25316 if (IntrWithRoundingModeOpcode != 0) {
25317 SDValue Rnd = Op.getOperand(2);
25318 unsigned RC = 0;
25319 if (isRoundModeSAEToX(Rnd, RC))
25320 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
25321 Op.getOperand(1),
25322 DAG.getTargetConstant(RC, dl, MVT::i32));
25323 if (!isRoundModeCurDirection(Rnd))
25324 return SDValue();
25326 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25327 Op.getOperand(1));
25329 case INTR_TYPE_1OP_SAE: {
25330 SDValue Sae = Op.getOperand(2);
25332 unsigned Opc;
25333 if (isRoundModeCurDirection(Sae))
25334 Opc = IntrData->Opc0;
25335 else if (isRoundModeSAE(Sae))
25336 Opc = IntrData->Opc1;
25337 else
25338 return SDValue();
25340 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1));
25342 case INTR_TYPE_2OP: {
25343 SDValue Src2 = Op.getOperand(2);
25345 // We specify 2 possible opcodes for intrinsics with rounding modes.
25346 // First, we check if the intrinsic may have a non-default rounding mode
25347 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
25348 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
25349 if (IntrWithRoundingModeOpcode != 0) {
25350 SDValue Rnd = Op.getOperand(3);
25351 unsigned RC = 0;
25352 if (isRoundModeSAEToX(Rnd, RC))
25353 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
25354 Op.getOperand(1), Src2,
25355 DAG.getTargetConstant(RC, dl, MVT::i32));
25356 if (!isRoundModeCurDirection(Rnd))
25357 return SDValue();
25360 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25361 Op.getOperand(1), Src2);
25363 case INTR_TYPE_2OP_SAE: {
25364 SDValue Sae = Op.getOperand(3);
25366 unsigned Opc;
25367 if (isRoundModeCurDirection(Sae))
25368 Opc = IntrData->Opc0;
25369 else if (isRoundModeSAE(Sae))
25370 Opc = IntrData->Opc1;
25371 else
25372 return SDValue();
25374 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1),
25375 Op.getOperand(2));
25377 case INTR_TYPE_3OP:
25378 case INTR_TYPE_3OP_IMM8: {
25379 SDValue Src1 = Op.getOperand(1);
25380 SDValue Src2 = Op.getOperand(2);
25381 SDValue Src3 = Op.getOperand(3);
25383 if (IntrData->Type == INTR_TYPE_3OP_IMM8 &&
25384 Src3.getValueType() != MVT::i8) {
25385 Src3 = DAG.getTargetConstant(Src3->getAsZExtVal() & 0xff, dl, MVT::i8);
25388 // We specify 2 possible opcodes for intrinsics with rounding modes.
25389 // First, we check if the intrinsic may have a non-default rounding mode
25390 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
25391 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
25392 if (IntrWithRoundingModeOpcode != 0) {
25393 SDValue Rnd = Op.getOperand(4);
25394 unsigned RC = 0;
25395 if (isRoundModeSAEToX(Rnd, RC))
25396 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
25397 Src1, Src2, Src3,
25398 DAG.getTargetConstant(RC, dl, MVT::i32));
25399 if (!isRoundModeCurDirection(Rnd))
25400 return SDValue();
25403 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25404 {Src1, Src2, Src3});
25406 case INTR_TYPE_4OP_IMM8: {
25407 assert(Op.getOperand(4)->getOpcode() == ISD::TargetConstant);
25408 SDValue Src4 = Op.getOperand(4);
25409 if (Src4.getValueType() != MVT::i8) {
25410 Src4 = DAG.getTargetConstant(Src4->getAsZExtVal() & 0xff, dl, MVT::i8);
25413 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25414 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
25415 Src4);
25417 case INTR_TYPE_1OP_MASK: {
25418 SDValue Src = Op.getOperand(1);
25419 SDValue PassThru = Op.getOperand(2);
25420 SDValue Mask = Op.getOperand(3);
25421 // We add the rounding mode to the node when
25422 // - an RC opcode is specified, and
25423 // - RC is not "current direction".
25424 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
25425 if (IntrWithRoundingModeOpcode != 0) {
25426 SDValue Rnd = Op.getOperand(4);
25427 unsigned RC = 0;
25428 if (isRoundModeSAEToX(Rnd, RC))
25429 return getVectorMaskingNode(
25430 DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
25431 Src, DAG.getTargetConstant(RC, dl, MVT::i32)),
25432 Mask, PassThru, Subtarget, DAG);
25433 if (!isRoundModeCurDirection(Rnd))
25434 return SDValue();
25436 return getVectorMaskingNode(
25437 DAG.getNode(IntrData->Opc0, dl, VT, Src), Mask, PassThru,
25438 Subtarget, DAG);
25440 case INTR_TYPE_1OP_MASK_SAE: {
25441 SDValue Src = Op.getOperand(1);
25442 SDValue PassThru = Op.getOperand(2);
25443 SDValue Mask = Op.getOperand(3);
25444 SDValue Rnd = Op.getOperand(4);
25446 unsigned Opc;
25447 if (isRoundModeCurDirection(Rnd))
25448 Opc = IntrData->Opc0;
25449 else if (isRoundModeSAE(Rnd))
25450 Opc = IntrData->Opc1;
25451 else
25452 return SDValue();
25454 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src), Mask, PassThru,
25455 Subtarget, DAG);
25457 case INTR_TYPE_SCALAR_MASK: {
25458 SDValue Src1 = Op.getOperand(1);
25459 SDValue Src2 = Op.getOperand(2);
25460 SDValue passThru = Op.getOperand(3);
25461 SDValue Mask = Op.getOperand(4);
25462 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
25463 // There are 2 kinds of intrinsics in this group:
25464 // (1) With suppress-all-exceptions (SAE) or rounding mode - 6 operands
25465 // (2) With rounding mode and SAE - 7 operands.
25466 bool HasRounding = IntrWithRoundingModeOpcode != 0;
25467 if (Op.getNumOperands() == (5U + HasRounding)) {
25468 if (HasRounding) {
25469 SDValue Rnd = Op.getOperand(5);
25470 unsigned RC = 0;
25471 if (isRoundModeSAEToX(Rnd, RC))
25472 return getScalarMaskingNode(
25473 DAG.getNode(IntrWithRoundingModeOpcode, dl, VT, Src1, Src2,
25474 DAG.getTargetConstant(RC, dl, MVT::i32)),
25475 Mask, passThru, Subtarget, DAG);
25476 if (!isRoundModeCurDirection(Rnd))
25477 return SDValue();
25479 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
25480 Src2),
25481 Mask, passThru, Subtarget, DAG);
25484 assert(Op.getNumOperands() == (6U + HasRounding) &&
25485 "Unexpected intrinsic form");
25486 SDValue RoundingMode = Op.getOperand(5);
25487 unsigned Opc = IntrData->Opc0;
25488 if (HasRounding) {
25489 SDValue Sae = Op.getOperand(6);
25490 if (isRoundModeSAE(Sae))
25491 Opc = IntrWithRoundingModeOpcode;
25492 else if (!isRoundModeCurDirection(Sae))
25493 return SDValue();
25495 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1,
25496 Src2, RoundingMode),
25497 Mask, passThru, Subtarget, DAG);
25499 case INTR_TYPE_SCALAR_MASK_RND: {
25500 SDValue Src1 = Op.getOperand(1);
25501 SDValue Src2 = Op.getOperand(2);
25502 SDValue passThru = Op.getOperand(3);
25503 SDValue Mask = Op.getOperand(4);
25504 SDValue Rnd = Op.getOperand(5);
25506 SDValue NewOp;
25507 unsigned RC = 0;
25508 if (isRoundModeCurDirection(Rnd))
25509 NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
25510 else if (isRoundModeSAEToX(Rnd, RC))
25511 NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
25512 DAG.getTargetConstant(RC, dl, MVT::i32));
25513 else
25514 return SDValue();
25516 return getScalarMaskingNode(NewOp, Mask, passThru, Subtarget, DAG);
25518 case INTR_TYPE_SCALAR_MASK_SAE: {
25519 SDValue Src1 = Op.getOperand(1);
25520 SDValue Src2 = Op.getOperand(2);
25521 SDValue passThru = Op.getOperand(3);
25522 SDValue Mask = Op.getOperand(4);
25523 SDValue Sae = Op.getOperand(5);
25524 unsigned Opc;
25525 if (isRoundModeCurDirection(Sae))
25526 Opc = IntrData->Opc0;
25527 else if (isRoundModeSAE(Sae))
25528 Opc = IntrData->Opc1;
25529 else
25530 return SDValue();
25532 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
25533 Mask, passThru, Subtarget, DAG);
25535 case INTR_TYPE_2OP_MASK: {
25536 SDValue Src1 = Op.getOperand(1);
25537 SDValue Src2 = Op.getOperand(2);
25538 SDValue PassThru = Op.getOperand(3);
25539 SDValue Mask = Op.getOperand(4);
25540 SDValue NewOp;
25541 if (IntrData->Opc1 != 0) {
25542 SDValue Rnd = Op.getOperand(5);
25543 unsigned RC = 0;
25544 if (isRoundModeSAEToX(Rnd, RC))
25545 NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
25546 DAG.getTargetConstant(RC, dl, MVT::i32));
25547 else if (!isRoundModeCurDirection(Rnd))
25548 return SDValue();
25550 if (!NewOp)
25551 NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
25552 return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
25554 case INTR_TYPE_2OP_MASK_SAE: {
25555 SDValue Src1 = Op.getOperand(1);
25556 SDValue Src2 = Op.getOperand(2);
25557 SDValue PassThru = Op.getOperand(3);
25558 SDValue Mask = Op.getOperand(4);
25560 unsigned Opc = IntrData->Opc0;
25561 if (IntrData->Opc1 != 0) {
25562 SDValue Sae = Op.getOperand(5);
25563 if (isRoundModeSAE(Sae))
25564 Opc = IntrData->Opc1;
25565 else if (!isRoundModeCurDirection(Sae))
25566 return SDValue();
25569 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
25570 Mask, PassThru, Subtarget, DAG);
25572 case INTR_TYPE_3OP_SCALAR_MASK_SAE: {
25573 SDValue Src1 = Op.getOperand(1);
25574 SDValue Src2 = Op.getOperand(2);
25575 SDValue Src3 = Op.getOperand(3);
25576 SDValue PassThru = Op.getOperand(4);
25577 SDValue Mask = Op.getOperand(5);
25578 SDValue Sae = Op.getOperand(6);
25579 unsigned Opc;
25580 if (isRoundModeCurDirection(Sae))
25581 Opc = IntrData->Opc0;
25582 else if (isRoundModeSAE(Sae))
25583 Opc = IntrData->Opc1;
25584 else
25585 return SDValue();
25587 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
25588 Mask, PassThru, Subtarget, DAG);
25590 case INTR_TYPE_3OP_MASK_SAE: {
25591 SDValue Src1 = Op.getOperand(1);
25592 SDValue Src2 = Op.getOperand(2);
25593 SDValue Src3 = Op.getOperand(3);
25594 SDValue PassThru = Op.getOperand(4);
25595 SDValue Mask = Op.getOperand(5);
25597 unsigned Opc = IntrData->Opc0;
25598 if (IntrData->Opc1 != 0) {
25599 SDValue Sae = Op.getOperand(6);
25600 if (isRoundModeSAE(Sae))
25601 Opc = IntrData->Opc1;
25602 else if (!isRoundModeCurDirection(Sae))
25603 return SDValue();
25605 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
25606 Mask, PassThru, Subtarget, DAG);
25608 case BLENDV: {
25609 SDValue Src1 = Op.getOperand(1);
25610 SDValue Src2 = Op.getOperand(2);
25611 SDValue Src3 = Op.getOperand(3);
25613 EVT MaskVT = Src3.getValueType().changeVectorElementTypeToInteger();
25614 Src3 = DAG.getBitcast(MaskVT, Src3);
25616 // Reverse the operands to match VSELECT order.
25617 return DAG.getNode(IntrData->Opc0, dl, VT, Src3, Src2, Src1);
25619 case VPERM_2OP : {
25620 SDValue Src1 = Op.getOperand(1);
25621 SDValue Src2 = Op.getOperand(2);
25623 // Swap Src1 and Src2 in the node creation
25624 return DAG.getNode(IntrData->Opc0, dl, VT, Src2, Src1);
25626 case CFMA_OP_MASKZ:
25627 case CFMA_OP_MASK: {
25628 SDValue Src1 = Op.getOperand(1);
25629 SDValue Src2 = Op.getOperand(2);
25630 SDValue Src3 = Op.getOperand(3);
25631 SDValue Mask = Op.getOperand(4);
25632 MVT VT = Op.getSimpleValueType();
25634 SDValue PassThru = Src3;
25635 if (IntrData->Type == CFMA_OP_MASKZ)
25636 PassThru = getZeroVector(VT, Subtarget, DAG, dl);
25638 // We add rounding mode to the Node when
25639 // - RC Opcode is specified and
25640 // - RC is not "current direction".
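// The rounding operand carries the _MM_FROUND_* immediate from the intrinsic
// (e.g. _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); isRoundModeSAEToX only
// accepts the no-exception bit combined with an explicit rounding direction.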
25641 SDValue NewOp;
25642 if (IntrData->Opc1 != 0) {
25643 SDValue Rnd = Op.getOperand(5);
25644 unsigned RC = 0;
25645 if (isRoundModeSAEToX(Rnd, RC))
25646 NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2, Src3,
25647 DAG.getTargetConstant(RC, dl, MVT::i32));
25648 else if (!isRoundModeCurDirection(Rnd))
25649 return SDValue();
25651 if (!NewOp)
25652 NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2, Src3);
25653 return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
25655 case IFMA_OP:
25656 // NOTE: We need to swizzle the operands to pass the multiply operands
25657 // first.
25658 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25659 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
25660 case FPCLASSS: {
25661 SDValue Src1 = Op.getOperand(1);
25662 SDValue Imm = Op.getOperand(2);
25663 SDValue Mask = Op.getOperand(3);
25664 SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
25665 SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
25666 Subtarget, DAG);
25667 // Need to fill with zeros to ensure the bitcast will produce zeroes
25668 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
25669 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
25670 DAG.getConstant(0, dl, MVT::v8i1),
25671 FPclassMask, DAG.getIntPtrConstant(0, dl));
25672 return DAG.getBitcast(MVT::i8, Ins);
25675 case CMP_MASK_CC: {
25676 MVT MaskVT = Op.getSimpleValueType();
25677 SDValue CC = Op.getOperand(3);
25678 SDValue Mask = Op.getOperand(4);
25679 // We specify 2 possible opcodes for intrinsics with rounding modes.
25680 // First, we check whether the intrinsic may have a non-default rounding mode
25681 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
25682 if (IntrData->Opc1 != 0) {
25683 SDValue Sae = Op.getOperand(5);
25684 if (isRoundModeSAE(Sae))
25685 return DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
25686 Op.getOperand(2), CC, Mask, Sae);
25687 if (!isRoundModeCurDirection(Sae))
25688 return SDValue();
25690 // Default rounding mode.
25691 return DAG.getNode(IntrData->Opc0, dl, MaskVT,
25692 {Op.getOperand(1), Op.getOperand(2), CC, Mask});
25694 case CMP_MASK_SCALAR_CC: {
25695 SDValue Src1 = Op.getOperand(1);
25696 SDValue Src2 = Op.getOperand(2);
25697 SDValue CC = Op.getOperand(3);
25698 SDValue Mask = Op.getOperand(4);
25700 SDValue Cmp;
25701 if (IntrData->Opc1 != 0) {
25702 SDValue Sae = Op.getOperand(5);
25703 if (isRoundModeSAE(Sae))
25704 Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Sae);
25705 else if (!isRoundModeCurDirection(Sae))
25706 return SDValue();
25708 // Default rounding mode.
25709 if (!Cmp.getNode())
25710 Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);
25712 SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
25713 Subtarget, DAG);
25714 // Need to fill with zeros to ensure the bitcast will produce zeroes
25715 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
25716 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
25717 DAG.getConstant(0, dl, MVT::v8i1),
25718 CmpMask, DAG.getIntPtrConstant(0, dl));
25719 return DAG.getBitcast(MVT::i8, Ins);
25721 case COMI: { // Comparison intrinsics
25722 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
25723 SDValue LHS = Op.getOperand(1);
25724 SDValue RHS = Op.getOperand(2);
25725 // Some conditions require the operands to be swapped.
25726 if (CC == ISD::SETLT || CC == ISD::SETLE)
25727 std::swap(LHS, RHS);
25729 SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
25730 SDValue SetCC;
25731 switch (CC) {
25732 case ISD::SETEQ: { // (ZF = 0 and PF = 0)
25733 SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
25734 SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
25735 SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
25736 break;
25738 case ISD::SETNE: { // (ZF = 1 or PF = 1)
25739 SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
25740 SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
25741 SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
25742 break;
25744 case ISD::SETGT: // (CF = 0 and ZF = 0)
25745 case ISD::SETLT: { // Condition opposite to GT. Operands swapped above.
25746 SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
25747 break;
25749 case ISD::SETGE: // CF = 0
25750 case ISD::SETLE: // Condition opposite to GE. Operands swapped above.
25751 SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
25752 break;
25753 default:
25754 llvm_unreachable("Unexpected illegal condition!");
25756 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
25758 case COMI_RM: { // Comparison intrinsics with Sae
25759 SDValue LHS = Op.getOperand(1);
25760 SDValue RHS = Op.getOperand(2);
25761 unsigned CondVal = Op.getConstantOperandVal(3);
25762 SDValue Sae = Op.getOperand(4);
25764 SDValue FCmp;
25765 if (isRoundModeCurDirection(Sae))
25766 FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
25767 DAG.getTargetConstant(CondVal, dl, MVT::i8));
25768 else if (isRoundModeSAE(Sae))
25769 FCmp = DAG.getNode(X86ISD::FSETCCM_SAE, dl, MVT::v1i1, LHS, RHS,
25770 DAG.getTargetConstant(CondVal, dl, MVT::i8), Sae);
25771 else
25772 return SDValue();
25773 // Need to fill with zeros to ensure the bitcast will produce zeroes
25774 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
25775 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
25776 DAG.getConstant(0, dl, MVT::v16i1),
25777 FCmp, DAG.getIntPtrConstant(0, dl));
25778 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
25779 DAG.getBitcast(MVT::i16, Ins));
25781 case VSHIFT: {
25782 SDValue SrcOp = Op.getOperand(1);
25783 SDValue ShAmt = Op.getOperand(2);
25784 assert(ShAmt.getValueType() == MVT::i32 &&
25785 "Unexpected VSHIFT amount type");
25787 // Catch shift-by-constant.
25788 if (auto *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
25789 return getTargetVShiftByConstNode(IntrData->Opc0, dl,
25790 Op.getSimpleValueType(), SrcOp,
25791 CShAmt->getZExtValue(), DAG);
25793 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, ShAmt);
25794 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
25795 SrcOp, ShAmt, 0, Subtarget, DAG);
25797 case COMPRESS_EXPAND_IN_REG: {
25798 SDValue Mask = Op.getOperand(3);
25799 SDValue DataToCompress = Op.getOperand(1);
25800 SDValue PassThru = Op.getOperand(2);
25801 if (ISD::isBuildVectorAllOnes(Mask.getNode())) // return data as is
25802 return Op.getOperand(1);
25804 // Avoid false dependency.
25805 if (PassThru.isUndef())
25806 PassThru = getZeroVector(VT, Subtarget, DAG, dl);
25808 return DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress, PassThru,
25809 Mask);
25811 case FIXUPIMM:
25812 case FIXUPIMM_MASKZ: {
25813 SDValue Src1 = Op.getOperand(1);
25814 SDValue Src2 = Op.getOperand(2);
25815 SDValue Src3 = Op.getOperand(3);
25816 SDValue Imm = Op.getOperand(4);
25817 SDValue Mask = Op.getOperand(5);
25818 SDValue Passthru = (IntrData->Type == FIXUPIMM)
25819 ? Src1
25820 : getZeroVector(VT, Subtarget, DAG, dl);
25822 unsigned Opc = IntrData->Opc0;
25823 if (IntrData->Opc1 != 0) {
25824 SDValue Sae = Op.getOperand(6);
25825 if (isRoundModeSAE(Sae))
25826 Opc = IntrData->Opc1;
25827 else if (!isRoundModeCurDirection(Sae))
25828 return SDValue();
25831 SDValue FixupImm = DAG.getNode(Opc, dl, VT, Src1, Src2, Src3, Imm);
25833 if (Opc == X86ISD::VFIXUPIMM || Opc == X86ISD::VFIXUPIMM_SAE)
25834 return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
25836 return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
25838 case ROUNDP: {
25839 assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
25840 // Clear the upper bits of the rounding immediate so that the legacy
25841 // intrinsic can't trigger the scaling behavior of VRNDSCALE.
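// (VRNDSCALE reads the scale M from imm8[7:4]; the legacy ROUND* immediates
// only define bits [3:0], so the mask leaves M == 0, i.e. round to integer.)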
25842 auto Round = cast<ConstantSDNode>(Op.getOperand(2));
25843 SDValue RoundingMode =
25844 DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
25845 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25846 Op.getOperand(1), RoundingMode);
25848 case ROUNDS: {
25849 assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode");
25850 // Clear the upper bits of the rounding immediate so that the legacy
25851 // intrinsic can't trigger the scaling behavior of VRNDSCALE.
25852 auto Round = cast<ConstantSDNode>(Op.getOperand(3));
25853 SDValue RoundingMode =
25854 DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
25855 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25856 Op.getOperand(1), Op.getOperand(2), RoundingMode);
25858 case BEXTRI: {
25859 assert(IntrData->Opc0 == X86ISD::BEXTRI && "Unexpected opcode");
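// The BEXTRI control is 16 bits: bits [7:0] give the start position and
// bits [15:8] the length of the extracted field; higher bits are ignored.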
25861 uint64_t Imm = Op.getConstantOperandVal(2);
25862 SDValue Control = DAG.getTargetConstant(Imm & 0xffff, dl,
25863 Op.getValueType());
25864 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25865 Op.getOperand(1), Control);
25867 // ADC/SBB
25868 case ADX: {
25869 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
25870 SDVTList VTs = DAG.getVTList(Op.getOperand(2).getValueType(), MVT::i32);
25872 SDValue Res;
25873 // If the carry in is zero, then we should just use ADD/SUB instead of
25874 // ADC/SBB.
25875 if (isNullConstant(Op.getOperand(1))) {
25876 Res = DAG.getNode(IntrData->Opc1, dl, VTs, Op.getOperand(2),
25877 Op.getOperand(3));
25878 } else {
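// Rematerialize CF from the i8 carry-in: adding -1 (0xff) sets CF exactly
// when the carry-in byte is nonzero.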
25879 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(1),
25880 DAG.getConstant(-1, dl, MVT::i8));
25881 Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(2),
25882 Op.getOperand(3), GenCF.getValue(1));
25884 SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
25885 SDValue Results[] = { SetCC, Res };
25886 return DAG.getMergeValues(Results, dl);
25888 case CVTPD2PS_MASK:
25889 case CVTPD2DQ_MASK:
25890 case CVTQQ2PS_MASK:
25891 case TRUNCATE_TO_REG: {
25892 SDValue Src = Op.getOperand(1);
25893 SDValue PassThru = Op.getOperand(2);
25894 SDValue Mask = Op.getOperand(3);
25896 if (isAllOnesConstant(Mask))
25897 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
25899 MVT SrcVT = Src.getSimpleValueType();
25900 MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
25901 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25902 return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(),
25903 {Src, PassThru, Mask});
25905 case CVTPS2PH_MASK: {
25906 SDValue Src = Op.getOperand(1);
25907 SDValue Rnd = Op.getOperand(2);
25908 SDValue PassThru = Op.getOperand(3);
25909 SDValue Mask = Op.getOperand(4);
25911 unsigned RC = 0;
25912 unsigned Opc = IntrData->Opc0;
25913 bool SAE = Src.getValueType().is512BitVector() &&
25914 (isRoundModeSAEToX(Rnd, RC) || isRoundModeSAE(Rnd));
25915 if (SAE) {
25916 Opc = X86ISD::CVTPS2PH_SAE;
25917 Rnd = DAG.getTargetConstant(RC, dl, MVT::i32);
25920 if (isAllOnesConstant(Mask))
25921 return DAG.getNode(Opc, dl, Op.getValueType(), Src, Rnd);
25923 if (SAE)
25924 Opc = X86ISD::MCVTPS2PH_SAE;
25925 else
25926 Opc = IntrData->Opc1;
25927 MVT SrcVT = Src.getSimpleValueType();
25928 MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
25929 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25930 return DAG.getNode(Opc, dl, Op.getValueType(), Src, Rnd, PassThru, Mask);
25932 case CVTNEPS2BF16_MASK: {
25933 SDValue Src = Op.getOperand(1);
25934 SDValue PassThru = Op.getOperand(2);
25935 SDValue Mask = Op.getOperand(3);
25937 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
25938 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
25940 // Break false dependency.
25941 if (PassThru.isUndef())
25942 PassThru = DAG.getConstant(0, dl, PassThru.getValueType());
25944 return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
25945 Mask);
25947 default:
25948 break;
25952 switch (IntNo) {
25953 default: return SDValue(); // Don't custom lower most intrinsics.
25955 // ptest and testp intrinsics. The intrinsics these come from are designed to
25956 // return an integer value rather than just an instruction, so lower them to
25957 // the ptest or testp pattern and a setcc for the result.
25958 case Intrinsic::x86_avx512_ktestc_b:
25959 case Intrinsic::x86_avx512_ktestc_w:
25960 case Intrinsic::x86_avx512_ktestc_d:
25961 case Intrinsic::x86_avx512_ktestc_q:
25962 case Intrinsic::x86_avx512_ktestz_b:
25963 case Intrinsic::x86_avx512_ktestz_w:
25964 case Intrinsic::x86_avx512_ktestz_d:
25965 case Intrinsic::x86_avx512_ktestz_q:
25966 case Intrinsic::x86_sse41_ptestz:
25967 case Intrinsic::x86_sse41_ptestc:
25968 case Intrinsic::x86_sse41_ptestnzc:
25969 case Intrinsic::x86_avx_ptestz_256:
25970 case Intrinsic::x86_avx_ptestc_256:
25971 case Intrinsic::x86_avx_ptestnzc_256:
25972 case Intrinsic::x86_avx_vtestz_ps:
25973 case Intrinsic::x86_avx_vtestc_ps:
25974 case Intrinsic::x86_avx_vtestnzc_ps:
25975 case Intrinsic::x86_avx_vtestz_pd:
25976 case Intrinsic::x86_avx_vtestc_pd:
25977 case Intrinsic::x86_avx_vtestnzc_pd:
25978 case Intrinsic::x86_avx_vtestz_ps_256:
25979 case Intrinsic::x86_avx_vtestc_ps_256:
25980 case Intrinsic::x86_avx_vtestnzc_ps_256:
25981 case Intrinsic::x86_avx_vtestz_pd_256:
25982 case Intrinsic::x86_avx_vtestc_pd_256:
25983 case Intrinsic::x86_avx_vtestnzc_pd_256: {
25984 unsigned TestOpc = X86ISD::PTEST;
25985 X86::CondCode X86CC;
25986 switch (IntNo) {
25987 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
25988 case Intrinsic::x86_avx512_ktestc_b:
25989 case Intrinsic::x86_avx512_ktestc_w:
25990 case Intrinsic::x86_avx512_ktestc_d:
25991 case Intrinsic::x86_avx512_ktestc_q:
25992 // CF = 1
25993 TestOpc = X86ISD::KTEST;
25994 X86CC = X86::COND_B;
25995 break;
25996 case Intrinsic::x86_avx512_ktestz_b:
25997 case Intrinsic::x86_avx512_ktestz_w:
25998 case Intrinsic::x86_avx512_ktestz_d:
25999 case Intrinsic::x86_avx512_ktestz_q:
26000 TestOpc = X86ISD::KTEST;
26001 X86CC = X86::COND_E;
26002 break;
26003 case Intrinsic::x86_avx_vtestz_ps:
26004 case Intrinsic::x86_avx_vtestz_pd:
26005 case Intrinsic::x86_avx_vtestz_ps_256:
26006 case Intrinsic::x86_avx_vtestz_pd_256:
26007 TestOpc = X86ISD::TESTP;
26008 [[fallthrough]];
26009 case Intrinsic::x86_sse41_ptestz:
26010 case Intrinsic::x86_avx_ptestz_256:
26011 // ZF = 1
26012 X86CC = X86::COND_E;
26013 break;
26014 case Intrinsic::x86_avx_vtestc_ps:
26015 case Intrinsic::x86_avx_vtestc_pd:
26016 case Intrinsic::x86_avx_vtestc_ps_256:
26017 case Intrinsic::x86_avx_vtestc_pd_256:
26018 TestOpc = X86ISD::TESTP;
26019 [[fallthrough]];
26020 case Intrinsic::x86_sse41_ptestc:
26021 case Intrinsic::x86_avx_ptestc_256:
26022 // CF = 1
26023 X86CC = X86::COND_B;
26024 break;
26025 case Intrinsic::x86_avx_vtestnzc_ps:
26026 case Intrinsic::x86_avx_vtestnzc_pd:
26027 case Intrinsic::x86_avx_vtestnzc_ps_256:
26028 case Intrinsic::x86_avx_vtestnzc_pd_256:
26029 TestOpc = X86ISD::TESTP;
26030 [[fallthrough]];
26031 case Intrinsic::x86_sse41_ptestnzc:
26032 case Intrinsic::x86_avx_ptestnzc_256:
26033 // ZF and CF = 0
26034 X86CC = X86::COND_A;
26035 break;
26038 SDValue LHS = Op.getOperand(1);
26039 SDValue RHS = Op.getOperand(2);
26040 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
26041 SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
26042 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
26045 case Intrinsic::x86_sse42_pcmpistria128:
26046 case Intrinsic::x86_sse42_pcmpestria128:
26047 case Intrinsic::x86_sse42_pcmpistric128:
26048 case Intrinsic::x86_sse42_pcmpestric128:
26049 case Intrinsic::x86_sse42_pcmpistrio128:
26050 case Intrinsic::x86_sse42_pcmpestrio128:
26051 case Intrinsic::x86_sse42_pcmpistris128:
26052 case Intrinsic::x86_sse42_pcmpestris128:
26053 case Intrinsic::x86_sse42_pcmpistriz128:
26054 case Intrinsic::x86_sse42_pcmpestriz128: {
26055 unsigned Opcode;
26056 X86::CondCode X86CC;
26057 switch (IntNo) {
26058 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
26059 case Intrinsic::x86_sse42_pcmpistria128:
26060 Opcode = X86ISD::PCMPISTR;
26061 X86CC = X86::COND_A;
26062 break;
26063 case Intrinsic::x86_sse42_pcmpestria128:
26064 Opcode = X86ISD::PCMPESTR;
26065 X86CC = X86::COND_A;
26066 break;
26067 case Intrinsic::x86_sse42_pcmpistric128:
26068 Opcode = X86ISD::PCMPISTR;
26069 X86CC = X86::COND_B;
26070 break;
26071 case Intrinsic::x86_sse42_pcmpestric128:
26072 Opcode = X86ISD::PCMPESTR;
26073 X86CC = X86::COND_B;
26074 break;
26075 case Intrinsic::x86_sse42_pcmpistrio128:
26076 Opcode = X86ISD::PCMPISTR;
26077 X86CC = X86::COND_O;
26078 break;
26079 case Intrinsic::x86_sse42_pcmpestrio128:
26080 Opcode = X86ISD::PCMPESTR;
26081 X86CC = X86::COND_O;
26082 break;
26083 case Intrinsic::x86_sse42_pcmpistris128:
26084 Opcode = X86ISD::PCMPISTR;
26085 X86CC = X86::COND_S;
26086 break;
26087 case Intrinsic::x86_sse42_pcmpestris128:
26088 Opcode = X86ISD::PCMPESTR;
26089 X86CC = X86::COND_S;
26090 break;
26091 case Intrinsic::x86_sse42_pcmpistriz128:
26092 Opcode = X86ISD::PCMPISTR;
26093 X86CC = X86::COND_E;
26094 break;
26095 case Intrinsic::x86_sse42_pcmpestriz128:
26096 Opcode = X86ISD::PCMPESTR;
26097 X86CC = X86::COND_E;
26098 break;
26100 SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
26101 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
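// The PCMP*STR node produces (index, mask, EFLAGS); these intrinsics only
// consume the EFLAGS result.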
26102 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2);
26103 SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG);
26104 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
26107 case Intrinsic::x86_sse42_pcmpistri128:
26108 case Intrinsic::x86_sse42_pcmpestri128: {
26109 unsigned Opcode;
26110 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
26111 Opcode = X86ISD::PCMPISTR;
26112 else
26113 Opcode = X86ISD::PCMPESTR;
26115 SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
26116 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
26117 return DAG.getNode(Opcode, dl, VTs, NewOps);
26120 case Intrinsic::x86_sse42_pcmpistrm128:
26121 case Intrinsic::x86_sse42_pcmpestrm128: {
26122 unsigned Opcode;
26123 if (IntNo == Intrinsic::x86_sse42_pcmpistrm128)
26124 Opcode = X86ISD::PCMPISTR;
26125 else
26126 Opcode = X86ISD::PCMPESTR;
26128 SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
26129 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
26130 return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1);
26133 case Intrinsic::eh_sjlj_lsda: {
26134 MachineFunction &MF = DAG.getMachineFunction();
26135 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26136 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
26137 auto &Context = MF.getMMI().getContext();
26138 MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
26139 Twine(MF.getFunctionNumber()));
26140 return DAG.getNode(getGlobalWrapperKind(nullptr, /*OpFlags=*/0), dl, VT,
26141 DAG.getMCSymbol(S, PtrVT));
26144 case Intrinsic::x86_seh_lsda: {
26145 // Compute the symbol for the LSDA. We know it'll get emitted later.
26146 MachineFunction &MF = DAG.getMachineFunction();
26147 SDValue Op1 = Op.getOperand(1);
26148 auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
26149 MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
26150 GlobalValue::dropLLVMManglingEscape(Fn->getName()));
26152 // Generate a simple absolute symbol reference. This intrinsic is only
26153 // supported on 32-bit Windows, which isn't PIC.
26154 SDValue Result = DAG.getMCSymbol(LSDASym, VT);
26155 return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
26158 case Intrinsic::eh_recoverfp: {
26159 SDValue FnOp = Op.getOperand(1);
26160 SDValue IncomingFPOp = Op.getOperand(2);
26161 GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
26162 auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
26163 if (!Fn)
26164 report_fatal_error(
26165 "llvm.eh.recoverfp must take a function as the first argument");
26166 return recoverFramePointer(DAG, Fn, IncomingFPOp);
26169 case Intrinsic::localaddress: {
26170 // Returns one of the stack, base, or frame pointer registers, depending on
26171 // which is used to reference local variables.
26172 MachineFunction &MF = DAG.getMachineFunction();
26173 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
26174 unsigned Reg;
26175 if (RegInfo->hasBasePointer(MF))
26176 Reg = RegInfo->getBaseRegister();
26177 else { // Handles the SP or FP case.
26178 bool CantUseFP = RegInfo->hasStackRealignment(MF);
26179 if (CantUseFP)
26180 Reg = RegInfo->getPtrSizedStackRegister(MF);
26181 else
26182 Reg = RegInfo->getPtrSizedFrameRegister(MF);
26184 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
26186 case Intrinsic::x86_avx512_vp2intersect_q_512:
26187 case Intrinsic::x86_avx512_vp2intersect_q_256:
26188 case Intrinsic::x86_avx512_vp2intersect_q_128:
26189 case Intrinsic::x86_avx512_vp2intersect_d_512:
26190 case Intrinsic::x86_avx512_vp2intersect_d_256:
26191 case Intrinsic::x86_avx512_vp2intersect_d_128: {
26192 MVT MaskVT = Op.getSimpleValueType();
26194 SDVTList VTs = DAG.getVTList(MVT::Untyped, MVT::Other);
26195 SDLoc DL(Op);
26197 SDValue Operation =
26198 DAG.getNode(X86ISD::VP2INTERSECT, DL, VTs,
26199 Op->getOperand(1), Op->getOperand(2));
26201 SDValue Result0 = DAG.getTargetExtractSubreg(X86::sub_mask_0, DL,
26202 MaskVT, Operation);
26203 SDValue Result1 = DAG.getTargetExtractSubreg(X86::sub_mask_1, DL,
26204 MaskVT, Operation);
26205 return DAG.getMergeValues({Result0, Result1}, DL);
26207 case Intrinsic::x86_mmx_pslli_w:
26208 case Intrinsic::x86_mmx_pslli_d:
26209 case Intrinsic::x86_mmx_pslli_q:
26210 case Intrinsic::x86_mmx_psrli_w:
26211 case Intrinsic::x86_mmx_psrli_d:
26212 case Intrinsic::x86_mmx_psrli_q:
26213 case Intrinsic::x86_mmx_psrai_w:
26214 case Intrinsic::x86_mmx_psrai_d: {
26215 SDLoc DL(Op);
26216 SDValue ShAmt = Op.getOperand(2);
26217 // If the argument is a constant, convert it to a target constant.
26218 if (auto *C = dyn_cast<ConstantSDNode>(ShAmt)) {
26219 // Clamp out-of-bounds shift amounts, since they will otherwise be masked
26220 // to 8 bits, which may make them no longer out of bounds.
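// e.g. a shift amount of 256 would be masked to 0 and look in range again;
// clamping to 255 keeps it out of range.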
26221 unsigned ShiftAmount = C->getAPIntValue().getLimitedValue(255);
26222 if (ShiftAmount == 0)
26223 return Op.getOperand(1);
26225 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
26226 Op.getOperand(0), Op.getOperand(1),
26227 DAG.getTargetConstant(ShiftAmount, DL, MVT::i32));
26230 unsigned NewIntrinsic;
26231 switch (IntNo) {
26232 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
26233 case Intrinsic::x86_mmx_pslli_w:
26234 NewIntrinsic = Intrinsic::x86_mmx_psll_w;
26235 break;
26236 case Intrinsic::x86_mmx_pslli_d:
26237 NewIntrinsic = Intrinsic::x86_mmx_psll_d;
26238 break;
26239 case Intrinsic::x86_mmx_pslli_q:
26240 NewIntrinsic = Intrinsic::x86_mmx_psll_q;
26241 break;
26242 case Intrinsic::x86_mmx_psrli_w:
26243 NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
26244 break;
26245 case Intrinsic::x86_mmx_psrli_d:
26246 NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
26247 break;
26248 case Intrinsic::x86_mmx_psrli_q:
26249 NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
26250 break;
26251 case Intrinsic::x86_mmx_psrai_w:
26252 NewIntrinsic = Intrinsic::x86_mmx_psra_w;
26253 break;
26254 case Intrinsic::x86_mmx_psrai_d:
26255 NewIntrinsic = Intrinsic::x86_mmx_psra_d;
26256 break;
26259 // The vector shift intrinsics with scalars use 32-bit shift amounts but
26260 // the SSE2/MMX shift instructions read 64 bits. Copy the 32 bits to an
26261 // MMX register.
26262 ShAmt = DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, ShAmt);
26263 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
26264 DAG.getTargetConstant(NewIntrinsic, DL,
26265 getPointerTy(DAG.getDataLayout())),
26266 Op.getOperand(1), ShAmt);
26268 case Intrinsic::thread_pointer: {
26269 if (Subtarget.isTargetELF()) {
26270 SDLoc dl(Op);
26271 EVT PtrVT = getPointerTy(DAG.getDataLayout());
26272 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
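// Address spaces X86AS::GS (256) and X86AS::FS (257) denote %gs- and
// %fs-relative addressing, so a load from the null pointer in that address
// space reads offset 0 from the segment base.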
26273 Value *Ptr = Constant::getNullValue(PointerType::get(
26274 *DAG.getContext(), Subtarget.is64Bit() ? X86AS::FS : X86AS::GS));
26275 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
26276 DAG.getIntPtrConstant(0, dl), MachinePointerInfo(Ptr));
26278 report_fatal_error(
26279 "Target OS doesn't support __builtin_thread_pointer() yet.");
26284 static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
26285 SDValue Src, SDValue Mask, SDValue Base,
26286 SDValue Index, SDValue ScaleOp, SDValue Chain,
26287 const X86Subtarget &Subtarget) {
26288 SDLoc dl(Op);
26289 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
26290 // Scale must be constant.
26291 if (!C)
26292 return SDValue();
26293 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26294 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
26295 TLI.getPointerTy(DAG.getDataLayout()));
26296 EVT MaskVT = Mask.getValueType().changeVectorElementTypeToInteger();
26297 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other);
26298 // If source is undef or we know it won't be used, use a zero vector
26299 // to break register dependency.
26300 // TODO: use undef instead and let BreakFalseDeps deal with it?
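// (Masked-off gather elements merge into the destination, so an undef
// source would still create a dependence on the register's old value.)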
26301 if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
26302 Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
26304 // Cast mask to an integer type.
26305 Mask = DAG.getBitcast(MaskVT, Mask);
26307 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
26309 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
26310 SDValue Res =
26311 DAG.getMemIntrinsicNode(X86ISD::MGATHER, dl, VTs, Ops,
26312 MemIntr->getMemoryVT(), MemIntr->getMemOperand());
26313 return DAG.getMergeValues({Res, Res.getValue(1)}, dl);
26316 static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG,
26317 SDValue Src, SDValue Mask, SDValue Base,
26318 SDValue Index, SDValue ScaleOp, SDValue Chain,
26319 const X86Subtarget &Subtarget) {
26320 MVT VT = Op.getSimpleValueType();
26321 SDLoc dl(Op);
26322 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
26323 // Scale must be constant.
26324 if (!C)
26325 return SDValue();
26326 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26327 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
26328 TLI.getPointerTy(DAG.getDataLayout()));
26329 unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
26330 VT.getVectorNumElements());
26331 MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
26333 // We support two versions of the gather intrinsics. One with scalar mask and
26334 // one with vXi1 mask. Convert scalar to vXi1 if necessary.
26335 if (Mask.getValueType() != MaskVT)
26336 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
26338 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other);
26339 // If source is undef or we know it won't be used, use a zero vector
26340 // to break register dependency.
26341 // TODO: use undef instead and let BreakFalseDeps deal with it?
26342 if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
26343 Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
26345 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
26347 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
26348 SDValue Res =
26349 DAG.getMemIntrinsicNode(X86ISD::MGATHER, dl, VTs, Ops,
26350 MemIntr->getMemoryVT(), MemIntr->getMemOperand());
26351 return DAG.getMergeValues({Res, Res.getValue(1)}, dl);
26354 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
26355 SDValue Src, SDValue Mask, SDValue Base,
26356 SDValue Index, SDValue ScaleOp, SDValue Chain,
26357 const X86Subtarget &Subtarget) {
26358 SDLoc dl(Op);
26359 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
26360 // Scale must be constant.
26361 if (!C)
26362 return SDValue();
26363 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26364 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
26365 TLI.getPointerTy(DAG.getDataLayout()));
26366 unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
26367 Src.getSimpleValueType().getVectorNumElements());
26368 MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
26370 // We support two versions of the scatter intrinsics. One with scalar mask and
26371 // one with vXi1 mask. Convert scalar to vXi1 if necessary.
26372 if (Mask.getValueType() != MaskVT)
26373 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
26375 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
26377 SDVTList VTs = DAG.getVTList(MVT::Other);
26378 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
26379 SDValue Res =
26380 DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
26381 MemIntr->getMemoryVT(), MemIntr->getMemOperand());
26382 return Res;
26385 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
26386 SDValue Mask, SDValue Base, SDValue Index,
26387 SDValue ScaleOp, SDValue Chain,
26388 const X86Subtarget &Subtarget) {
26389 SDLoc dl(Op);
26390 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
26391 // Scale must be constant.
26392 if (!C)
26393 return SDValue();
26394 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26395 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
26396 TLI.getPointerTy(DAG.getDataLayout()));
26397 SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
26398 SDValue Segment = DAG.getRegister(0, MVT::i32);
26399 MVT MaskVT =
26400 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
26401 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
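// Build the machine node directly; the memory operands use the usual x86
// (Base, Scale, Index, Disp, Segment) form, preceded by the mask and
// followed by the chain.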
26402 SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
26403 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
26404 return SDValue(Res, 0);
26407 /// Handles the lowering of builtin intrinsics with chain that return their
26408 /// value into registers EDX:EAX.
26409 /// If operand SrcReg is a valid register identifier, then operand 2 of N is
26410 /// copied to SrcReg. The assumption is that SrcReg is an implicit input to
26411 /// TargetOpcode.
26412 /// Returns a Glue value which can be used to add extra copy-from-reg if the
26413 /// expanded intrinsic implicitly defines extra registers (i.e. not just
26414 /// EDX:EAX).
26415 static SDValue expandIntrinsicWChainHelper(SDNode *N, const SDLoc &DL,
26416 SelectionDAG &DAG,
26417 unsigned TargetOpcode,
26418 unsigned SrcReg,
26419 const X86Subtarget &Subtarget,
26420 SmallVectorImpl<SDValue> &Results) {
26421 SDValue Chain = N->getOperand(0);
26422 SDValue Glue;
26424 if (SrcReg) {
26425 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
26426 Chain = DAG.getCopyToReg(Chain, DL, SrcReg, N->getOperand(2), Glue);
26427 Glue = Chain.getValue(1);
26430 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
26431 SDValue N1Ops[] = {Chain, Glue};
26432 SDNode *N1 = DAG.getMachineNode(
26433 TargetOpcode, DL, Tys, ArrayRef<SDValue>(N1Ops, Glue.getNode() ? 2 : 1));
26434 Chain = SDValue(N1, 0);
26436 // The expanded instruction returns its 64-bit result in registers EDX:EAX.
26437 SDValue LO, HI;
26438 if (Subtarget.is64Bit()) {
26439 LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
26440 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
26441 LO.getValue(2));
26442 } else {
26443 LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
26444 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
26445 LO.getValue(2));
26447 Chain = HI.getValue(1);
26448 Glue = HI.getValue(2);
26450 if (Subtarget.is64Bit()) {
26451 // Merge the two 32-bit values into a 64-bit one.
26452 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
26453 DAG.getConstant(32, DL, MVT::i8));
26454 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
26455 Results.push_back(Chain);
26456 return Glue;
26459 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
26460 SDValue Ops[] = { LO, HI };
26461 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
26462 Results.push_back(Pair);
26463 Results.push_back(Chain);
26464 return Glue;
26467 /// Handles the lowering of builtin intrinsics that read the time stamp counter
26468 /// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
26469 /// READCYCLECOUNTER nodes.
26470 static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
26471 SelectionDAG &DAG,
26472 const X86Subtarget &Subtarget,
26473 SmallVectorImpl<SDValue> &Results) {
26474 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
26475 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
26476 // and the EAX register is loaded with the low-order 32 bits.
26477 SDValue Glue = expandIntrinsicWChainHelper(N, DL, DAG, Opcode,
26478 /* NoRegister */0, Subtarget,
26479 Results);
26480 if (Opcode != X86::RDTSCP)
26481 return;
26483 SDValue Chain = Results[1];
26484 // Instruction RDTSCP loads the IA32_TSC_AUX MSR (address C000_0103H) into
26485 // the ECX register. Add 'ecx' explicitly to the chain.
26486 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32, Glue);
26487 Results[1] = ecx;
26488 Results.push_back(ecx.getValue(1));
26491 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
26492 SelectionDAG &DAG) {
26493 SmallVector<SDValue, 3> Results;
26494 SDLoc DL(Op);
26495 getReadTimeStampCounter(Op.getNode(), DL, X86::RDTSC, DAG, Subtarget,
26496 Results);
26497 return DAG.getMergeValues(Results, DL);
26500 static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
26501 MachineFunction &MF = DAG.getMachineFunction();
26502 SDValue Chain = Op.getOperand(0);
26503 SDValue RegNode = Op.getOperand(2);
26504 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
26505 if (!EHInfo)
26506 report_fatal_error("EH registrations only live in functions using WinEH");
26508 // Cast the operand to an alloca, and remember the frame index.
26509 auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
26510 if (!FINode)
26511 report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
26512 EHInfo->EHRegNodeFrameIndex = FINode->getIndex();
26514 // Return the chain operand without making any DAG nodes.
26515 return Chain;
26518 static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
26519 MachineFunction &MF = DAG.getMachineFunction();
26520 SDValue Chain = Op.getOperand(0);
26521 SDValue EHGuard = Op.getOperand(2);
26522 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
26523 if (!EHInfo)
26524 report_fatal_error("EHGuard only lives in functions using WinEH");
26526 // Cast the operand to an alloca, and remember the frame index.
26527 auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
26528 if (!FINode)
26529 report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
26530 EHInfo->EHGuardFrameIndex = FINode->getIndex();
26532 // Return the chain operand without making any DAG nodes.
26533 return Chain;
26536 /// Emit Truncating Store with signed or unsigned saturation.
26537 static SDValue
26538 EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &DL, SDValue Val,
26539 SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
26540 SelectionDAG &DAG) {
26541 SDVTList VTs = DAG.getVTList(MVT::Other);
26542 SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
26543 SDValue Ops[] = { Chain, Val, Ptr, Undef };
26544 unsigned Opc = SignedSat ? X86ISD::VTRUNCSTORES : X86ISD::VTRUNCSTOREUS;
26545 return DAG.getMemIntrinsicNode(Opc, DL, VTs, Ops, MemVT, MMO);
26548 /// Emit Masked Truncating Store with signed or unsigned saturation.
26549 static SDValue EmitMaskedTruncSStore(bool SignedSat, SDValue Chain,
26550 const SDLoc &DL,
26551 SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
26552 MachineMemOperand *MMO, SelectionDAG &DAG) {
26553 SDVTList VTs = DAG.getVTList(MVT::Other);
26554 SDValue Ops[] = { Chain, Val, Ptr, Mask };
26555 unsigned Opc = SignedSat ? X86ISD::VMTRUNCSTORES : X86ISD::VMTRUNCSTOREUS;
26556 return DAG.getMemIntrinsicNode(Opc, DL, VTs, Ops, MemVT, MMO);
26559 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
26560 SelectionDAG &DAG) {
26561 unsigned IntNo = Op.getConstantOperandVal(1);
26562 const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
26563 if (!IntrData) {
26564 switch (IntNo) {
26566 case Intrinsic::swift_async_context_addr: {
26567 SDLoc dl(Op);
26568 auto &MF = DAG.getMachineFunction();
26569 auto X86FI = MF.getInfo<X86MachineFunctionInfo>();
26570 if (Subtarget.is64Bit()) {
26571 MF.getFrameInfo().setFrameAddressIsTaken(true);
26572 X86FI->setHasSwiftAsyncContext(true);
26573 SDValue Chain = Op->getOperand(0);
26574 SDValue CopyRBP = DAG.getCopyFromReg(Chain, dl, X86::RBP, MVT::i64);
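// In the Swift extended frame the async context slot sits immediately below
// the saved frame pointer, so its address is RBP - 8.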
26575 SDValue Result =
26576 SDValue(DAG.getMachineNode(X86::SUB64ri32, dl, MVT::i64, CopyRBP,
26577 DAG.getTargetConstant(8, dl, MVT::i32)),
26579 // Return { result, chain }.
26580 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
26581 CopyRBP.getValue(1));
26582 } else {
26583 // 32-bit so no special extended frame, create or reuse an existing
26584 // stack slot.
26585 if (!X86FI->getSwiftAsyncContextFrameIdx())
26586 X86FI->setSwiftAsyncContextFrameIdx(
26587 MF.getFrameInfo().CreateStackObject(4, Align(4), false));
26588 SDValue Result =
26589 DAG.getFrameIndex(*X86FI->getSwiftAsyncContextFrameIdx(), MVT::i32);
26590 // Return { result, chain }.
26591 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
26592 Op->getOperand(0));
26596 case llvm::Intrinsic::x86_seh_ehregnode:
26597 return MarkEHRegistrationNode(Op, DAG);
26598 case llvm::Intrinsic::x86_seh_ehguard:
26599 return MarkEHGuard(Op, DAG);
26600 case llvm::Intrinsic::x86_rdpkru: {
26601 SDLoc dl(Op);
26602 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
26603 // Create a RDPKRU node and pass 0 to the ECX parameter.
26604 return DAG.getNode(X86ISD::RDPKRU, dl, VTs, Op.getOperand(0),
26605 DAG.getConstant(0, dl, MVT::i32));
26607 case llvm::Intrinsic::x86_wrpkru: {
26608 SDLoc dl(Op);
26609 // Create a WRPKRU node, pass the input to the EAX parameter, and pass 0
26610 // to the EDX and ECX parameters.
26611 return DAG.getNode(X86ISD::WRPKRU, dl, MVT::Other,
26612 Op.getOperand(0), Op.getOperand(2),
26613 DAG.getConstant(0, dl, MVT::i32),
26614 DAG.getConstant(0, dl, MVT::i32));
26616 case llvm::Intrinsic::asan_check_memaccess: {
26617 // Mark this as adjustsStack because it will be lowered to a call.
26618 DAG.getMachineFunction().getFrameInfo().setAdjustsStack(true);
26619 // Don't do anything here, we will expand these intrinsics out later.
26620 return Op;
26622 case llvm::Intrinsic::x86_flags_read_u32:
26623 case llvm::Intrinsic::x86_flags_read_u64:
26624 case llvm::Intrinsic::x86_flags_write_u32:
26625 case llvm::Intrinsic::x86_flags_write_u64: {
26626 // We need a frame pointer because this will get lowered to a PUSH/POP
26627 // sequence.
26628 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
26629 MFI.setHasCopyImplyingStackAdjustment(true);
26630 // Don't do anything here, we will expand these intrinsics out later
26631 // during FinalizeISel in EmitInstrWithCustomInserter.
26632 return Op;
26634 case Intrinsic::x86_lwpins32:
26635 case Intrinsic::x86_lwpins64:
26636 case Intrinsic::x86_umwait:
26637 case Intrinsic::x86_tpause: {
26638 SDLoc dl(Op);
26639 SDValue Chain = Op->getOperand(0);
26640 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
26641 unsigned Opcode;
26643 switch (IntNo) {
26644 default: llvm_unreachable("Impossible intrinsic");
26645 case Intrinsic::x86_umwait:
26646 Opcode = X86ISD::UMWAIT;
26647 break;
26648 case Intrinsic::x86_tpause:
26649 Opcode = X86ISD::TPAUSE;
26650 break;
26651 case Intrinsic::x86_lwpins32:
26652 case Intrinsic::x86_lwpins64:
26653 Opcode = X86ISD::LWPINS;
26654 break;
26657 SDValue Operation =
26658 DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2),
26659 Op->getOperand(3), Op->getOperand(4));
26660 SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
26661 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
26662 Operation.getValue(1));
26664 case Intrinsic::x86_enqcmd:
26665 case Intrinsic::x86_enqcmds: {
26666 SDLoc dl(Op);
26667 SDValue Chain = Op.getOperand(0);
26668 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
26669 unsigned Opcode;
26670 switch (IntNo) {
26671 default: llvm_unreachable("Impossible intrinsic!");
26672 case Intrinsic::x86_enqcmd:
26673 Opcode = X86ISD::ENQCMD;
26674 break;
26675 case Intrinsic::x86_enqcmds:
26676 Opcode = X86ISD::ENQCMDS;
26677 break;
26679 SDValue Operation = DAG.getNode(Opcode, dl, VTs, Chain, Op.getOperand(2),
26680 Op.getOperand(3));
26681 SDValue SetCC = getSETCC(X86::COND_E, Operation.getValue(0), dl, DAG);
26682 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
26683 Operation.getValue(1));
26685 case Intrinsic::x86_aesenc128kl:
26686 case Intrinsic::x86_aesdec128kl:
26687 case Intrinsic::x86_aesenc256kl:
26688 case Intrinsic::x86_aesdec256kl: {
26689 SDLoc DL(Op);
26690 SDVTList VTs = DAG.getVTList(MVT::v2i64, MVT::i32, MVT::Other);
26691 SDValue Chain = Op.getOperand(0);
26692 unsigned Opcode;
26694 switch (IntNo) {
26695 default: llvm_unreachable("Impossible intrinsic");
26696 case Intrinsic::x86_aesenc128kl:
26697 Opcode = X86ISD::AESENC128KL;
26698 break;
26699 case Intrinsic::x86_aesdec128kl:
26700 Opcode = X86ISD::AESDEC128KL;
26701 break;
26702 case Intrinsic::x86_aesenc256kl:
26703 Opcode = X86ISD::AESENC256KL;
26704 break;
26705 case Intrinsic::x86_aesdec256kl:
26706 Opcode = X86ISD::AESDEC256KL;
26707 break;
26710 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
26711 MachineMemOperand *MMO = MemIntr->getMemOperand();
26712 EVT MemVT = MemIntr->getMemoryVT();
26713 SDValue Operation = DAG.getMemIntrinsicNode(
26714 Opcode, DL, VTs, {Chain, Op.getOperand(2), Op.getOperand(3)}, MemVT,
26715 MMO);
26716 SDValue ZF = getSETCC(X86::COND_E, Operation.getValue(1), DL, DAG);
26718 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
26719 {ZF, Operation.getValue(0), Operation.getValue(2)});
26721 case Intrinsic::x86_aesencwide128kl:
26722 case Intrinsic::x86_aesdecwide128kl:
26723 case Intrinsic::x86_aesencwide256kl:
26724 case Intrinsic::x86_aesdecwide256kl: {
26725 SDLoc DL(Op);
26726 SDVTList VTs = DAG.getVTList(
26727 {MVT::i32, MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::v2i64,
26728 MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::Other});
26729 SDValue Chain = Op.getOperand(0);
26730 unsigned Opcode;
26732 switch (IntNo) {
26733 default: llvm_unreachable("Impossible intrinsic");
26734 case Intrinsic::x86_aesencwide128kl:
26735 Opcode = X86ISD::AESENCWIDE128KL;
26736 break;
26737 case Intrinsic::x86_aesdecwide128kl:
26738 Opcode = X86ISD::AESDECWIDE128KL;
26739 break;
26740 case Intrinsic::x86_aesencwide256kl:
26741 Opcode = X86ISD::AESENCWIDE256KL;
26742 break;
26743 case Intrinsic::x86_aesdecwide256kl:
26744 Opcode = X86ISD::AESDECWIDE256KL;
26745 break;
26748 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
26749 MachineMemOperand *MMO = MemIntr->getMemOperand();
26750 EVT MemVT = MemIntr->getMemoryVT();
26751 SDValue Operation = DAG.getMemIntrinsicNode(
26752 Opcode, DL, VTs,
26753 {Chain, Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
26754 Op.getOperand(5), Op.getOperand(6), Op.getOperand(7),
26755 Op.getOperand(8), Op.getOperand(9), Op.getOperand(10)},
26756 MemVT, MMO);
26757 SDValue ZF = getSETCC(X86::COND_E, Operation.getValue(0), DL, DAG);
26759 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
26760 {ZF, Operation.getValue(1), Operation.getValue(2),
26761 Operation.getValue(3), Operation.getValue(4),
26762 Operation.getValue(5), Operation.getValue(6),
26763 Operation.getValue(7), Operation.getValue(8),
26764 Operation.getValue(9)});
26766 case Intrinsic::x86_testui: {
26767 SDLoc dl(Op);
26768 SDValue Chain = Op.getOperand(0);
26769 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
26770 SDValue Operation = DAG.getNode(X86ISD::TESTUI, dl, VTs, Chain);
26771 SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
26772 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
26773 Operation.getValue(1));
26775 case Intrinsic::x86_atomic_bts_rm:
26776 case Intrinsic::x86_atomic_btc_rm:
26777 case Intrinsic::x86_atomic_btr_rm: {
26778 SDLoc DL(Op);
26779 MVT VT = Op.getSimpleValueType();
26780 SDValue Chain = Op.getOperand(0);
26781 SDValue Op1 = Op.getOperand(2);
26782 SDValue Op2 = Op.getOperand(3);
26783 unsigned Opc = IntNo == Intrinsic::x86_atomic_bts_rm ? X86ISD::LBTS_RM
26784 : IntNo == Intrinsic::x86_atomic_btc_rm ? X86ISD::LBTC_RM
26785 : X86ISD::LBTR_RM;
26786 MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
26787 SDValue Res =
26788 DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::i32, MVT::Other),
26789 {Chain, Op1, Op2}, VT, MMO);
26790 Chain = Res.getValue(1);
26791 Res = DAG.getZExtOrTrunc(getSETCC(X86::COND_B, Res, DL, DAG), DL, VT);
26792 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Res, Chain);
26794 case Intrinsic::x86_atomic_bts:
26795 case Intrinsic::x86_atomic_btc:
26796 case Intrinsic::x86_atomic_btr: {
26797 SDLoc DL(Op);
26798 MVT VT = Op.getSimpleValueType();
26799 SDValue Chain = Op.getOperand(0);
26800 SDValue Op1 = Op.getOperand(2);
26801 SDValue Op2 = Op.getOperand(3);
26802 unsigned Opc = IntNo == Intrinsic::x86_atomic_bts ? X86ISD::LBTS
26803 : IntNo == Intrinsic::x86_atomic_btc ? X86ISD::LBTC
26804 : X86ISD::LBTR;
26805 SDValue Size = DAG.getConstant(VT.getScalarSizeInBits(), DL, MVT::i32);
26806 MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
26807 SDValue Res =
26808 DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::i32, MVT::Other),
26809 {Chain, Op1, Op2, Size}, VT, MMO);
26810 Chain = Res.getValue(1);
26811 Res = DAG.getZExtOrTrunc(getSETCC(X86::COND_B, Res, DL, DAG), DL, VT);
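// Res is the tested bit as a 0/1 value (taken from CF); shift it back into
// the bit position selected by the constant operand.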
26812 unsigned Imm = Op2->getAsZExtVal();
26813 if (Imm)
26814 Res = DAG.getNode(ISD::SHL, DL, VT, Res,
26815 DAG.getShiftAmountConstant(Imm, VT, DL));
26816 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Res, Chain);
26818 case Intrinsic::x86_cmpccxadd32:
26819 case Intrinsic::x86_cmpccxadd64: {
26820 SDLoc DL(Op);
26821 SDValue Chain = Op.getOperand(0);
26822 SDValue Addr = Op.getOperand(2);
26823 SDValue Src1 = Op.getOperand(3);
26824 SDValue Src2 = Op.getOperand(4);
26825 SDValue CC = Op.getOperand(5);
26826 MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
26827 SDValue Operation = DAG.getMemIntrinsicNode(
26828 X86ISD::CMPCCXADD, DL, Op->getVTList(), {Chain, Addr, Src1, Src2, CC},
26829 MVT::i32, MMO);
26830 return Operation;
26832 case Intrinsic::x86_aadd32:
26833 case Intrinsic::x86_aadd64:
26834 case Intrinsic::x86_aand32:
26835 case Intrinsic::x86_aand64:
26836 case Intrinsic::x86_aor32:
26837 case Intrinsic::x86_aor64:
26838 case Intrinsic::x86_axor32:
26839 case Intrinsic::x86_axor64: {
26840 SDLoc DL(Op);
26841 SDValue Chain = Op.getOperand(0);
26842 SDValue Op1 = Op.getOperand(2);
26843 SDValue Op2 = Op.getOperand(3);
26844 MVT VT = Op2.getSimpleValueType();
26845 unsigned Opc = 0;
26846 switch (IntNo) {
26847 default:
26848 llvm_unreachable("Unknown Intrinsic");
26849 case Intrinsic::x86_aadd32:
26850 case Intrinsic::x86_aadd64:
26851 Opc = X86ISD::AADD;
26852 break;
26853 case Intrinsic::x86_aand32:
26854 case Intrinsic::x86_aand64:
26855 Opc = X86ISD::AAND;
26856 break;
26857 case Intrinsic::x86_aor32:
26858 case Intrinsic::x86_aor64:
26859 Opc = X86ISD::AOR;
26860 break;
26861 case Intrinsic::x86_axor32:
26862 case Intrinsic::x86_axor64:
26863 Opc = X86ISD::AXOR;
26864 break;
26866 MachineMemOperand *MMO = cast<MemSDNode>(Op)->getMemOperand();
26867 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(),
26868 {Chain, Op1, Op2}, VT, MMO);
26870 case Intrinsic::x86_atomic_add_cc:
26871 case Intrinsic::x86_atomic_sub_cc:
26872 case Intrinsic::x86_atomic_or_cc:
26873 case Intrinsic::x86_atomic_and_cc:
26874 case Intrinsic::x86_atomic_xor_cc: {
26875 SDLoc DL(Op);
26876 SDValue Chain = Op.getOperand(0);
26877 SDValue Op1 = Op.getOperand(2);
26878 SDValue Op2 = Op.getOperand(3);
26879 X86::CondCode CC = (X86::CondCode)Op.getConstantOperandVal(4);
26880 MVT VT = Op2.getSimpleValueType();
26881 unsigned Opc = 0;
26882 switch (IntNo) {
26883 default:
26884 llvm_unreachable("Unknown Intrinsic");
26885 case Intrinsic::x86_atomic_add_cc:
26886 Opc = X86ISD::LADD;
26887 break;
26888 case Intrinsic::x86_atomic_sub_cc:
26889 Opc = X86ISD::LSUB;
26890 break;
26891 case Intrinsic::x86_atomic_or_cc:
26892 Opc = X86ISD::LOR;
26893 break;
26894 case Intrinsic::x86_atomic_and_cc:
26895 Opc = X86ISD::LAND;
26896 break;
26897 case Intrinsic::x86_atomic_xor_cc:
26898 Opc = X86ISD::LXOR;
26899 break;
26901 MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
26902 SDValue LockArith =
26903 DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::i32, MVT::Other),
26904 {Chain, Op1, Op2}, VT, MMO);
26905 Chain = LockArith.getValue(1);
26906 return DAG.getMergeValues({getSETCC(CC, LockArith, DL, DAG), Chain}, DL);
26909 return SDValue();
26912 SDLoc dl(Op);
26913 switch(IntrData->Type) {
26914 default: llvm_unreachable("Unknown Intrinsic Type");
26915 case RDSEED:
26916 case RDRAND: {
26917 // Emit the node with the right value type.
26918 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other);
26919 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
26921 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
26922 // Otherwise return the value from Rand, which is always 0, cast to i32.
26923 SDValue Ops[] = {DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
26924 DAG.getConstant(1, dl, Op->getValueType(1)),
26925 DAG.getTargetConstant(X86::COND_B, dl, MVT::i8),
26926 SDValue(Result.getNode(), 1)};
26927 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);
26929 // Return { result, isValid, chain }.
26930 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
26931 SDValue(Result.getNode(), 2));
26933 case GATHER_AVX2: {
26934 SDValue Chain = Op.getOperand(0);
26935 SDValue Src = Op.getOperand(2);
26936 SDValue Base = Op.getOperand(3);
26937 SDValue Index = Op.getOperand(4);
26938 SDValue Mask = Op.getOperand(5);
26939 SDValue Scale = Op.getOperand(6);
26940 return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
26941 Scale, Chain, Subtarget);
26943 case GATHER: {
26944 // gather(v1, mask, index, base, scale);
26945 SDValue Chain = Op.getOperand(0);
26946 SDValue Src = Op.getOperand(2);
26947 SDValue Base = Op.getOperand(3);
26948 SDValue Index = Op.getOperand(4);
26949 SDValue Mask = Op.getOperand(5);
26950 SDValue Scale = Op.getOperand(6);
26951 return getGatherNode(Op, DAG, Src, Mask, Base, Index, Scale,
26952 Chain, Subtarget);
26954 case SCATTER: {
26955 // scatter(base, mask, index, v1, scale);
26956 SDValue Chain = Op.getOperand(0);
26957 SDValue Base = Op.getOperand(2);
26958 SDValue Mask = Op.getOperand(3);
26959 SDValue Index = Op.getOperand(4);
26960 SDValue Src = Op.getOperand(5);
26961 SDValue Scale = Op.getOperand(6);
26962 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
26963 Scale, Chain, Subtarget);
26965 case PREFETCH: {
26966 const APInt &HintVal = Op.getConstantOperandAPInt(6);
26967 assert((HintVal == 2 || HintVal == 3) &&
26968 "Wrong prefetch hint in intrinsic: should be 2 or 3");
26969 unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
26970 SDValue Chain = Op.getOperand(0);
26971 SDValue Mask = Op.getOperand(2);
26972 SDValue Index = Op.getOperand(3);
26973 SDValue Base = Op.getOperand(4);
26974 SDValue Scale = Op.getOperand(5);
26975 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
26976 Subtarget);
26978 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
26979 case RDTSC: {
26980 SmallVector<SDValue, 2> Results;
26981 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
26982 Results);
26983 return DAG.getMergeValues(Results, dl);
26985 // Read Performance Monitoring Counters.
26986 case RDPMC:
26987 // Read Processor Register.
26988 case RDPRU:
26989 // Get Extended Control Register.
26990 case XGETBV: {
26991 SmallVector<SDValue, 2> Results;
26993 // RDPMC uses ECX to select the index of the performance counter to read.
26994 // RDPRU uses ECX to select the processor register to read.
26995 // XGETBV uses ECX to select the index of the XCR register to return.
26996 // The result is stored into registers EDX:EAX.
26997 expandIntrinsicWChainHelper(Op.getNode(), dl, DAG, IntrData->Opc0, X86::ECX,
26998 Subtarget, Results);
26999 return DAG.getMergeValues(Results, dl);
27001 // XTEST intrinsics.
27002 case XTEST: {
27003 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
27004 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
27006 SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
27007 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
27008 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
27009 Ret, SDValue(InTrans.getNode(), 1));
27011 case TRUNCATE_TO_MEM_VI8:
27012 case TRUNCATE_TO_MEM_VI16:
27013 case TRUNCATE_TO_MEM_VI32: {
27014 SDValue Mask = Op.getOperand(4);
27015 SDValue DataToTruncate = Op.getOperand(3);
27016 SDValue Addr = Op.getOperand(2);
27017 SDValue Chain = Op.getOperand(0);
27019 MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
27020 assert(MemIntr && "Expected MemIntrinsicSDNode!");
27022 EVT MemVT = MemIntr->getMemoryVT();
27024 uint16_t TruncationOp = IntrData->Opc0;
27025 switch (TruncationOp) {
27026 case X86ISD::VTRUNC: {
27027 if (isAllOnesConstant(Mask)) // return just a truncate store
27028 return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
27029 MemIntr->getMemOperand());
27031 MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
27032 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
27033 SDValue Offset = DAG.getUNDEF(VMask.getValueType());
27035 return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, Offset, VMask,
27036 MemVT, MemIntr->getMemOperand(), ISD::UNINDEXED,
27037 true /* truncating */);
27039 case X86ISD::VTRUNCUS:
27040 case X86ISD::VTRUNCS: {
27041 bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
27042 if (isAllOnesConstant(Mask))
27043 return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
27044 MemIntr->getMemOperand(), DAG);
27046 MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
27047 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
27049 return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
27050 VMask, MemVT, MemIntr->getMemOperand(), DAG);
27052 default:
27053 llvm_unreachable("Unsupported truncstore intrinsic");
27059 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
27060 SelectionDAG &DAG) const {
27061 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
27062 MFI.setReturnAddressIsTaken(true);
27064 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
27065 return SDValue();
27067 unsigned Depth = Op.getConstantOperandVal(0);
27068 SDLoc dl(Op);
27069 EVT PtrVT = getPointerTy(DAG.getDataLayout());
27071 if (Depth > 0) {
27072 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
27073 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
27074 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
27075 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
27076 DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
27077 MachinePointerInfo());
27080 // Just load the return address.
27081 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
27082 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
27083 MachinePointerInfo());
27086 SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
27087 SelectionDAG &DAG) const {
27088 DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
27089 return getReturnAddressFrameIndex(DAG);
27092 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
27093 MachineFunction &MF = DAG.getMachineFunction();
27094 MachineFrameInfo &MFI = MF.getFrameInfo();
27095 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
27096 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
27097 EVT VT = Op.getValueType();
27099 MFI.setFrameAddressIsTaken(true);
27101 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
27102 // Depth > 0 makes no sense on targets which use Windows unwind codes. It
27103 // is not possible to crawl up the stack without looking at the unwind codes
27104 // simultaneously.
27105 int FrameAddrIndex = FuncInfo->getFAIndex();
27106 if (!FrameAddrIndex) {
27107 // Set up a frame object for the return address.
27108 unsigned SlotSize = RegInfo->getSlotSize();
27109 FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
27110 SlotSize, /*SPOffset=*/0, /*IsImmutable=*/false);
27111 FuncInfo->setFAIndex(FrameAddrIndex);
27113 return DAG.getFrameIndex(FrameAddrIndex, VT);
27116 unsigned FrameReg =
27117 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
27118 SDLoc dl(Op); // FIXME probably not meaningful
27119 unsigned Depth = Op.getConstantOperandVal(0);
27120 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
27121 (FrameReg == X86::EBP && VT == MVT::i32)) &&
27122 "Invalid Frame Register!");
27123 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
27124 while (Depth--)
27125 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
27126 MachinePointerInfo());
27127 return FrameAddr;
27130 // FIXME? Maybe this could be a TableGen attribute on some registers and
27131 // this table could be generated automatically from RegInfo.
27132 Register X86TargetLowering::getRegisterByName(const char* RegName, LLT VT,
27133 const MachineFunction &MF) const {
27134 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
27136 Register Reg = StringSwitch<unsigned>(RegName)
27137 .Case("esp", X86::ESP)
27138 .Case("rsp", X86::RSP)
27139 .Case("ebp", X86::EBP)
27140 .Case("rbp", X86::RBP)
27141 .Case("r14", X86::R14)
27142 .Case("r15", X86::R15)
27143 .Default(0);
27145 if (Reg == X86::EBP || Reg == X86::RBP) {
27146 if (!TFI.hasFP(MF))
27147 report_fatal_error("register " + StringRef(RegName) +
27148 " is allocatable: function has no frame pointer");
27149 #ifndef NDEBUG
27150 else {
27151 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
27152 Register FrameReg = RegInfo->getPtrSizedFrameRegister(MF);
27153 assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
27154 "Invalid Frame Register!");
27156 #endif
27159 if (Reg)
27160 return Reg;
27162 report_fatal_error("Invalid register name global variable");
27165 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
27166 SelectionDAG &DAG) const {
27167 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
27168 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
27171 Register X86TargetLowering::getExceptionPointerRegister(
27172 const Constant *PersonalityFn) const {
27173 if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
27174 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
27176 return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
27179 Register X86TargetLowering::getExceptionSelectorRegister(
27180 const Constant *PersonalityFn) const {
27181 // Funclet personalities don't use selectors (the runtime does the selection).
27182 if (isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)))
27183 return X86::NoRegister;
27184 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
27187 bool X86TargetLowering::needsFixedCatchObjects() const {
27188 return Subtarget.isTargetWin64();
27191 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
27192 SDValue Chain = Op.getOperand(0);
27193 SDValue Offset = Op.getOperand(1);
27194 SDValue Handler = Op.getOperand(2);
27195 SDLoc dl (Op);
27197 EVT PtrVT = getPointerTy(DAG.getDataLayout());
27198 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
27199 Register FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
27200 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
27201 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
27202 "Invalid Frame Register!");
27203 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
27204 Register StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
27206 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
27207 DAG.getIntPtrConstant(RegInfo->getSlotSize(),
27208 dl));
27209 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
27210 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
27211 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
27213 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
27214 DAG.getRegister(StoreAddrReg, PtrVT));
27217 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
27218 SelectionDAG &DAG) const {
27219 SDLoc DL(Op);
27220   // If the subtarget is not 64-bit, we may need the global base reg
27221   // after isel expands the pseudo, i.e., after the CGBR pass has run.
27222   // Therefore, ask for the GlobalBaseReg now, so that the pass
27223   // inserts the code for us in case we need it.
27224   // Otherwise, we would end up referencing a virtual register
27225   // that is not defined!
27226 if (!Subtarget.is64Bit()) {
27227 const X86InstrInfo *TII = Subtarget.getInstrInfo();
27228 (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
27230 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
27231 DAG.getVTList(MVT::i32, MVT::Other),
27232 Op.getOperand(0), Op.getOperand(1));
27235 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
27236 SelectionDAG &DAG) const {
27237 SDLoc DL(Op);
27238 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
27239 Op.getOperand(0), Op.getOperand(1));
27242 SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
27243 SelectionDAG &DAG) const {
27244 SDLoc DL(Op);
27245 return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
27246 Op.getOperand(0));
27249 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
27250 return Op.getOperand(0);
27253 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
27254 SelectionDAG &DAG) const {
27255 SDValue Root = Op.getOperand(0);
27256 SDValue Trmp = Op.getOperand(1); // trampoline
27257 SDValue FPtr = Op.getOperand(2); // nested function
27258 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
27259 SDLoc dl (Op);
27261 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
27262 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
27264 if (Subtarget.is64Bit()) {
27265 SDValue OutChains[6];
27267 // Large code-model.
27268 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
27269 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
27271 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
27272 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
27274 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
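  // The six stores below assemble the following 23-byte trampoline:
  //   49 BB <8-byte FPtr>   movabsq $FPtr, %r11
  //   49 BA <8-byte Nest>   movabsq $Nest, %r10
  //   49 FF E3              jmpq    *%r11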
27276 // Load the pointer to the nested function into R11.
27277 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
27278 SDValue Addr = Trmp;
27279 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
27280 Addr, MachinePointerInfo(TrmpAddr));
27282 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
27283 DAG.getConstant(2, dl, MVT::i64));
27284 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
27285 MachinePointerInfo(TrmpAddr, 2), Align(2));
27287 // Load the 'nest' parameter value into R10.
27288 // R10 is specified in X86CallingConv.td
27289 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
27290 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
27291 DAG.getConstant(10, dl, MVT::i64));
27292 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
27293 Addr, MachinePointerInfo(TrmpAddr, 10));
27295 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
27296 DAG.getConstant(12, dl, MVT::i64));
27297 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
27298 MachinePointerInfo(TrmpAddr, 12), Align(2));
27300 // Jump to the nested function.
27301 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
27302 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
27303 DAG.getConstant(20, dl, MVT::i64));
27304 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
27305 Addr, MachinePointerInfo(TrmpAddr, 20));
27307 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
27308 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
27309 DAG.getConstant(22, dl, MVT::i64));
27310 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
27311 Addr, MachinePointerInfo(TrmpAddr, 22));
27313 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
27314 } else {
27315 const Function *Func =
27316 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
27317 CallingConv::ID CC = Func->getCallingConv();
27318 unsigned NestReg;
27320 switch (CC) {
27321 default:
27322 llvm_unreachable("Unsupported calling convention");
27323 case CallingConv::C:
27324 case CallingConv::X86_StdCall: {
27325 // Pass 'nest' parameter in ECX.
27326 // Must be kept in sync with X86CallingConv.td
27327 NestReg = X86::ECX;
27329 // Check that ECX wasn't needed by an 'inreg' parameter.
27330 FunctionType *FTy = Func->getFunctionType();
27331 const AttributeList &Attrs = Func->getAttributes();
27333 if (!Attrs.isEmpty() && !Func->isVarArg()) {
27334 unsigned InRegCount = 0;
27335 unsigned Idx = 0;
27337 for (FunctionType::param_iterator I = FTy->param_begin(),
27338 E = FTy->param_end(); I != E; ++I, ++Idx)
27339 if (Attrs.hasParamAttr(Idx, Attribute::InReg)) {
27340 const DataLayout &DL = DAG.getDataLayout();
27341 // FIXME: should only count parameters that are lowered to integers.
27342 InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
27345 if (InRegCount > 2) {
27346 report_fatal_error("Nest register in use - reduce number of inreg"
27347 " parameters!");
27350 break;
27352 case CallingConv::X86_FastCall:
27353 case CallingConv::X86_ThisCall:
27354 case CallingConv::Fast:
27355 case CallingConv::Tail:
27356 case CallingConv::SwiftTail:
27357 // Pass 'nest' parameter in EAX.
27358 // Must be kept in sync with X86CallingConv.td
27359 NestReg = X86::EAX;
27360 break;
27363 SDValue OutChains[4];
27364 SDValue Addr, Disp;
27366 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
27367 DAG.getConstant(10, dl, MVT::i32));
27368 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
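    // The 32-bit trampoline is 10 bytes: "B8+reg <4-byte Nest>" (mov $Nest into
    // the nest register) followed by "E9 <rel32>" (jmp FPtr). The jmp is
    // relative to the end of the trampoline, hence Disp = FPtr - (Trmp + 10).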
27370 // This is storing the opcode for MOV32ri.
27371 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
27372 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
27373 OutChains[0] =
27374 DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
27375 Trmp, MachinePointerInfo(TrmpAddr));
27377 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
27378 DAG.getConstant(1, dl, MVT::i32));
27379 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
27380 MachinePointerInfo(TrmpAddr, 1), Align(1));
27382 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
27383 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
27384 DAG.getConstant(5, dl, MVT::i32));
27385 OutChains[2] =
27386 DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8), Addr,
27387 MachinePointerInfo(TrmpAddr, 5), Align(1));
27389 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
27390 DAG.getConstant(6, dl, MVT::i32));
27391 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
27392 MachinePointerInfo(TrmpAddr, 6), Align(1));
27394 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
27398 SDValue X86TargetLowering::LowerGET_ROUNDING(SDValue Op,
27399 SelectionDAG &DAG) const {
27400   /*
27401   The rounding mode is in bits 11:10 of FPSR, and has the following
27402   settings:
27403     00 Round to nearest
27404     01 Round to -inf
27405     10 Round to +inf
27406     11 Round to 0
27408   GET_ROUNDING, on the other hand, expects the following:
27409     -1 Undefined
27410      0 Round to 0
27411      1 Round to nearest
27412      2 Round to +inf
27413      3 Round to -inf
27415   To perform the conversion, we use a packed lookup table of the four 2-bit
27416   values that we can index by FPSR[11:10]
27417     0x2d --> (0b00,10,11,01) --> (0,2,3,1) >> FPSR[11:10]
27419     (0x2d >> ((FPSR & 0xc00) >> 9)) & 3
27420   */
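  // For example, with FPSR[11:10] == 01 (round toward -inf):
  //   (0x2d >> ((0x400 & 0xc00) >> 9)) & 3 == (0x2d >> 2) & 3 == 3,
  // which is GET_ROUNDING's encoding for "round to -inf". Likewise 00 -> 1,
  // 10 -> 2 and 11 -> 0.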
27422 MachineFunction &MF = DAG.getMachineFunction();
27423 MVT VT = Op.getSimpleValueType();
27424 SDLoc DL(Op);
27426 // Save FP Control Word to stack slot
27427 int SSFI = MF.getFrameInfo().CreateStackObject(2, Align(2), false);
27428 SDValue StackSlot =
27429 DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
27431 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
27433 SDValue Chain = Op.getOperand(0);
27434 SDValue Ops[] = {Chain, StackSlot};
27435 Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
27436 DAG.getVTList(MVT::Other), Ops, MVT::i16, MPI,
27437 Align(2), MachineMemOperand::MOStore);
27439 // Load FP Control Word from stack slot
27440 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MPI, Align(2));
27441 Chain = CWD.getValue(1);
27443 // Mask and turn the control bits into a shift for the lookup table.
27444 SDValue Shift =
27445 DAG.getNode(ISD::SRL, DL, MVT::i16,
27446 DAG.getNode(ISD::AND, DL, MVT::i16,
27447 CWD, DAG.getConstant(0xc00, DL, MVT::i16)),
27448 DAG.getConstant(9, DL, MVT::i8));
27449 Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Shift);
27451 SDValue LUT = DAG.getConstant(0x2d, DL, MVT::i32);
27452 SDValue RetVal =
27453 DAG.getNode(ISD::AND, DL, MVT::i32,
27454 DAG.getNode(ISD::SRL, DL, MVT::i32, LUT, Shift),
27455 DAG.getConstant(3, DL, MVT::i32));
27457 RetVal = DAG.getZExtOrTrunc(RetVal, DL, VT);
27459 return DAG.getMergeValues({RetVal, Chain}, DL);
27462 SDValue X86TargetLowering::LowerSET_ROUNDING(SDValue Op,
27463 SelectionDAG &DAG) const {
27464 MachineFunction &MF = DAG.getMachineFunction();
27465 SDLoc DL(Op);
27466 SDValue Chain = Op.getNode()->getOperand(0);
27468 // FP control word may be set only from data in memory. So we need to allocate
27469 // stack space to save/load FP control word.
27470 int OldCWFrameIdx = MF.getFrameInfo().CreateStackObject(4, Align(4), false);
27471 SDValue StackSlot =
27472 DAG.getFrameIndex(OldCWFrameIdx, getPointerTy(DAG.getDataLayout()));
27473 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, OldCWFrameIdx);
27474 MachineMemOperand *MMO =
27475 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 2, Align(2));
27477 // Store FP control word into memory.
27478 SDValue Ops[] = {Chain, StackSlot};
27479 Chain = DAG.getMemIntrinsicNode(
27480 X86ISD::FNSTCW16m, DL, DAG.getVTList(MVT::Other), Ops, MVT::i16, MMO);
27482 // Load FP Control Word from stack slot and clear RM field (bits 11:10).
27483 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MPI);
27484 Chain = CWD.getValue(1);
27485 CWD = DAG.getNode(ISD::AND, DL, MVT::i16, CWD.getValue(0),
27486 DAG.getConstant(0xf3ff, DL, MVT::i16));
27488 // Calculate new rounding mode.
27489 SDValue NewRM = Op.getNode()->getOperand(1);
27490 SDValue RMBits;
27491 if (auto *CVal = dyn_cast<ConstantSDNode>(NewRM)) {
27492 uint64_t RM = CVal->getZExtValue();
27493 int FieldVal;
27494 switch (static_cast<RoundingMode>(RM)) {
27495 case RoundingMode::NearestTiesToEven: FieldVal = X86::rmToNearest; break;
27496 case RoundingMode::TowardNegative: FieldVal = X86::rmDownward; break;
27497 case RoundingMode::TowardPositive: FieldVal = X86::rmUpward; break;
27498 case RoundingMode::TowardZero: FieldVal = X86::rmTowardZero; break;
27499 default:
27500 llvm_unreachable("rounding mode is not supported by X86 hardware");
27502 RMBits = DAG.getConstant(FieldVal, DL, MVT::i16);
27503 } else {
27504 // Need to convert argument into bits of control word:
27505 // 0 Round to 0 -> 11
27506 // 1 Round to nearest -> 00
27507 // 2 Round to +inf -> 10
27508 // 3 Round to -inf -> 01
27509 // The 2-bit value needs then to be shifted so that it occupies bits 11:10.
27510 // To make the conversion, put all these values into a value 0xc9 and shift
27511 // it left depending on the rounding mode:
27512 // (0xc9 << 4) & 0xc00 = X86::rmTowardZero
27513 // (0xc9 << 6) & 0xc00 = X86::rmToNearest
27514 // ...
27515 // (0xc9 << (2 * NewRM + 4)) & 0xc00
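    // For example, NewRM == 2 (round to +inf) gives a shift of 2 * 2 + 4 = 8,
    // and (0xc9 << 8) & 0xc00 == 0x800, i.e. RC bits 11:10 == 10 (round up).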
27516 SDValue ShiftValue =
27517 DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
27518 DAG.getNode(ISD::ADD, DL, MVT::i32,
27519 DAG.getNode(ISD::SHL, DL, MVT::i32, NewRM,
27520 DAG.getConstant(1, DL, MVT::i8)),
27521 DAG.getConstant(4, DL, MVT::i32)));
27522 SDValue Shifted =
27523 DAG.getNode(ISD::SHL, DL, MVT::i16, DAG.getConstant(0xc9, DL, MVT::i16),
27524 ShiftValue);
27525 RMBits = DAG.getNode(ISD::AND, DL, MVT::i16, Shifted,
27526 DAG.getConstant(0xc00, DL, MVT::i16));
27529 // Update rounding mode bits and store the new FP Control Word into stack.
27530 CWD = DAG.getNode(ISD::OR, DL, MVT::i16, CWD, RMBits);
27531 Chain = DAG.getStore(Chain, DL, CWD, StackSlot, MPI, Align(2));
27533 // Load FP control word from the slot.
27534 SDValue OpsLD[] = {Chain, StackSlot};
27535 MachineMemOperand *MMOL =
27536 MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 2, Align(2));
27537 Chain = DAG.getMemIntrinsicNode(
27538 X86ISD::FLDCW16m, DL, DAG.getVTList(MVT::Other), OpsLD, MVT::i16, MMOL);
27540 // If target supports SSE, set MXCSR as well. Rounding mode is encoded in the
27541 // same way but in bits 14:13.
27542 if (Subtarget.hasSSE1()) {
27543 // Store MXCSR into memory.
27544 Chain = DAG.getNode(
27545 ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
27546 DAG.getTargetConstant(Intrinsic::x86_sse_stmxcsr, DL, MVT::i32),
27547 StackSlot);
27549 // Load MXCSR from stack slot and clear RM field (bits 14:13).
27550 SDValue CWD = DAG.getLoad(MVT::i32, DL, Chain, StackSlot, MPI);
27551 Chain = CWD.getValue(1);
27552 CWD = DAG.getNode(ISD::AND, DL, MVT::i32, CWD.getValue(0),
27553 DAG.getConstant(0xffff9fff, DL, MVT::i32));
27555 // Shift X87 RM bits from 11:10 to 14:13.
27556 RMBits = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, RMBits);
27557 RMBits = DAG.getNode(ISD::SHL, DL, MVT::i32, RMBits,
27558 DAG.getConstant(3, DL, MVT::i8));
27560 // Update rounding mode bits and store the new FP Control Word into stack.
27561 CWD = DAG.getNode(ISD::OR, DL, MVT::i32, CWD, RMBits);
27562 Chain = DAG.getStore(Chain, DL, CWD, StackSlot, MPI, Align(4));
27564 // Load MXCSR from the slot.
27565 Chain = DAG.getNode(
27566 ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
27567 DAG.getTargetConstant(Intrinsic::x86_sse_ldmxcsr, DL, MVT::i32),
27568 StackSlot);
27571 return Chain;
27574 const unsigned X87StateSize = 28;
27575 const unsigned FPStateSize = 32;
27576 [[maybe_unused]] const unsigned FPStateSizeInBits = FPStateSize * 8;
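// The 32-byte FP environment image used below is the 28-byte x87 FNSTENV
// area followed by the 4-byte MXCSR stored at offset X87StateSize.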
27578 SDValue X86TargetLowering::LowerGET_FPENV_MEM(SDValue Op,
27579 SelectionDAG &DAG) const {
27580 MachineFunction &MF = DAG.getMachineFunction();
27581 SDLoc DL(Op);
27582 SDValue Chain = Op->getOperand(0);
27583 SDValue Ptr = Op->getOperand(1);
27584 auto *Node = cast<FPStateAccessSDNode>(Op);
27585 EVT MemVT = Node->getMemoryVT();
27586 assert(MemVT.getSizeInBits() == FPStateSizeInBits);
27587 MachineMemOperand *MMO = cast<FPStateAccessSDNode>(Op)->getMemOperand();
27589   // Get the x87 state, if present.
27590 if (Subtarget.hasX87()) {
27591 Chain =
27592 DAG.getMemIntrinsicNode(X86ISD::FNSTENVm, DL, DAG.getVTList(MVT::Other),
27593 {Chain, Ptr}, MemVT, MMO);
27595 // FNSTENV changes the exception mask, so load back the stored environment.
27596 MachineMemOperand::Flags NewFlags =
27597 MachineMemOperand::MOLoad |
27598 (MMO->getFlags() & ~MachineMemOperand::MOStore);
27599 MMO = MF.getMachineMemOperand(MMO, NewFlags);
27600 Chain =
27601 DAG.getMemIntrinsicNode(X86ISD::FLDENVm, DL, DAG.getVTList(MVT::Other),
27602 {Chain, Ptr}, MemVT, MMO);
27605 // If target supports SSE, get MXCSR as well.
27606 if (Subtarget.hasSSE1()) {
27607 // Get pointer to the MXCSR location in memory.
27608 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
27609 SDValue MXCSRAddr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr,
27610 DAG.getConstant(X87StateSize, DL, PtrVT));
27611 // Store MXCSR into memory.
27612 Chain = DAG.getNode(
27613 ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
27614 DAG.getTargetConstant(Intrinsic::x86_sse_stmxcsr, DL, MVT::i32),
27615 MXCSRAddr);
27618 return Chain;
27621 static SDValue createSetFPEnvNodes(SDValue Ptr, SDValue Chain, SDLoc DL,
27622 EVT MemVT, MachineMemOperand *MMO,
27623 SelectionDAG &DAG,
27624 const X86Subtarget &Subtarget) {
27625   // Set the x87 state, if present.
27626 if (Subtarget.hasX87())
27627 Chain =
27628 DAG.getMemIntrinsicNode(X86ISD::FLDENVm, DL, DAG.getVTList(MVT::Other),
27629 {Chain, Ptr}, MemVT, MMO);
27630 // If target supports SSE, set MXCSR as well.
27631 if (Subtarget.hasSSE1()) {
27632 // Get pointer to the MXCSR location in memory.
27633 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
27634 SDValue MXCSRAddr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr,
27635 DAG.getConstant(X87StateSize, DL, PtrVT));
27636 // Load MXCSR from memory.
27637 Chain = DAG.getNode(
27638 ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
27639 DAG.getTargetConstant(Intrinsic::x86_sse_ldmxcsr, DL, MVT::i32),
27640 MXCSRAddr);
27642 return Chain;
27645 SDValue X86TargetLowering::LowerSET_FPENV_MEM(SDValue Op,
27646 SelectionDAG &DAG) const {
27647 SDLoc DL(Op);
27648 SDValue Chain = Op->getOperand(0);
27649 SDValue Ptr = Op->getOperand(1);
27650 auto *Node = cast<FPStateAccessSDNode>(Op);
27651 EVT MemVT = Node->getMemoryVT();
27652 assert(MemVT.getSizeInBits() == FPStateSizeInBits);
27653 MachineMemOperand *MMO = cast<FPStateAccessSDNode>(Op)->getMemOperand();
27654 return createSetFPEnvNodes(Ptr, Chain, DL, MemVT, MMO, DAG, Subtarget);
27657 SDValue X86TargetLowering::LowerRESET_FPENV(SDValue Op,
27658 SelectionDAG &DAG) const {
27659 MachineFunction &MF = DAG.getMachineFunction();
27660 SDLoc DL(Op);
27661 SDValue Chain = Op.getNode()->getOperand(0);
27663 IntegerType *ItemTy = Type::getInt32Ty(*DAG.getContext());
27664 ArrayType *FPEnvTy = ArrayType::get(ItemTy, 8);
27665 SmallVector<Constant *, 8> FPEnvVals;
27667   // x87 FPU Control Word: masks all floating-point exceptions, sets rounding
27668   // to nearest. FPU precision is set to 53 bits on Windows and 64 bits
27669   // otherwise for compatibility with glibc.
27670 unsigned X87CW = Subtarget.isTargetWindowsMSVC() ? 0x27F : 0x37F;
27671 FPEnvVals.push_back(ConstantInt::get(ItemTy, X87CW));
27672 Constant *Zero = ConstantInt::get(ItemTy, 0);
27673 for (unsigned I = 0; I < 6; ++I)
27674 FPEnvVals.push_back(Zero);
27676   // MXCSR: masks all floating-point exceptions, sets rounding to nearest,
27677   // clears all exception flags, and sets DAZ and FTZ to 0.
27678 FPEnvVals.push_back(ConstantInt::get(ItemTy, 0x1F80));
27679 Constant *FPEnvBits = ConstantArray::get(FPEnvTy, FPEnvVals);
27680 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
27681 SDValue Env = DAG.getConstantPool(FPEnvBits, PtrVT);
27682 MachinePointerInfo MPI =
27683 MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
27684 MachineMemOperand *MMO = MF.getMachineMemOperand(
27685 MPI, MachineMemOperand::MOStore, X87StateSize, Align(4));
27687 return createSetFPEnvNodes(Env, Chain, DL, MVT::i32, MMO, DAG, Subtarget);
27690 /// Lower a vector CTLZ using the natively supported vector CTLZ instruction.
27692 // i8/i16 vectors are implemented using the dword LZCNT vector instruction
27693 // ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
27694 // split the vector, perform the operation on its Lo and Hi parts and
27695 // concatenate the results.
27696 static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
27697 const X86Subtarget &Subtarget) {
27698 assert(Op.getOpcode() == ISD::CTLZ);
27699 SDLoc dl(Op);
27700 MVT VT = Op.getSimpleValueType();
27701 MVT EltVT = VT.getVectorElementType();
27702 unsigned NumElems = VT.getVectorNumElements();
27704 assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
27705 "Unsupported element type");
27707   // Split the vector; its Lo and Hi parts will be handled in the next iteration.
27708 if (NumElems > 16 ||
27709 (NumElems == 16 && !Subtarget.canExtendTo512DQ()))
27710 return splitVectorIntUnary(Op, DAG);
27712 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
27713 assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
27714 "Unsupported value type for operation");
27716   // Use the natively supported vector instruction vplzcntd.
27717 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
27718 SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
27719 SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
27720 SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
27722 return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
27725 // Lower CTLZ using a PSHUFB lookup table implementation.
27726 static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
27727 const X86Subtarget &Subtarget,
27728 SelectionDAG &DAG) {
27729 MVT VT = Op.getSimpleValueType();
27730 int NumElts = VT.getVectorNumElements();
27731 int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
27732 MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
27734 // Per-nibble leading zero PSHUFB lookup table.
27735 const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
27736 /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
27737 /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
27738 /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
27740 SmallVector<SDValue, 64> LUTVec;
27741 for (int i = 0; i < NumBytes; ++i)
27742 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
27743 SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);
27745   // Begin by bitcasting the input to a byte vector, then split those bytes
27746   // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
27747 // If the hi input nibble is zero then we add both results together, otherwise
27748 // we just take the hi result (by masking the lo result to zero before the
27749 // add).
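  // For example, for the byte 0x0a the hi nibble is zero, so the result is
  // LUT[0x0] + LUT[0xa] = 4 + 0 = 4 leading zeros; for 0x1a the lo result is
  // masked off and we get LUT[0x1] = 3.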
27750 SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
27751 SDValue Zero = DAG.getConstant(0, DL, CurrVT);
27753 SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
27754 SDValue Lo = Op0;
27755 SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
27756 SDValue HiZ;
27757 if (CurrVT.is512BitVector()) {
27758 MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
27759 HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
27760 HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
27761 } else {
27762 HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
27765 Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
27766 Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
27767 Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
27768 SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
27770 // Merge result back from vXi8 back to VT, working on the lo/hi halves
27771 // of the current vector width in the same way we did for the nibbles.
27772 // If the upper half of the input element is zero then add the halves'
27773 // leading zero counts together, otherwise just use the upper half's.
27774 // Double the width of the result until we are at target width.
27775 while (CurrVT != VT) {
27776 int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
27777 int CurrNumElts = CurrVT.getVectorNumElements();
27778 MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
27779 MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
27780 SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
27782 // Check if the upper half of the input element is zero.
27783 if (CurrVT.is512BitVector()) {
27784 MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
27785 HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
27786 DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
27787 HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
27788 } else {
27789 HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
27790 DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
27792 HiZ = DAG.getBitcast(NextVT, HiZ);
27794 // Move the upper/lower halves to the lower bits as we'll be extending to
27795 // NextVT. Mask the lower result to zero if HiZ is true and add the results
27796 // together.
27797 SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
27798 SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
27799 SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
27800 R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
27801 Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
27802 CurrVT = NextVT;
27805 return Res;
27808 static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
27809 const X86Subtarget &Subtarget,
27810 SelectionDAG &DAG) {
27811 MVT VT = Op.getSimpleValueType();
27813 if (Subtarget.hasCDI() &&
27814 // vXi8 vectors need to be promoted to 512-bits for vXi32.
27815 (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
27816 return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);
27818 // Decompose 256-bit ops into smaller 128-bit ops.
27819 if (VT.is256BitVector() && !Subtarget.hasInt256())
27820 return splitVectorIntUnary(Op, DAG);
27822 // Decompose 512-bit ops into smaller 256-bit ops.
27823 if (VT.is512BitVector() && !Subtarget.hasBWI())
27824 return splitVectorIntUnary(Op, DAG);
27826 assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
27827 return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
27830 static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
27831 SelectionDAG &DAG) {
27832 MVT VT = Op.getSimpleValueType();
27833 MVT OpVT = VT;
27834 unsigned NumBits = VT.getSizeInBits();
27835 SDLoc dl(Op);
27836 unsigned Opc = Op.getOpcode();
27838 if (VT.isVector())
27839 return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
27841 Op = Op.getOperand(0);
27842 if (VT == MVT::i8) {
27843     // Zero extend to i32 since there is no i8 bsr.
27844 OpVT = MVT::i32;
27845 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
27848 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
27849 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
27850 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
27852 if (Opc == ISD::CTLZ) {
27853 // If src is zero (i.e. bsr sets ZF), returns NumBits.
27854 SDValue Ops[] = {Op, DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
27855 DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
27856 Op.getValue(1)};
27857 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
27860 // Finally xor with NumBits-1.
27861 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
27862 DAG.getConstant(NumBits - 1, dl, OpVT));
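  // BSR returns the index of the highest set bit, so CTLZ(x) is
  // (NumBits - 1) ^ BSR(x). For CTLZ (not CTLZ_ZERO_UNDEF) the CMOV above
  // substitutes 2 * NumBits - 1 for a zero input, which the XOR maps back to
  // NumBits (e.g. 63 ^ 31 == 32).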
27864 if (VT == MVT::i8)
27865 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
27866 return Op;
27869 static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
27870 SelectionDAG &DAG) {
27871 MVT VT = Op.getSimpleValueType();
27872 unsigned NumBits = VT.getScalarSizeInBits();
27873 SDValue N0 = Op.getOperand(0);
27874 SDLoc dl(Op);
27876 assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
27877 "Only scalar CTTZ requires custom lowering");
27879 // Issue a bsf (scan bits forward) which also sets EFLAGS.
27880 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
27881 Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);
27883 // If src is known never zero we can skip the CMOV.
27884 if (DAG.isKnownNeverZero(N0))
27885 return Op;
27887 // If src is zero (i.e. bsf sets ZF), returns NumBits.
27888 SDValue Ops[] = {Op, DAG.getConstant(NumBits, dl, VT),
27889 DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
27890 Op.getValue(1)};
27891 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
27894 static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
27895 const X86Subtarget &Subtarget) {
27896 MVT VT = Op.getSimpleValueType();
27897 if (VT == MVT::i16 || VT == MVT::i32)
27898 return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
27900 if (VT == MVT::v32i16 || VT == MVT::v64i8)
27901 return splitVectorIntBinary(Op, DAG);
27903 assert(Op.getSimpleValueType().is256BitVector() &&
27904 Op.getSimpleValueType().isInteger() &&
27905 "Only handle AVX 256-bit vector integer operation");
27906 return splitVectorIntBinary(Op, DAG);
27909 static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
27910 const X86Subtarget &Subtarget) {
27911 MVT VT = Op.getSimpleValueType();
27912 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
27913 unsigned Opcode = Op.getOpcode();
27914 SDLoc DL(Op);
27916 if (VT == MVT::v32i16 || VT == MVT::v64i8 ||
27917 (VT.is256BitVector() && !Subtarget.hasInt256())) {
27918 assert(Op.getSimpleValueType().isInteger() &&
27919 "Only handle AVX vector integer operation");
27920 return splitVectorIntBinary(Op, DAG);
27923 // Avoid the generic expansion with min/max if we don't have pminu*/pmaxu*.
27924 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
27925 EVT SetCCResultType =
27926 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
27928 unsigned BitWidth = VT.getScalarSizeInBits();
27929 if (Opcode == ISD::USUBSAT) {
27930 if (!TLI.isOperationLegal(ISD::UMAX, VT) || useVPTERNLOG(Subtarget, VT)) {
27931 // Handle a special-case with a bit-hack instead of cmp+select:
27932 // usubsat X, SMIN --> (X ^ SMIN) & (X s>> BW-1)
27933 // If the target can use VPTERNLOG, DAGToDAG will match this as
27934 // "vpsra + vpternlog" which is better than "vpmax + vpsub" with a
27935 // "broadcast" constant load.
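      // Proof sketch: if X u>= SMIN then the sign bit is set, so X s>> BW-1 is
      // all-ones and X ^ SMIN == X - SMIN; otherwise the shift is zero and the
      // result is 0, matching the saturating subtraction.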
27936 ConstantSDNode *C = isConstOrConstSplat(Y, true);
27937 if (C && C->getAPIntValue().isSignMask()) {
27938 SDValue SignMask = DAG.getConstant(C->getAPIntValue(), DL, VT);
27939 SDValue ShiftAmt = DAG.getConstant(BitWidth - 1, DL, VT);
27940 SDValue Xor = DAG.getNode(ISD::XOR, DL, VT, X, SignMask);
27941 SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, X, ShiftAmt);
27942 return DAG.getNode(ISD::AND, DL, VT, Xor, Sra);
27945 if (!TLI.isOperationLegal(ISD::UMAX, VT)) {
27946 // usubsat X, Y --> (X >u Y) ? X - Y : 0
27947 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, X, Y);
27948 SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Y, ISD::SETUGT);
27949 // TODO: Move this to DAGCombiner?
27950 if (SetCCResultType == VT &&
27951 DAG.ComputeNumSignBits(Cmp) == VT.getScalarSizeInBits())
27952 return DAG.getNode(ISD::AND, DL, VT, Cmp, Sub);
27953 return DAG.getSelect(DL, VT, Cmp, Sub, DAG.getConstant(0, DL, VT));
27957 if ((Opcode == ISD::SADDSAT || Opcode == ISD::SSUBSAT) &&
27958 (!VT.isVector() || VT == MVT::v2i64)) {
27959 APInt MinVal = APInt::getSignedMinValue(BitWidth);
27960 APInt MaxVal = APInt::getSignedMaxValue(BitWidth);
27961 SDValue Zero = DAG.getConstant(0, DL, VT);
27962 SDValue Result =
27963 DAG.getNode(Opcode == ISD::SADDSAT ? ISD::SADDO : ISD::SSUBO, DL,
27964 DAG.getVTList(VT, SetCCResultType), X, Y);
27965 SDValue SumDiff = Result.getValue(0);
27966 SDValue Overflow = Result.getValue(1);
27967 SDValue SatMin = DAG.getConstant(MinVal, DL, VT);
27968 SDValue SatMax = DAG.getConstant(MaxVal, DL, VT);
27969 SDValue SumNeg =
27970 DAG.getSetCC(DL, SetCCResultType, SumDiff, Zero, ISD::SETLT);
27971 Result = DAG.getSelect(DL, VT, SumNeg, SatMax, SatMin);
27972 return DAG.getSelect(DL, VT, Overflow, Result, SumDiff);
27975 // Use default expansion.
27976 return SDValue();
27979 static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
27980 SelectionDAG &DAG) {
27981 MVT VT = Op.getSimpleValueType();
27982 if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
27983 // Since X86 does not have CMOV for 8-bit integer, we don't convert
27984 // 8-bit integer abs to NEG and CMOV.
27985 SDLoc DL(Op);
27986 SDValue N0 = Op.getOperand(0);
27987 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
27988 DAG.getConstant(0, DL, VT), N0);
27989 SDValue Ops[] = {N0, Neg, DAG.getTargetConstant(X86::COND_NS, DL, MVT::i8),
27990 SDValue(Neg.getNode(), 1)};
27991 return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
27994 // ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
27995 if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
27996 SDLoc DL(Op);
27997 SDValue Src = Op.getOperand(0);
27998 SDValue Sub =
27999 DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
28000 return DAG.getNode(X86ISD::BLENDV, DL, VT, Src, Sub, Src);
28003 if (VT.is256BitVector() && !Subtarget.hasInt256()) {
28004 assert(VT.isInteger() &&
28005 "Only handle AVX 256-bit vector integer operation");
28006 return splitVectorIntUnary(Op, DAG);
28009 if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
28010 return splitVectorIntUnary(Op, DAG);
28012 // Default to expand.
28013 return SDValue();
28016 static SDValue LowerAVG(SDValue Op, const X86Subtarget &Subtarget,
28017 SelectionDAG &DAG) {
28018 MVT VT = Op.getSimpleValueType();
28020 // For AVX1 cases, split to use legal ops.
28021 if (VT.is256BitVector() && !Subtarget.hasInt256())
28022 return splitVectorIntBinary(Op, DAG);
28024 if (VT == MVT::v32i16 || VT == MVT::v64i8)
28025 return splitVectorIntBinary(Op, DAG);
28027 // Default to expand.
28028 return SDValue();
28031 static SDValue LowerMINMAX(SDValue Op, const X86Subtarget &Subtarget,
28032 SelectionDAG &DAG) {
28033 MVT VT = Op.getSimpleValueType();
28035 // For AVX1 cases, split to use legal ops.
28036 if (VT.is256BitVector() && !Subtarget.hasInt256())
28037 return splitVectorIntBinary(Op, DAG);
28039 if (VT == MVT::v32i16 || VT == MVT::v64i8)
28040 return splitVectorIntBinary(Op, DAG);
28042 // Default to expand.
28043 return SDValue();
28046 static SDValue LowerFMINIMUM_FMAXIMUM(SDValue Op, const X86Subtarget &Subtarget,
28047 SelectionDAG &DAG) {
28048 assert((Op.getOpcode() == ISD::FMAXIMUM || Op.getOpcode() == ISD::FMINIMUM) &&
28049 "Expected FMAXIMUM or FMINIMUM opcode");
28050 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28051 EVT VT = Op.getValueType();
28052 SDValue X = Op.getOperand(0);
28053 SDValue Y = Op.getOperand(1);
28054 SDLoc DL(Op);
28055 uint64_t SizeInBits = VT.getScalarSizeInBits();
28056 APInt PreferredZero = APInt::getZero(SizeInBits);
28057 APInt OppositeZero = PreferredZero;
28058 EVT IVT = VT.changeTypeToInteger();
28059 X86ISD::NodeType MinMaxOp;
28060 if (Op.getOpcode() == ISD::FMAXIMUM) {
28061 MinMaxOp = X86ISD::FMAX;
28062 OppositeZero.setSignBit();
28063 } else {
28064 PreferredZero.setSignBit();
28065 MinMaxOp = X86ISD::FMIN;
28067 EVT SetCCType =
28068 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
28070 // The tables below show the expected result of Max in cases of NaN and
28071 // signed zeros.
28073   //                   Y                           Y
28074   //              Num     xNaN                +0       -0
28075   //             ---------------             ----------------
28076   //     Num     | Max |   Y  |         +0   |  +0  |  +0  |
28077   //  X          ---------------    X        ----------------
28078   //    xNaN     |  X  |  X/Y |         -0   |  +0  |  -0  |
28079   //             ---------------             ----------------
28081 // It is achieved by means of FMAX/FMIN with preliminary checks and operand
28082 // reordering.
28084   // We check if any of the operands is NaN and return NaN. Then we check if
28085   // any of the operands is zero or negative zero (for fmaximum and fminimum
28086   // respectively) to ensure the correct zero is returned.
28087 auto MatchesZero = [](SDValue Op, APInt Zero) {
28088 Op = peekThroughBitcasts(Op);
28089 if (auto *CstOp = dyn_cast<ConstantFPSDNode>(Op))
28090 return CstOp->getValueAPF().bitcastToAPInt() == Zero;
28091 if (auto *CstOp = dyn_cast<ConstantSDNode>(Op))
28092 return CstOp->getAPIntValue() == Zero;
28093 if (Op->getOpcode() == ISD::BUILD_VECTOR ||
28094 Op->getOpcode() == ISD::SPLAT_VECTOR) {
28095 for (const SDValue &OpVal : Op->op_values()) {
28096 if (OpVal.isUndef())
28097 continue;
28098 auto *CstOp = dyn_cast<ConstantFPSDNode>(OpVal);
28099 if (!CstOp)
28100 return false;
28101 if (!CstOp->getValueAPF().isZero())
28102 continue;
28103 if (CstOp->getValueAPF().bitcastToAPInt() != Zero)
28104 return false;
28106 return true;
28108 return false;
28111 bool IsXNeverNaN = DAG.isKnownNeverNaN(X);
28112 bool IsYNeverNaN = DAG.isKnownNeverNaN(Y);
28113 bool IgnoreSignedZero = DAG.getTarget().Options.NoSignedZerosFPMath ||
28114 Op->getFlags().hasNoSignedZeros() ||
28115 DAG.isKnownNeverZeroFloat(X) ||
28116 DAG.isKnownNeverZeroFloat(Y);
28117 SDValue NewX, NewY;
28118 if (IgnoreSignedZero || MatchesZero(Y, PreferredZero) ||
28119 MatchesZero(X, OppositeZero)) {
28120 // Operands are already in right order or order does not matter.
28121 NewX = X;
28122 NewY = Y;
28123 } else if (MatchesZero(X, PreferredZero) || MatchesZero(Y, OppositeZero)) {
28124 NewX = Y;
28125 NewY = X;
28126 } else if (!VT.isVector() && (VT == MVT::f16 || Subtarget.hasDQI()) &&
28127 (Op->getFlags().hasNoNaNs() || IsXNeverNaN || IsYNeverNaN)) {
28128 if (IsXNeverNaN)
28129 std::swap(X, Y);
28130     // VFPCLASSS consumes a vector type, so provide a minimal one
28131     // corresponding to an xmm register.
28132 MVT VectorType = MVT::getVectorVT(VT.getSimpleVT(), 128 / SizeInBits);
28133 SDValue VX = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VectorType, X);
28134 // Bits of classes:
28135 // Bits Imm8[0] Imm8[1] Imm8[2] Imm8[3] Imm8[4] Imm8[5] Imm8[6] Imm8[7]
28136 // Class QNAN PosZero NegZero PosINF NegINF Denormal Negative SNAN
28137 SDValue Imm = DAG.getTargetConstant(MinMaxOp == X86ISD::FMAX ? 0b11 : 0b101,
28138 DL, MVT::i32);
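    // With bit0 = QNaN, bit1 = +0 and bit2 = -0 (see the table above), the
    // immediate tests QNaN | +0 (0b011) for FMAXIMUM and QNaN | -0 (0b101)
    // for FMINIMUM.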
28139 SDValue IsNanZero = DAG.getNode(X86ISD::VFPCLASSS, DL, MVT::v1i1, VX, Imm);
28140 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i1,
28141 DAG.getConstant(0, DL, MVT::v8i1), IsNanZero,
28142 DAG.getIntPtrConstant(0, DL));
28143 SDValue NeedSwap = DAG.getBitcast(MVT::i8, Ins);
28144 NewX = DAG.getSelect(DL, VT, NeedSwap, Y, X);
28145 NewY = DAG.getSelect(DL, VT, NeedSwap, X, Y);
28146 return DAG.getNode(MinMaxOp, DL, VT, NewX, NewY, Op->getFlags());
28147 } else {
28148 SDValue IsXSigned;
28149 if (Subtarget.is64Bit() || VT != MVT::f64) {
28150 SDValue XInt = DAG.getNode(ISD::BITCAST, DL, IVT, X);
28151 SDValue ZeroCst = DAG.getConstant(0, DL, IVT);
28152 IsXSigned = DAG.getSetCC(DL, SetCCType, XInt, ZeroCst, ISD::SETLT);
28153 } else {
28154 assert(VT == MVT::f64);
28155 SDValue Ins = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v2f64,
28156 DAG.getConstantFP(0, DL, MVT::v2f64), X,
28157 DAG.getIntPtrConstant(0, DL));
28158 SDValue VX = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, Ins);
28159 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VX,
28160 DAG.getIntPtrConstant(1, DL));
28161 Hi = DAG.getBitcast(MVT::i32, Hi);
28162 SDValue ZeroCst = DAG.getConstant(0, DL, MVT::i32);
28163 EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(),
28164 *DAG.getContext(), MVT::i32);
28165 IsXSigned = DAG.getSetCC(DL, SetCCType, Hi, ZeroCst, ISD::SETLT);
28167 if (MinMaxOp == X86ISD::FMAX) {
28168 NewX = DAG.getSelect(DL, VT, IsXSigned, X, Y);
28169 NewY = DAG.getSelect(DL, VT, IsXSigned, Y, X);
28170 } else {
28171 NewX = DAG.getSelect(DL, VT, IsXSigned, Y, X);
28172 NewY = DAG.getSelect(DL, VT, IsXSigned, X, Y);
28176 bool IgnoreNaN = DAG.getTarget().Options.NoNaNsFPMath ||
28177 Op->getFlags().hasNoNaNs() || (IsXNeverNaN && IsYNeverNaN);
28179   // If we did not have to reorder the operands for signed-zero handling, NaN
28180   // still needs to be handled, and the second operand is known not to be NaN,
28181   // then put it in the first operand so no NaN fixup is needed after max/min.
28182 if (IgnoreSignedZero && !IgnoreNaN && DAG.isKnownNeverNaN(NewY))
28183 std::swap(NewX, NewY);
28185 SDValue MinMax = DAG.getNode(MinMaxOp, DL, VT, NewX, NewY, Op->getFlags());
28187 if (IgnoreNaN || DAG.isKnownNeverNaN(NewX))
28188 return MinMax;
28190 SDValue IsNaN = DAG.getSetCC(DL, SetCCType, NewX, NewX, ISD::SETUO);
28191 return DAG.getSelect(DL, VT, IsNaN, NewX, MinMax);
28194 static SDValue LowerABD(SDValue Op, const X86Subtarget &Subtarget,
28195 SelectionDAG &DAG) {
28196 MVT VT = Op.getSimpleValueType();
28198 // For AVX1 cases, split to use legal ops.
28199 if (VT.is256BitVector() && !Subtarget.hasInt256())
28200 return splitVectorIntBinary(Op, DAG);
28202 if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.useBWIRegs())
28203 return splitVectorIntBinary(Op, DAG);
28205 SDLoc dl(Op);
28206 bool IsSigned = Op.getOpcode() == ISD::ABDS;
28207 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28209 // TODO: Move to TargetLowering expandABD() once we have ABD promotion.
28210 if (VT.isScalarInteger()) {
28211 unsigned WideBits = std::max<unsigned>(2 * VT.getScalarSizeInBits(), 32u);
28212 MVT WideVT = MVT::getIntegerVT(WideBits);
28213 if (TLI.isTypeLegal(WideVT)) {
28214 // abds(lhs, rhs) -> trunc(abs(sub(sext(lhs), sext(rhs))))
28215 // abdu(lhs, rhs) -> trunc(abs(sub(zext(lhs), zext(rhs))))
28216 unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
28217 SDValue LHS = DAG.getNode(ExtOpc, dl, WideVT, Op.getOperand(0));
28218 SDValue RHS = DAG.getNode(ExtOpc, dl, WideVT, Op.getOperand(1));
28219 SDValue Diff = DAG.getNode(ISD::SUB, dl, WideVT, LHS, RHS);
28220 SDValue AbsDiff = DAG.getNode(ISD::ABS, dl, WideVT, Diff);
28221 return DAG.getNode(ISD::TRUNCATE, dl, VT, AbsDiff);
28225 // TODO: Move to TargetLowering expandABD().
28226 if (!Subtarget.hasSSE41() &&
28227 ((IsSigned && VT == MVT::v16i8) || VT == MVT::v4i32)) {
28228 SDValue LHS = DAG.getFreeze(Op.getOperand(0));
28229 SDValue RHS = DAG.getFreeze(Op.getOperand(1));
28230 ISD::CondCode CC = IsSigned ? ISD::CondCode::SETGT : ISD::CondCode::SETUGT;
28231 SDValue Cmp = DAG.getSetCC(dl, VT, LHS, RHS, CC);
28232 SDValue Diff0 = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
28233 SDValue Diff1 = DAG.getNode(ISD::SUB, dl, VT, RHS, LHS);
28234 return getBitSelect(dl, VT, Diff0, Diff1, Cmp, DAG);
28237 // Default to expand.
28238 return SDValue();
28241 static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
28242 SelectionDAG &DAG) {
28243 SDLoc dl(Op);
28244 MVT VT = Op.getSimpleValueType();
28246 // Decompose 256-bit ops into 128-bit ops.
28247 if (VT.is256BitVector() && !Subtarget.hasInt256())
28248 return splitVectorIntBinary(Op, DAG);
28250 if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
28251 return splitVectorIntBinary(Op, DAG);
28253 SDValue A = Op.getOperand(0);
28254 SDValue B = Op.getOperand(1);
28256 // Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
28257 // vector pairs, multiply and truncate.
28258 if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
28259 unsigned NumElts = VT.getVectorNumElements();
28261 if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
28262 (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
28263 MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
28264 return DAG.getNode(
28265 ISD::TRUNCATE, dl, VT,
28266 DAG.getNode(ISD::MUL, dl, ExVT,
28267 DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, A),
28268 DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, B)));
28271 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
28273     // Extract the lo/hi parts and any-extend them to i16.
28274 // We're going to mask off the low byte of each result element of the
28275 // pmullw, so it doesn't matter what's in the high byte of each 16-bit
28276 // element.
28277 SDValue Undef = DAG.getUNDEF(VT);
28278 SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef));
28279 SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef));
28281 SDValue BLo, BHi;
28282 if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
28283 // If the RHS is a constant, manually unpackl/unpackh.
28284 SmallVector<SDValue, 16> LoOps, HiOps;
28285 for (unsigned i = 0; i != NumElts; i += 16) {
28286 for (unsigned j = 0; j != 8; ++j) {
28287 LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl,
28288 MVT::i16));
28289 HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl,
28290 MVT::i16));
28294 BLo = DAG.getBuildVector(ExVT, dl, LoOps);
28295 BHi = DAG.getBuildVector(ExVT, dl, HiOps);
28296 } else {
28297 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef));
28298 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef));
28301 // Multiply, mask the lower 8bits of the lo/hi results and pack.
28302 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
28303 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
28304 return getPack(DAG, Subtarget, dl, VT, RLo, RHi);
28307 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
28308 if (VT == MVT::v4i32) {
28309 assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
28310 "Should not custom lower when pmulld is available!");
28312 // Extract the odd parts.
28313 static const int UnpackMask[] = { 1, -1, 3, -1 };
28314 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
28315 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
28317 // Multiply the even parts.
28318 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
28319 DAG.getBitcast(MVT::v2i64, A),
28320 DAG.getBitcast(MVT::v2i64, B));
28321 // Now multiply odd parts.
28322 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
28323 DAG.getBitcast(MVT::v2i64, Aodds),
28324 DAG.getBitcast(MVT::v2i64, Bodds));
28326 Evens = DAG.getBitcast(VT, Evens);
28327 Odds = DAG.getBitcast(VT, Odds);
28329 // Merge the two vectors back together with a shuffle. This expands into 2
28330 // shuffles.
28331 static const int ShufMask[] = { 0, 4, 2, 6 };
28332 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
28335 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
28336 "Only know how to lower V2I64/V4I64/V8I64 multiply");
28337 assert(!Subtarget.hasDQI() && "DQI should use MULLQ");
28339 // Ahi = psrlqi(a, 32);
28340 // Bhi = psrlqi(b, 32);
28342 // AloBlo = pmuludq(a, b);
28343 // AloBhi = pmuludq(a, Bhi);
28344 // AhiBlo = pmuludq(Ahi, b);
28346 // Hi = psllqi(AloBhi + AhiBlo, 32);
28347 // return AloBlo + Hi;
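  // This is just long multiplication done 32 bits at a time: with
  // A = Ahi * 2^32 + Alo and B = Bhi * 2^32 + Blo, the low 64 bits of A * B
  // are Alo * Blo + ((Alo * Bhi + Ahi * Blo) << 32); the Ahi * Bhi term
  // overflows out of the 64-bit result.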
28348 KnownBits AKnown = DAG.computeKnownBits(A);
28349 KnownBits BKnown = DAG.computeKnownBits(B);
28351 APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
28352 bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
28353 bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);
28355 APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
28356 bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
28357 bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);
28359 SDValue Zero = DAG.getConstant(0, dl, VT);
28361 // Only multiply lo/hi halves that aren't known to be zero.
28362 SDValue AloBlo = Zero;
28363 if (!ALoIsZero && !BLoIsZero)
28364 AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
28366 SDValue AloBhi = Zero;
28367 if (!ALoIsZero && !BHiIsZero) {
28368 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
28369 AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
28372 SDValue AhiBlo = Zero;
28373 if (!AHiIsZero && !BLoIsZero) {
28374 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
28375 AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
28378 SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
28379 Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);
28381 return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
28384 static SDValue LowervXi8MulWithUNPCK(SDValue A, SDValue B, const SDLoc &dl,
28385 MVT VT, bool IsSigned,
28386 const X86Subtarget &Subtarget,
28387 SelectionDAG &DAG,
28388 SDValue *Low = nullptr) {
28389 unsigned NumElts = VT.getVectorNumElements();
28391 // For vXi8 we will unpack the low and high half of each 128 bit lane to widen
28392 // to a vXi16 type. Do the multiplies, shift the results and pack the half
28393 // lane results back together.
28395 // We'll take different approaches for signed and unsigned.
28396 // For unsigned we'll use punpcklbw/punpckhbw to zero extend the bytes
28397 // and use pmullw to calculate the full 16-bit product.
28398 // For signed we'll use punpcklbw/punpckhbw to extend the bytes to words and
28399 // shift them left into the upper byte of each word. This allows us to use
28400 // pmulhw to calculate the full 16-bit product. This trick means we don't
28401 // need to sign extend the bytes to use pmullw.
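// The signed trick works because (a << 8) * (b << 8) == (a * b) << 16, so
// pmulhw of the byte-in-high-half words returns exactly the 16-bit product
// a * b.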
28403 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
28404 SDValue Zero = DAG.getConstant(0, dl, VT);
28406 SDValue ALo, AHi;
28407 if (IsSigned) {
28408 ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, Zero, A));
28409 AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, Zero, A));
28410 } else {
28411 ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Zero));
28412 AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Zero));
28415 SDValue BLo, BHi;
28416 if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
28417 // If the RHS is a constant, manually unpackl/unpackh and extend.
28418 SmallVector<SDValue, 16> LoOps, HiOps;
28419 for (unsigned i = 0; i != NumElts; i += 16) {
28420 for (unsigned j = 0; j != 8; ++j) {
28421 SDValue LoOp = B.getOperand(i + j);
28422 SDValue HiOp = B.getOperand(i + j + 8);
28424 if (IsSigned) {
28425 LoOp = DAG.getAnyExtOrTrunc(LoOp, dl, MVT::i16);
28426 HiOp = DAG.getAnyExtOrTrunc(HiOp, dl, MVT::i16);
28427 LoOp = DAG.getNode(ISD::SHL, dl, MVT::i16, LoOp,
28428 DAG.getConstant(8, dl, MVT::i16));
28429 HiOp = DAG.getNode(ISD::SHL, dl, MVT::i16, HiOp,
28430 DAG.getConstant(8, dl, MVT::i16));
28431 } else {
28432 LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
28433 HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
28436 LoOps.push_back(LoOp);
28437 HiOps.push_back(HiOp);
28441 BLo = DAG.getBuildVector(ExVT, dl, LoOps);
28442 BHi = DAG.getBuildVector(ExVT, dl, HiOps);
28443 } else if (IsSigned) {
28444 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, Zero, B));
28445 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, Zero, B));
28446 } else {
28447 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Zero));
28448 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Zero));
28451 // Multiply, lshr the upper 8bits to the lower 8bits of the lo/hi results and
28452 // pack back to vXi8.
28453 unsigned MulOpc = IsSigned ? ISD::MULHS : ISD::MUL;
28454 SDValue RLo = DAG.getNode(MulOpc, dl, ExVT, ALo, BLo);
28455 SDValue RHi = DAG.getNode(MulOpc, dl, ExVT, AHi, BHi);
28457 if (Low)
28458 *Low = getPack(DAG, Subtarget, dl, VT, RLo, RHi);
28460 return getPack(DAG, Subtarget, dl, VT, RLo, RHi, /*PackHiHalf*/ true);
28463 static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
28464 SelectionDAG &DAG) {
28465 SDLoc dl(Op);
28466 MVT VT = Op.getSimpleValueType();
28467 bool IsSigned = Op->getOpcode() == ISD::MULHS;
28468 unsigned NumElts = VT.getVectorNumElements();
28469 SDValue A = Op.getOperand(0);
28470 SDValue B = Op.getOperand(1);
28472 // Decompose 256-bit ops into 128-bit ops.
28473 if (VT.is256BitVector() && !Subtarget.hasInt256())
28474 return splitVectorIntBinary(Op, DAG);
28476 if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
28477 return splitVectorIntBinary(Op, DAG);
28479 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
28480 assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
28481 (VT == MVT::v8i32 && Subtarget.hasInt256()) ||
28482 (VT == MVT::v16i32 && Subtarget.hasAVX512()));
28484 // PMULxD operations multiply each even value (starting at 0) of LHS with
28485   // the related value of RHS and produce a widened result.
28486 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
28487 // => <2 x i64> <ae|cg>
28489   // In other words, to have all the results, we need to perform two PMULxD:
28490   // 1. one with the even values.
28491   // 2. one with the odd values.
28492   // To achieve #2, we need to place the odd values at an even position.
28494 // Place the odd value at an even position (basically, shift all values 1
28495 // step to the left):
28496 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1,
28497 9, -1, 11, -1, 13, -1, 15, -1};
28498 // <a|b|c|d> => <b|undef|d|undef>
28499 SDValue Odd0 =
28500 DAG.getVectorShuffle(VT, dl, A, A, ArrayRef(&Mask[0], NumElts));
28501 // <e|f|g|h> => <f|undef|h|undef>
28502 SDValue Odd1 =
28503 DAG.getVectorShuffle(VT, dl, B, B, ArrayRef(&Mask[0], NumElts));
28505 // Emit two multiplies, one for the lower 2 ints and one for the higher 2
28506 // ints.
28507 MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
28508 unsigned Opcode =
28509 (IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ;
28510 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
28511 // => <2 x i64> <ae|cg>
28512 SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
28513 DAG.getBitcast(MulVT, A),
28514 DAG.getBitcast(MulVT, B)));
28515 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
28516 // => <2 x i64> <bf|dh>
28517 SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
28518 DAG.getBitcast(MulVT, Odd0),
28519 DAG.getBitcast(MulVT, Odd1)));
28521 // Shuffle it back into the right order.
28522 SmallVector<int, 16> ShufMask(NumElts);
28523 for (int i = 0; i != (int)NumElts; ++i)
28524 ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;
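    // For v4i32 this is <1, 5, 3, 7>, i.e. take the high 32-bit halves of
    // <ae|cg> and <bf|dh> and interleave them back into element order
    // <aehi|bfhi|cghi|dhhi>.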
28526 SDValue Res = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, ShufMask);
28528 // If we have a signed multiply but no PMULDQ fix up the result of an
28529 // unsigned multiply.
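// Standard fixup: MULHS(a,b) == MULHU(a,b) - (a < 0 ? b : 0) - (b < 0 ? a : 0)
// in i32 arithmetic, so mask each operand by the other operand's sign and
// subtract the sum from the unsigned high result.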
28530 if (IsSigned && !Subtarget.hasSSE41()) {
28531 SDValue Zero = DAG.getConstant(0, dl, VT);
28532 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
28533 DAG.getSetCC(dl, VT, Zero, A, ISD::SETGT), B);
28534 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
28535 DAG.getSetCC(dl, VT, Zero, B, ISD::SETGT), A);
28537 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
28538 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Fixup);
28541 return Res;
28544 // Only i8 vectors should need custom lowering after this.
28545 assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
28546 (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
28547 "Unsupported vector type");
28549 // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
28550 // logical shift down the upper half and pack back to i8.
28552 // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
28553 // and then ashr/lshr the upper bits down to the lower bits before the multiply.
28555 if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
28556 (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
28557 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
28558 unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
28559 SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
28560 SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
28561 SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
28562 Mul = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
28563 return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
28566 return LowervXi8MulWithUNPCK(A, B, dl, VT, IsSigned, Subtarget, DAG);
28569 // Custom lowering for SMULO/UMULO.
28570 static SDValue LowerMULO(SDValue Op, const X86Subtarget &Subtarget,
28571 SelectionDAG &DAG) {
28572 MVT VT = Op.getSimpleValueType();
28574 // Scalars defer to LowerXALUO.
28575 if (!VT.isVector())
28576 return LowerXALUO(Op, DAG);
28578 SDLoc dl(Op);
28579 bool IsSigned = Op->getOpcode() == ISD::SMULO;
28580 SDValue A = Op.getOperand(0);
28581 SDValue B = Op.getOperand(1);
28582 EVT OvfVT = Op->getValueType(1);
28584 if ((VT == MVT::v32i8 && !Subtarget.hasInt256()) ||
28585 (VT == MVT::v64i8 && !Subtarget.hasBWI())) {
28586 // Extract the LHS Lo/Hi vectors
28587 SDValue LHSLo, LHSHi;
28588 std::tie(LHSLo, LHSHi) = splitVector(A, DAG, dl);
28590 // Extract the RHS Lo/Hi vectors
28591 SDValue RHSLo, RHSHi;
28592 std::tie(RHSLo, RHSHi) = splitVector(B, DAG, dl);
28594 EVT LoOvfVT, HiOvfVT;
28595 std::tie(LoOvfVT, HiOvfVT) = DAG.GetSplitDestVTs(OvfVT);
28596 SDVTList LoVTs = DAG.getVTList(LHSLo.getValueType(), LoOvfVT);
28597 SDVTList HiVTs = DAG.getVTList(LHSHi.getValueType(), HiOvfVT);
28599 // Issue the split operations.
28600 SDValue Lo = DAG.getNode(Op.getOpcode(), dl, LoVTs, LHSLo, RHSLo);
28601 SDValue Hi = DAG.getNode(Op.getOpcode(), dl, HiVTs, LHSHi, RHSHi);
28603 // Join the separate data results and the overflow results.
28604 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
28605 SDValue Ovf = DAG.getNode(ISD::CONCAT_VECTORS, dl, OvfVT, Lo.getValue(1),
28606 Hi.getValue(1));
28608 return DAG.getMergeValues({Res, Ovf}, dl);
28611 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28612 EVT SetccVT =
28613 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
28615 if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
28616 (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
28617 unsigned NumElts = VT.getVectorNumElements();
28618 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
28619 unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
28620 SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
28621 SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
28622 SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
28624 SDValue Low = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
28626 SDValue Ovf;
28627 if (IsSigned) {
28628 SDValue High, LowSign;
28629 if (OvfVT.getVectorElementType() == MVT::i1 &&
28630 (Subtarget.hasBWI() || Subtarget.canExtendTo512DQ())) {
28631 // Rather than truncating, try to do the compare on vXi16 or vXi32.
28632 // Shift the high down filling with sign bits.
28633 High = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Mul, 8, DAG);
28634 // Fill all 16 bits with the sign bit from the low.
28635 LowSign =
28636 getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExVT, Mul, 8, DAG);
28637 LowSign = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, LowSign,
28638 15, DAG);
28639 SetccVT = OvfVT;
28640 if (!Subtarget.hasBWI()) {
28641 // We can't do a vXi16 compare so sign extend to v16i32.
28642 High = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v16i32, High);
28643 LowSign = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v16i32, LowSign);
28645 } else {
28646 // Otherwise do the compare at vXi8.
28647 High = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
28648 High = DAG.getNode(ISD::TRUNCATE, dl, VT, High);
28649 LowSign =
28650 DAG.getNode(ISD::SRA, dl, VT, Low, DAG.getConstant(7, dl, VT));
28653 Ovf = DAG.getSetCC(dl, SetccVT, LowSign, High, ISD::SETNE);
28654 } else {
28655 SDValue High =
28656 getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
28657 if (OvfVT.getVectorElementType() == MVT::i1 &&
28658 (Subtarget.hasBWI() || Subtarget.canExtendTo512DQ())) {
28659 // Rather than truncating, try to do the compare on vXi16 or vXi32.
28660 SetccVT = OvfVT;
28661 if (!Subtarget.hasBWI()) {
28662 // We can't do a vXi16 compare so sign extend to v16i32.
28663 High = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v16i32, High);
28665 } else {
28666 // Otherwise do the compare at vXi8.
28667 High = DAG.getNode(ISD::TRUNCATE, dl, VT, High);
28670 Ovf =
28671 DAG.getSetCC(dl, SetccVT, High,
28672 DAG.getConstant(0, dl, High.getValueType()), ISD::SETNE);
28675 Ovf = DAG.getSExtOrTrunc(Ovf, dl, OvfVT);
28677 return DAG.getMergeValues({Low, Ovf}, dl);
28680 SDValue Low;
28681 SDValue High =
28682 LowervXi8MulWithUNPCK(A, B, dl, VT, IsSigned, Subtarget, DAG, &Low);
28684 SDValue Ovf;
28685 if (IsSigned) {
28686 // SMULO overflows if the high bits don't match the sign of the low.
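// E.g. for i8: 100 * 2 == 200 == 0x00C8; the low byte 0xC8 is negative but the
// high byte is 0, so the product doesn't fit in i8 and SMULO overflows.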
28687 SDValue LowSign =
28688 DAG.getNode(ISD::SRA, dl, VT, Low, DAG.getConstant(7, dl, VT));
28689 Ovf = DAG.getSetCC(dl, SetccVT, LowSign, High, ISD::SETNE);
28690 } else {
28691 // UMULO overflows if the high bits are non-zero.
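// E.g. for i8: 20 * 13 == 260 == 0x104; the non-zero high byte means the
// product doesn't fit in i8.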
28692 Ovf =
28693 DAG.getSetCC(dl, SetccVT, High, DAG.getConstant(0, dl, VT), ISD::SETNE);
28696 Ovf = DAG.getSExtOrTrunc(Ovf, dl, OvfVT);
28698 return DAG.getMergeValues({Low, Ovf}, dl);
28701 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
28702 assert(Subtarget.isTargetWin64() && "Unexpected target");
28703 EVT VT = Op.getValueType();
28704 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
28705 "Unexpected return type for lowering");
28707 if (isa<ConstantSDNode>(Op->getOperand(1))) {
28708 SmallVector<SDValue> Result;
28709 if (expandDIVREMByConstant(Op.getNode(), Result, MVT::i64, DAG))
28710 return DAG.getNode(ISD::BUILD_PAIR, SDLoc(Op), VT, Result[0], Result[1]);
28713 RTLIB::Libcall LC;
28714 bool isSigned;
28715 switch (Op->getOpcode()) {
28716 default: llvm_unreachable("Unexpected request for libcall!");
28717 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
28718 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
28719 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
28720 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
28723 SDLoc dl(Op);
28724 SDValue InChain = DAG.getEntryNode();
28726 TargetLowering::ArgListTy Args;
28727 TargetLowering::ArgListEntry Entry;
28728 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
28729 EVT ArgVT = Op->getOperand(i).getValueType();
28730 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
28731 "Unexpected argument type for lowering");
28732 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
28733 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
28734 MachinePointerInfo MPI =
28735 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
28736 Entry.Node = StackPtr;
28737 InChain =
28738 DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MPI, Align(16));
28739 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
28740 Entry.Ty = PointerType::get(ArgTy, 0);
28741 Entry.IsSExt = false;
28742 Entry.IsZExt = false;
28743 Args.push_back(Entry);
28746 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
28747 getPointerTy(DAG.getDataLayout()));
28749 TargetLowering::CallLoweringInfo CLI(DAG);
28750 CLI.setDebugLoc(dl)
28751 .setChain(InChain)
28752 .setLibCallee(
28753 getLibcallCallingConv(LC),
28754 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
28755 std::move(Args))
28756 .setInRegister()
28757 .setSExtResult(isSigned)
28758 .setZExtResult(!isSigned);
28760 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
28761 return DAG.getBitcast(VT, CallInfo.first);
28764 SDValue X86TargetLowering::LowerWin64_FP_TO_INT128(SDValue Op,
28765 SelectionDAG &DAG,
28766 SDValue &Chain) const {
28767 assert(Subtarget.isTargetWin64() && "Unexpected target");
28768 EVT VT = Op.getValueType();
28769 bool IsStrict = Op->isStrictFPOpcode();
28771 SDValue Arg = Op.getOperand(IsStrict ? 1 : 0);
28772 EVT ArgVT = Arg.getValueType();
28774 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
28775 "Unexpected return type for lowering");
28777 RTLIB::Libcall LC;
28778 if (Op->getOpcode() == ISD::FP_TO_SINT ||
28779 Op->getOpcode() == ISD::STRICT_FP_TO_SINT)
28780 LC = RTLIB::getFPTOSINT(ArgVT, VT);
28781 else
28782 LC = RTLIB::getFPTOUINT(ArgVT, VT);
28783 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected request for libcall!");
28785 SDLoc dl(Op);
28786 MakeLibCallOptions CallOptions;
28787 Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
28789 SDValue Result;
28790 // Expect the i128 result to be returned as a v2i64 in xmm0; cast it back to
28791 // the expected VT (i128).
28792 std::tie(Result, Chain) =
28793 makeLibCall(DAG, LC, MVT::v2i64, Arg, CallOptions, dl, Chain);
28794 Result = DAG.getBitcast(VT, Result);
28795 return Result;
28798 SDValue X86TargetLowering::LowerWin64_INT128_TO_FP(SDValue Op,
28799 SelectionDAG &DAG) const {
28800 assert(Subtarget.isTargetWin64() && "Unexpected target");
28801 EVT VT = Op.getValueType();
28802 bool IsStrict = Op->isStrictFPOpcode();
28804 SDValue Arg = Op.getOperand(IsStrict ? 1 : 0);
28805 EVT ArgVT = Arg.getValueType();
28807 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
28808 "Unexpected argument type for lowering");
28810 RTLIB::Libcall LC;
28811 if (Op->getOpcode() == ISD::SINT_TO_FP ||
28812 Op->getOpcode() == ISD::STRICT_SINT_TO_FP)
28813 LC = RTLIB::getSINTTOFP(ArgVT, VT);
28814 else
28815 LC = RTLIB::getUINTTOFP(ArgVT, VT);
28816 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected request for libcall!");
28818 SDLoc dl(Op);
28819 MakeLibCallOptions CallOptions;
28820 SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
28822 // Pass the i128 argument as an indirect argument on the stack.
28823 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
28824 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
28825 MachinePointerInfo MPI =
28826 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
28827 Chain = DAG.getStore(Chain, dl, Arg, StackPtr, MPI, Align(16));
28829 SDValue Result;
28830 std::tie(Result, Chain) =
28831 makeLibCall(DAG, LC, VT, StackPtr, CallOptions, dl, Chain);
28832 return IsStrict ? DAG.getMergeValues({Result, Chain}, dl) : Result;
28835 // Return true if the required (according to Opcode) shift-imm form is natively
28836 // supported by the Subtarget
28837 static bool supportedVectorShiftWithImm(EVT VT, const X86Subtarget &Subtarget,
28838 unsigned Opcode) {
28839 if (!VT.isSimple())
28840 return false;
28842 if (!(VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))
28843 return false;
28845 if (VT.getScalarSizeInBits() < 16)
28846 return false;
28848 if (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
28849 (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
28850 return true;
28852 bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
28853 (VT.is256BitVector() && Subtarget.hasInt256());
28855 bool AShift = LShift && (Subtarget.hasAVX512() ||
28856 (VT != MVT::v2i64 && VT != MVT::v4i64));
28857 return (Opcode == ISD::SRA) ? AShift : LShift;
28860 // The shift amount is a variable, but it is the same for all vector lanes.
28861 // These instructions are defined together with shift-immediate.
28862 static
28863 bool supportedVectorShiftWithBaseAmnt(EVT VT, const X86Subtarget &Subtarget,
28864 unsigned Opcode) {
28865 return supportedVectorShiftWithImm(VT, Subtarget, Opcode);
28868 // Return true if the required (according to Opcode) variable-shift form is
28869 // natively supported by the Subtarget
28870 static bool supportedVectorVarShift(EVT VT, const X86Subtarget &Subtarget,
28871 unsigned Opcode) {
28872 if (!VT.isSimple())
28873 return false;
28875 if (!(VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))
28876 return false;
28878 if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
28879 return false;
28881 // vXi16 is supported only on AVX-512 with BWI.
28882 if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
28883 return false;
28885 if (Subtarget.hasAVX512() &&
28886 (Subtarget.useAVX512Regs() || !VT.is512BitVector()))
28887 return true;
28889 bool LShift = VT.is128BitVector() || VT.is256BitVector();
28890 bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
28891 return (Opcode == ISD::SRA) ? AShift : LShift;
28894 static SDValue LowerShiftByScalarImmediate(SDValue Op, SelectionDAG &DAG,
28895 const X86Subtarget &Subtarget) {
28896 MVT VT = Op.getSimpleValueType();
28897 SDLoc dl(Op);
28898 SDValue R = Op.getOperand(0);
28899 SDValue Amt = Op.getOperand(1);
28900 unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);
28902 auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
28903 assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
28904 MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
28905 SDValue Ex = DAG.getBitcast(ExVT, R);
28907 // ashr(R, 63) === cmp_slt(R, 0)
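// Shifting right arithmetically by 63 broadcasts the sign bit, which is the
// same all-ones/all-zeros mask that the signed compare against zero produces.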
28908 if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
28909 assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
28910 "Unsupported PCMPGT op");
28911 return DAG.getNode(X86ISD::PCMPGT, dl, VT, DAG.getConstant(0, dl, VT), R);
28914 if (ShiftAmt >= 32) {
28915 // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
28916 SDValue Upper =
28917 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
28918 SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
28919 ShiftAmt - 32, DAG);
28920 if (VT == MVT::v2i64)
28921 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
28922 if (VT == MVT::v4i64)
28923 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
28924 {9, 1, 11, 3, 13, 5, 15, 7});
28925 } else {
28926 // SRA upper i32, SRL whole i64 and select lower i32.
28927 SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
28928 ShiftAmt, DAG);
28929 SDValue Lower =
28930 getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
28931 Lower = DAG.getBitcast(ExVT, Lower);
28932 if (VT == MVT::v2i64)
28933 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
28934 if (VT == MVT::v4i64)
28935 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
28936 {8, 1, 10, 3, 12, 5, 14, 7});
28938 return DAG.getBitcast(VT, Ex);
28941 // Optimize shl/srl/sra with constant shift amount.
28942 APInt APIntShiftAmt;
28943 if (!X86::isConstantSplat(Amt, APIntShiftAmt))
28944 return SDValue();
28946 // If the shift amount is out of range, return undef.
28947 if (APIntShiftAmt.uge(VT.getScalarSizeInBits()))
28948 return DAG.getUNDEF(VT);
28950 uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();
28952 if (supportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode())) {
28953 // Hardware support for vector shifts is sparse, which makes us scalarize the
28954 // vector operations in many cases. Also, on Sandy Bridge ADD is faster than
28955 // shl: (shl V, 1) -> (add (freeze V), (freeze V))
28956 if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1) {
28957 // R may be undef at run-time, but (shl R, 1) must be an even number (LSB
28958 // must be 0). (add undef, undef) however can be any value. To make this
28959 // safe, we must freeze R to ensure that register allocation uses the same
28960 // register for an undefined value. This ensures that the result will
28961 // still be even and preserves the original semantics.
28962 R = DAG.getFreeze(R);
28963 return DAG.getNode(ISD::ADD, dl, VT, R, R);
28966 return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
28969 // i64 SRA needs to be performed as partial shifts.
28970 if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
28971 (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
28972 Op.getOpcode() == ISD::SRA)
28973 return ArithmeticShiftRight64(ShiftAmt);
28975 if (VT == MVT::v16i8 || (Subtarget.hasInt256() && VT == MVT::v32i8) ||
28976 (Subtarget.hasBWI() && VT == MVT::v64i8)) {
28977 unsigned NumElts = VT.getVectorNumElements();
28978 MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
28980 // Simple i8 add case
28981 if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1) {
28982 // R may be undef at run-time, but (shl R, 1) must be an even number (LSB
28983 // must be 0). (add undef, undef) however can be any value. To make this
28984 // safe, we must freeze R to ensure that register allocation uses the same
28985 // register for an undefined value. This ensures that the result will
28986 // still be even and preserves the original semantics.
28987 R = DAG.getFreeze(R);
28988 return DAG.getNode(ISD::ADD, dl, VT, R, R);
28991 // ashr(R, 7) === cmp_slt(R, 0)
28992 if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
28993 SDValue Zeros = DAG.getConstant(0, dl, VT);
28994 if (VT.is512BitVector()) {
28995 assert(VT == MVT::v64i8 && "Unexpected element type!");
28996 SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R, ISD::SETGT);
28997 return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
28999 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
29002 // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
29003 if (VT == MVT::v16i8 && Subtarget.hasXOP())
29004 return SDValue();
29006 if (Op.getOpcode() == ISD::SHL) {
29007 // Make a large shift.
29008 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R,
29009 ShiftAmt, DAG);
29010 SHL = DAG.getBitcast(VT, SHL);
29011 // Zero out the rightmost bits.
29012 APInt Mask = APInt::getHighBitsSet(8, 8 - ShiftAmt);
29013 return DAG.getNode(ISD::AND, dl, VT, SHL, DAG.getConstant(Mask, dl, VT));
29015 if (Op.getOpcode() == ISD::SRL) {
29016 // Make a large shift.
29017 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT, R,
29018 ShiftAmt, DAG);
29019 SRL = DAG.getBitcast(VT, SRL);
29020 // Zero out the leftmost bits.
29021 APInt Mask = APInt::getLowBitsSet(8, 8 - ShiftAmt);
29022 return DAG.getNode(ISD::AND, dl, VT, SRL, DAG.getConstant(Mask, dl, VT));
29024 if (Op.getOpcode() == ISD::SRA) {
29025 // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
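// E.g. for i8 with Amt == 4: R == 0x90 (-112) gives lshr == 0x09, Mask ==
// 128 >> 4 == 0x08, xor == 0x01, sub == 0xF9 (-7) == -112 >> 4 (arithmetic).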
29026 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
29028 SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
29029 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
29030 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
29031 return Res;
29033 llvm_unreachable("Unknown shift opcode.");
29036 return SDValue();
29039 static SDValue LowerShiftByScalarVariable(SDValue Op, SelectionDAG &DAG,
29040 const X86Subtarget &Subtarget) {
29041 MVT VT = Op.getSimpleValueType();
29042 SDLoc dl(Op);
29043 SDValue R = Op.getOperand(0);
29044 SDValue Amt = Op.getOperand(1);
29045 unsigned Opcode = Op.getOpcode();
29046 unsigned X86OpcI = getTargetVShiftUniformOpcode(Opcode, false);
29048 int BaseShAmtIdx = -1;
29049 if (SDValue BaseShAmt = DAG.getSplatSourceVector(Amt, BaseShAmtIdx)) {
29050 if (supportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode))
29051 return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, BaseShAmtIdx,
29052 Subtarget, DAG);
29054 // vXi8 shifts - shift as v8i16 + mask result.
29055 if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) ||
29056 (VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) ||
29057 VT == MVT::v64i8) &&
29058 !Subtarget.hasXOP()) {
29059 unsigned NumElts = VT.getVectorNumElements();
29060 MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
29061 if (supportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) {
29062 unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL);
29063 unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false);
29065 // Create the mask using vXi16 shifts. For shift-rights we need to move
29066 // the upper byte down before splatting the vXi8 mask.
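// E.g. for a logical right shift by 3: 0xFFFF >> 3 == 0x1FFF, >> 8 == 0x001F,
// so the splatted byte mask is 0x1F - exactly the bits a per-byte lshr by 3
// may keep.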
29067 SDValue BitMask = DAG.getConstant(-1, dl, ExtVT);
29068 BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask,
29069 BaseShAmt, BaseShAmtIdx, Subtarget, DAG);
29070 if (Opcode != ISD::SHL)
29071 BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask,
29072 8, DAG);
29073 BitMask = DAG.getBitcast(VT, BitMask);
29074 BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask,
29075 SmallVector<int, 64>(NumElts, 0));
29077 SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT,
29078 DAG.getBitcast(ExtVT, R), BaseShAmt,
29079 BaseShAmtIdx, Subtarget, DAG);
29080 Res = DAG.getBitcast(VT, Res);
29081 Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask);
29083 if (Opcode == ISD::SRA) {
29084 // ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask)
29085 // SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW.
29086 SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT);
29087 SignMask =
29088 getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask, BaseShAmt,
29089 BaseShAmtIdx, Subtarget, DAG);
29090 SignMask = DAG.getBitcast(VT, SignMask);
29091 Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask);
29092 Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask);
29094 return Res;
29099 return SDValue();
29102 // Convert a shift/rotate left amount to a multiplication scale factor.
29103 static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
29104 const X86Subtarget &Subtarget,
29105 SelectionDAG &DAG) {
29106 MVT VT = Amt.getSimpleValueType();
29107 if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
29108 (Subtarget.hasInt256() && VT == MVT::v16i16) ||
29109 (Subtarget.hasAVX512() && VT == MVT::v32i16) ||
29110 (!Subtarget.hasAVX512() && VT == MVT::v16i8) ||
29111 (Subtarget.hasInt256() && VT == MVT::v32i8) ||
29112 (Subtarget.hasBWI() && VT == MVT::v64i8)))
29113 return SDValue();
29115 MVT SVT = VT.getVectorElementType();
29116 unsigned SVTBits = SVT.getSizeInBits();
29117 unsigned NumElems = VT.getVectorNumElements();
29119 APInt UndefElts;
29120 SmallVector<APInt> EltBits;
29121 if (getTargetConstantBitsFromNode(Amt, SVTBits, UndefElts, EltBits)) {
29122 APInt One(SVTBits, 1);
29123 SmallVector<SDValue> Elts(NumElems, DAG.getUNDEF(SVT));
29124 for (unsigned I = 0; I != NumElems; ++I) {
29125 if (UndefElts[I] || EltBits[I].uge(SVTBits))
29126 continue;
29127 uint64_t ShAmt = EltBits[I].getZExtValue();
29128 Elts[I] = DAG.getConstant(One.shl(ShAmt), dl, SVT);
29130 return DAG.getBuildVector(VT, dl, Elts);
29133 // If the target doesn't support variable shifts, use either FP conversion
29134 // or integer multiplication to avoid shifting each element individually.
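// The v4i32 path below builds the scale 2^Amt directly in the float exponent:
// (Amt << 23) + 0x3f800000 reinterpreted as float is 1.0 * 2^Amt, e.g. Amt == 5
// gives 0x42000000 == 32.0f, and FP_TO_SINT recovers the integer scale.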
29135 if (VT == MVT::v4i32) {
29136 Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
29137 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
29138 DAG.getConstant(0x3f800000U, dl, VT));
29139 Amt = DAG.getBitcast(MVT::v4f32, Amt);
29140 return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
29143 // AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
29144 if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
29145 SDValue Z = DAG.getConstant(0, dl, VT);
29146 SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
29147 SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
29148 Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
29149 Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
29150 if (Subtarget.hasSSE41())
29151 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
29152 return getPack(DAG, Subtarget, dl, VT, Lo, Hi);
29155 return SDValue();
29158 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
29159 SelectionDAG &DAG) {
29160 MVT VT = Op.getSimpleValueType();
29161 SDLoc dl(Op);
29162 SDValue R = Op.getOperand(0);
29163 SDValue Amt = Op.getOperand(1);
29164 unsigned EltSizeInBits = VT.getScalarSizeInBits();
29165 bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
29167 unsigned Opc = Op.getOpcode();
29168 unsigned X86OpcV = getTargetVShiftUniformOpcode(Opc, true);
29169 unsigned X86OpcI = getTargetVShiftUniformOpcode(Opc, false);
29171 assert(VT.isVector() && "Custom lowering only for vector shifts!");
29172 assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
29174 if (SDValue V = LowerShiftByScalarImmediate(Op, DAG, Subtarget))
29175 return V;
29177 if (SDValue V = LowerShiftByScalarVariable(Op, DAG, Subtarget))
29178 return V;
29180 if (supportedVectorVarShift(VT, Subtarget, Opc))
29181 return Op;
29183 // i64 vector arithmetic shift can be emulated with the transform:
29184 // M = lshr(SIGN_MASK, Amt)
29185 // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
29186 if (((VT == MVT::v2i64 && !Subtarget.hasXOP()) ||
29187 (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
29188 Opc == ISD::SRA) {
29189 SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
29190 SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
29191 R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
29192 R = DAG.getNode(ISD::XOR, dl, VT, R, M);
29193 R = DAG.getNode(ISD::SUB, dl, VT, R, M);
29194 return R;
29197 // XOP has 128-bit variable logical/arithmetic shifts.
29198 // +ve/-ve Amt = shift left/right.
29199 if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 ||
29200 VT == MVT::v8i16 || VT == MVT::v16i8)) {
29201 if (Opc == ISD::SRL || Opc == ISD::SRA) {
29202 SDValue Zero = DAG.getConstant(0, dl, VT);
29203 Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
29205 if (Opc == ISD::SHL || Opc == ISD::SRL)
29206 return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
29207 if (Opc == ISD::SRA)
29208 return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
29211 // v2i64 vector logical shifts can efficiently avoid scalarization - do the
29212 // shifts per-lane and then shuffle the partial results back together.
29213 if (VT == MVT::v2i64 && Opc != ISD::SRA) {
29214 // Splat the shift amounts so the scalar shifts above will catch it.
29215 SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
29216 SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
29217 SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
29218 SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
29219 return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
29222 // If possible, lower this shift as a sequence of two shifts by
29223 // constant plus a BLENDing shuffle instead of scalarizing it.
29224 // Example:
29225 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
29227 // Could be rewritten as:
29228 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
29230 // The advantage is that the two shifts from the example would be
29231 // lowered as X86ISD::VSRLI nodes in parallel before blending.
29232 if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
29233 (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
29234 SDValue Amt1, Amt2;
29235 unsigned NumElts = VT.getVectorNumElements();
29236 SmallVector<int, 8> ShuffleMask;
29237 for (unsigned i = 0; i != NumElts; ++i) {
29238 SDValue A = Amt->getOperand(i);
29239 if (A.isUndef()) {
29240 ShuffleMask.push_back(SM_SentinelUndef);
29241 continue;
29243 if (!Amt1 || Amt1 == A) {
29244 ShuffleMask.push_back(i);
29245 Amt1 = A;
29246 continue;
29248 if (!Amt2 || Amt2 == A) {
29249 ShuffleMask.push_back(i + NumElts);
29250 Amt2 = A;
29251 continue;
29253 break;
29256 // Only perform this blend if we can perform it without loading a mask.
29257 if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
29258 (VT != MVT::v16i16 ||
29259 is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) &&
29260 (VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL ||
29261 canWidenShuffleElements(ShuffleMask))) {
29262 auto *Cst1 = dyn_cast<ConstantSDNode>(Amt1);
29263 auto *Cst2 = dyn_cast<ConstantSDNode>(Amt2);
29264 if (Cst1 && Cst2 && Cst1->getAPIntValue().ult(EltSizeInBits) &&
29265 Cst2->getAPIntValue().ult(EltSizeInBits)) {
29266 SDValue Shift1 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
29267 Cst1->getZExtValue(), DAG);
29268 SDValue Shift2 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
29269 Cst2->getZExtValue(), DAG);
29270 return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask);
29275 // If possible, lower this packed shift into a vector multiply instead of
29276 // expanding it into a sequence of scalar shifts.
29277 // For v32i8 cases, it might be quicker to split/extend to vXi16 shifts.
29278 if (Opc == ISD::SHL && !(VT == MVT::v32i8 && (Subtarget.hasXOP() ||
29279 Subtarget.canExtendTo512BW())))
29280 if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
29281 return DAG.getNode(ISD::MUL, dl, VT, R, Scale);
29283 // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we
29284 // can replace with ISD::MULHU, creating scale factor from (NumEltBits - Amt).
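// E.g. (srl x, 3) on i16 becomes mulhu(x, 1 << (16 - 3)): the high half of
// x * 8192 is x >> 3. A shift amount of 0 would need a 2^16 scale, hence the
// select against the unshifted value below.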
29285 if (Opc == ISD::SRL && ConstantAmt &&
29286 (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
29287 SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
29288 SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
29289 if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
29290 SDValue Zero = DAG.getConstant(0, dl, VT);
29291 SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ);
29292 SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale);
29293 return DAG.getSelect(dl, VT, ZAmt, R, Res);
29297 // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we
29298 // can replace with ISD::MULHS, creating scale factor from (NumEltBits - Amt).
29299 // TODO: Special case handling for shift by 0/1, really we can afford either
29300 // of these cases in pre-SSE41/XOP/AVX512 but not both.
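// E.g. (sra x, 3) on i16 becomes mulhs(x, 1 << (16 - 3)). A shift by 0 would
// need a 2^16 scale and a shift by 1 would need 1 << 15 (a negative i16
// multiplier), so both cases are handled by the selects below.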
29301 if (Opc == ISD::SRA && ConstantAmt &&
29302 (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
29303 ((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
29304 !Subtarget.hasAVX512()) ||
29305 DAG.isKnownNeverZero(Amt))) {
29306 SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
29307 SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
29308 if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
29309 SDValue Amt0 =
29310 DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
29311 SDValue Amt1 =
29312 DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
29313 SDValue Sra1 =
29314 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
29315 SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
29316 Res = DAG.getSelect(dl, VT, Amt0, R, Res);
29317 return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
29321 // v4i32 non-uniform shifts.
29322 // If the shift amount is constant, we can shift each lane using the SSE2
29323 // immediate shifts, else we need to zero-extend each lane to the lower i64
29324 // and shift using the SSE2 variable shifts.
29325 // The separate results can then be blended together.
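// Lane i of the final result must come from Ri; e.g. with SSE41 the shuffles
// below build <R0[0],*,R2[2],*> and <*,R1[1],*,R3[3]> and then merge them to
// <R0[0], R1[1], R2[2], R3[3]>.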
29326 if (VT == MVT::v4i32) {
29327 SDValue Amt0, Amt1, Amt2, Amt3;
29328 if (ConstantAmt) {
29329 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
29330 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
29331 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
29332 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
29333 } else {
29334 // The SSE2 shifts use the lower i64 as the same shift amount for
29335 // all lanes and the upper i64 is ignored. On AVX we're better off
29336 // just zero-extending, but for SSE just duplicating the top 16-bits is
29337 // cheaper and has the same effect for out of range values.
29338 if (Subtarget.hasAVX()) {
29339 SDValue Z = DAG.getConstant(0, dl, VT);
29340 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
29341 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
29342 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
29343 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
29344 } else {
29345 SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
29346 SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
29347 {4, 5, 6, 7, -1, -1, -1, -1});
29348 SDValue Msk02 = getV4X86ShuffleImm8ForMask({0, 1, 1, 1}, dl, DAG);
29349 SDValue Msk13 = getV4X86ShuffleImm8ForMask({2, 3, 3, 3}, dl, DAG);
29350 Amt0 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt01, Msk02);
29351 Amt1 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt01, Msk13);
29352 Amt2 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt23, Msk02);
29353 Amt3 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt23, Msk13);
29357 unsigned ShOpc = ConstantAmt ? Opc : X86OpcV;
29358 SDValue R0 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt0));
29359 SDValue R1 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt1));
29360 SDValue R2 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt2));
29361 SDValue R3 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt3));
29363 // Merge the shifted lane results optimally with/without PBLENDW.
29364 // TODO - ideally shuffle combining would handle this.
29365 if (Subtarget.hasSSE41()) {
29366 SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
29367 SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
29368 return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
29370 SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
29371 SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
29372 return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
29375 // It's worth extending once and using the vXi16/vXi32 shifts for smaller
29376 // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
29377 // make the existing SSE solution better.
29378 // NOTE: We honor the preferred vector width before promoting to 512-bits.
29379 if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
29380 (Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
29381 (Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
29382 (Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
29383 (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
29384 assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
29385 "Unexpected vector type");
29386 MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
29387 MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
29388 unsigned ExtOpc = Opc == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
29389 R = DAG.getNode(ExtOpc, dl, ExtVT, R);
29390 Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt);
29391 return DAG.getNode(ISD::TRUNCATE, dl, VT,
29392 DAG.getNode(Opc, dl, ExtVT, R, Amt));
29395 // Constant ISD::SRA/SRL can be performed efficiently on vXi8 vectors as we
29396 // extend to vXi16 to perform a MUL scale effectively as a MUL_LOHI.
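// E.g. (srl x, 3) becomes the high byte of zext16(x) * (1 << (8 - 3)):
// x * 32 == x << 5, and bits [8,16) of that are x >> 3. With a sign extension
// instead, the same high byte yields the arithmetic shift.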
29397 if (ConstantAmt && (Opc == ISD::SRA || Opc == ISD::SRL) &&
29398 (VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
29399 (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
29400 !Subtarget.hasXOP()) {
29401 int NumElts = VT.getVectorNumElements();
29402 SDValue Cst8 = DAG.getTargetConstant(8, dl, MVT::i8);
29404 // Extend constant shift amount to vXi16 (it doesn't matter if the type
29405 // isn't legal).
29406 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
29407 Amt = DAG.getZExtOrTrunc(Amt, dl, ExVT);
29408 Amt = DAG.getNode(ISD::SUB, dl, ExVT, DAG.getConstant(8, dl, ExVT), Amt);
29409 Amt = DAG.getNode(ISD::SHL, dl, ExVT, DAG.getConstant(1, dl, ExVT), Amt);
29410 assert(ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()) &&
29411 "Constant build vector expected");
29413 if (VT == MVT::v16i8 && Subtarget.hasInt256()) {
29414 bool IsSigned = Opc == ISD::SRA;
29415 R = DAG.getExtOrTrunc(IsSigned, R, dl, ExVT);
29416 R = DAG.getNode(ISD::MUL, dl, ExVT, R, Amt);
29417 R = DAG.getNode(X86ISD::VSRLI, dl, ExVT, R, Cst8);
29418 return DAG.getZExtOrTrunc(R, dl, VT);
29421 SmallVector<SDValue, 16> LoAmt, HiAmt;
29422 for (int i = 0; i != NumElts; i += 16) {
29423 for (int j = 0; j != 8; ++j) {
29424 LoAmt.push_back(Amt.getOperand(i + j));
29425 HiAmt.push_back(Amt.getOperand(i + j + 8));
29429 MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
29430 SDValue LoA = DAG.getBuildVector(VT16, dl, LoAmt);
29431 SDValue HiA = DAG.getBuildVector(VT16, dl, HiAmt);
29433 SDValue LoR = DAG.getBitcast(VT16, getUnpackl(DAG, dl, VT, R, R));
29434 SDValue HiR = DAG.getBitcast(VT16, getUnpackh(DAG, dl, VT, R, R));
29435 LoR = DAG.getNode(X86OpcI, dl, VT16, LoR, Cst8);
29436 HiR = DAG.getNode(X86OpcI, dl, VT16, HiR, Cst8);
29437 LoR = DAG.getNode(ISD::MUL, dl, VT16, LoR, LoA);
29438 HiR = DAG.getNode(ISD::MUL, dl, VT16, HiR, HiA);
29439 LoR = DAG.getNode(X86ISD::VSRLI, dl, VT16, LoR, Cst8);
29440 HiR = DAG.getNode(X86ISD::VSRLI, dl, VT16, HiR, Cst8);
29441 return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR);
29444 if (VT == MVT::v16i8 ||
29445 (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
29446 (VT == MVT::v64i8 && Subtarget.hasBWI())) {
29447 MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
29449 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
29450 if (VT.is512BitVector()) {
29451 // On AVX512BW targets we make use of the fact that VSELECT lowers
29452 // to a masked blend which selects bytes based just on the sign bit
29453 // extracted to a mask.
29454 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
29455 V0 = DAG.getBitcast(VT, V0);
29456 V1 = DAG.getBitcast(VT, V1);
29457 Sel = DAG.getBitcast(VT, Sel);
29458 Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel,
29459 ISD::SETGT);
29460 return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
29461 } else if (Subtarget.hasSSE41()) {
29462 // On SSE41 targets we can use PBLENDVB which selects bytes based just
29463 // on the sign bit.
29464 V0 = DAG.getBitcast(VT, V0);
29465 V1 = DAG.getBitcast(VT, V1);
29466 Sel = DAG.getBitcast(VT, Sel);
29467 return DAG.getBitcast(SelVT,
29468 DAG.getNode(X86ISD::BLENDV, dl, VT, Sel, V0, V1));
29470 // On pre-SSE41 targets we test for the sign bit by comparing to
29471 // zero - a negative value will set all bits of the lanes to true
29472 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
29473 SDValue Z = DAG.getConstant(0, dl, SelVT);
29474 SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
29475 return DAG.getSelect(dl, SelVT, C, V0, V1);
29478 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
29479 // We can safely do this using i16 shifts as we're only interested in
29480 // the 3 lower bits of each byte.
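// After the << 5 the MSB of each byte holds bit 2 of the shift amount; each
// SignBitSelect step below conditionally applies a shift of 4, then 2, then 1,
// and the 'a += a' moves the next lower amount bit into the MSB.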
29481 Amt = DAG.getBitcast(ExtVT, Amt);
29482 Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG);
29483 Amt = DAG.getBitcast(VT, Amt);
29485 if (Opc == ISD::SHL || Opc == ISD::SRL) {
29486 // r = VSELECT(r, shift(r, 4), a);
29487 SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
29488 R = SignBitSelect(VT, Amt, M, R);
29490 // a += a
29491 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
29493 // r = VSELECT(r, shift(r, 2), a);
29494 M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
29495 R = SignBitSelect(VT, Amt, M, R);
29497 // a += a
29498 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
29500 // return VSELECT(r, shift(r, 1), a);
29501 M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
29502 R = SignBitSelect(VT, Amt, M, R);
29503 return R;
29506 if (Opc == ISD::SRA) {
29507 // For SRA we need to unpack each byte to the higher byte of a i16 vector
29508 // so we can correctly sign extend. We don't care what happens to the
29509 // lower byte.
29510 SDValue ALo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
29511 SDValue AHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
29512 SDValue RLo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), R);
29513 SDValue RHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), R);
29514 ALo = DAG.getBitcast(ExtVT, ALo);
29515 AHi = DAG.getBitcast(ExtVT, AHi);
29516 RLo = DAG.getBitcast(ExtVT, RLo);
29517 RHi = DAG.getBitcast(ExtVT, RHi);
29519 // r = VSELECT(r, shift(r, 4), a);
29520 SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
29521 SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
29522 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
29523 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
29525 // a += a
29526 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
29527 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
29529 // r = VSELECT(r, shift(r, 2), a);
29530 MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
29531 MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
29532 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
29533 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
29535 // a += a
29536 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
29537 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
29539 // r = VSELECT(r, shift(r, 1), a);
29540 MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG);
29541 MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG);
29542 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
29543 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
29545 // Logical shift the result back to the lower byte, leaving a zero upper
29546 // byte meaning that we can safely pack with PACKUSWB.
29547 RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RLo, 8, DAG);
29548 RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RHi, 8, DAG);
29549 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
29553 if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
29554 MVT ExtVT = MVT::v8i32;
29555 SDValue Z = DAG.getConstant(0, dl, VT);
29556 SDValue ALo = getUnpackl(DAG, dl, VT, Amt, Z);
29557 SDValue AHi = getUnpackh(DAG, dl, VT, Amt, Z);
29558 SDValue RLo = getUnpackl(DAG, dl, VT, Z, R);
29559 SDValue RHi = getUnpackh(DAG, dl, VT, Z, R);
29560 ALo = DAG.getBitcast(ExtVT, ALo);
29561 AHi = DAG.getBitcast(ExtVT, AHi);
29562 RLo = DAG.getBitcast(ExtVT, RLo);
29563 RHi = DAG.getBitcast(ExtVT, RHi);
29564 SDValue Lo = DAG.getNode(Opc, dl, ExtVT, RLo, ALo);
29565 SDValue Hi = DAG.getNode(Opc, dl, ExtVT, RHi, AHi);
29566 Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Lo, 16, DAG);
29567 Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Hi, 16, DAG);
29568 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
29571 if (VT == MVT::v8i16) {
29572 // If we have a constant shift amount, the non-SSE41 path is best as
29573 // avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
29574 bool UseSSE41 = Subtarget.hasSSE41() &&
29575 !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
29577 auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
29578 // On SSE41 targets we can use PBLENDVB which selects bytes based just on
29579 // the sign bit.
29580 if (UseSSE41) {
29581 MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
29582 V0 = DAG.getBitcast(ExtVT, V0);
29583 V1 = DAG.getBitcast(ExtVT, V1);
29584 Sel = DAG.getBitcast(ExtVT, Sel);
29585 return DAG.getBitcast(
29586 VT, DAG.getNode(X86ISD::BLENDV, dl, ExtVT, Sel, V0, V1));
29588 // On pre-SSE41 targets we splat the sign bit - a negative value will
29589 // set all bits of the lanes to true and VSELECT uses that in
29590 // its OR(AND(V0,C),AND(V1,~C)) lowering.
29591 SDValue C =
29592 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Sel, 15, DAG);
29593 return DAG.getSelect(dl, VT, C, V0, V1);
29596 // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
29597 if (UseSSE41) {
29598 // On SSE41 targets we need to replicate the shift mask in both
29599 // bytes for PBLENDVB.
29600 Amt = DAG.getNode(
29601 ISD::OR, dl, VT,
29602 getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 4, DAG),
29603 getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG));
29604 } else {
29605 Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG);
29608 // r = VSELECT(r, shift(r, 8), a);
29609 SDValue M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 8, DAG);
29610 R = SignBitSelect(Amt, M, R);
29612 // a += a
29613 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
29615 // r = VSELECT(r, shift(r, 4), a);
29616 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 4, DAG);
29617 R = SignBitSelect(Amt, M, R);
29619 // a += a
29620 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
29622 // r = VSELECT(r, shift(r, 2), a);
29623 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 2, DAG);
29624 R = SignBitSelect(Amt, M, R);
29626 // a += a
29627 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
29629 // return VSELECT(r, shift(r, 1), a);
29630 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 1, DAG);
29631 R = SignBitSelect(Amt, M, R);
29632 return R;
29635 // Decompose 256-bit shifts into 128-bit shifts.
29636 if (VT.is256BitVector())
29637 return splitVectorIntBinary(Op, DAG);
29639 if (VT == MVT::v32i16 || VT == MVT::v64i8)
29640 return splitVectorIntBinary(Op, DAG);
29642 return SDValue();
29645 static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
29646 SelectionDAG &DAG) {
29647 MVT VT = Op.getSimpleValueType();
29648 assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) &&
29649 "Unexpected funnel shift opcode!");
29651 SDLoc DL(Op);
29652 SDValue Op0 = Op.getOperand(0);
29653 SDValue Op1 = Op.getOperand(1);
29654 SDValue Amt = Op.getOperand(2);
29655 unsigned EltSizeInBits = VT.getScalarSizeInBits();
29656 bool IsFSHR = Op.getOpcode() == ISD::FSHR;
29658 if (VT.isVector()) {
29659 APInt APIntShiftAmt;
29660 bool IsCstSplat = X86::isConstantSplat(Amt, APIntShiftAmt);
29662 if (Subtarget.hasVBMI2() && EltSizeInBits > 8) {
29663 if (IsFSHR)
29664 std::swap(Op0, Op1);
29666 if (IsCstSplat) {
29667 uint64_t ShiftAmt = APIntShiftAmt.urem(EltSizeInBits);
29668 SDValue Imm = DAG.getTargetConstant(ShiftAmt, DL, MVT::i8);
29669 return getAVX512Node(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT,
29670 {Op0, Op1, Imm}, DAG, Subtarget);
29672 return getAVX512Node(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
29673 {Op0, Op1, Amt}, DAG, Subtarget);
29675 assert((VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8 ||
29676 VT == MVT::v8i16 || VT == MVT::v16i16 || VT == MVT::v32i16 ||
29677 VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) &&
29678 "Unexpected funnel shift type!");
29680 // fshl(x,y,z) -> (unpack(y,x) << (z & (bw-1))) >> bw.
29681 // fshr(x,y,z) -> unpack(y,x) >> (z & (bw-1)).
29682 if (IsCstSplat) {
29683 // TODO: Can't use generic expansion as UNDEF amt elements can be
29684 // converted to other values when folded to shift amounts, losing the
29685 // splat.
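// E.g. a splat-constant i8 fshl with z == 3 becomes (x << 3) | (y >> 5), which
// is what the ShX/ShY shifts below compute for a non-zero amount.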
29686 uint64_t ShiftAmt = APIntShiftAmt.urem(EltSizeInBits);
29687 uint64_t ShXAmt = IsFSHR ? (EltSizeInBits - ShiftAmt) : ShiftAmt;
29688 uint64_t ShYAmt = IsFSHR ? ShiftAmt : (EltSizeInBits - ShiftAmt);
29689 SDValue ShX = DAG.getNode(ISD::SHL, DL, VT, Op0,
29690 DAG.getShiftAmountConstant(ShXAmt, VT, DL));
29691 SDValue ShY = DAG.getNode(ISD::SRL, DL, VT, Op1,
29692 DAG.getShiftAmountConstant(ShYAmt, VT, DL));
29693 return DAG.getNode(ISD::OR, DL, VT, ShX, ShY);
29696 SDValue AmtMask = DAG.getConstant(EltSizeInBits - 1, DL, VT);
29697 SDValue AmtMod = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
29698 bool IsCst = ISD::isBuildVectorOfConstantSDNodes(AmtMod.getNode());
29700 // Constant vXi16 funnel shifts can be efficiently handled by default.
29701 if (IsCst && EltSizeInBits == 16)
29702 return SDValue();
29704 unsigned ShiftOpc = IsFSHR ? ISD::SRL : ISD::SHL;
29705 unsigned NumElts = VT.getVectorNumElements();
29706 MVT ExtSVT = MVT::getIntegerVT(2 * EltSizeInBits);
29707 MVT ExtVT = MVT::getVectorVT(ExtSVT, NumElts / 2);
29709 // Split 256-bit integers on XOP/pre-AVX2 targets.
29710 // Split 512-bit integers on non 512-bit BWI targets.
29711 if ((VT.is256BitVector() && ((Subtarget.hasXOP() && EltSizeInBits < 16) ||
29712 !Subtarget.hasAVX2())) ||
29713 (VT.is512BitVector() && !Subtarget.useBWIRegs() &&
29714 EltSizeInBits < 32)) {
29715 // Pre-mask the amount modulo using the wider vector.
29716 Op = DAG.getNode(Op.getOpcode(), DL, VT, Op0, Op1, AmtMod);
29717 return splitVectorOp(Op, DAG);
29720 // Attempt to fold scalar shift as unpack(y,x) << zext(splat(z))
29721 if (supportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, ShiftOpc)) {
29722 int ScalarAmtIdx = -1;
29723 if (SDValue ScalarAmt = DAG.getSplatSourceVector(AmtMod, ScalarAmtIdx)) {
29724 // Uniform vXi16 funnel shifts can be efficiently handled by default.
29725 if (EltSizeInBits == 16)
29726 return SDValue();
29728 SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, Op1, Op0));
29729 SDValue Hi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, Op1, Op0));
29730 Lo = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Lo, ScalarAmt,
29731 ScalarAmtIdx, Subtarget, DAG);
29732 Hi = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Hi, ScalarAmt,
29733 ScalarAmtIdx, Subtarget, DAG);
29734 return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !IsFSHR);
29738 MVT WideSVT = MVT::getIntegerVT(
29739 std::min<unsigned>(EltSizeInBits * 2, Subtarget.hasBWI() ? 16 : 32));
29740 MVT WideVT = MVT::getVectorVT(WideSVT, NumElts);
29742 // If per-element shifts are legal, fallback to generic expansion.
29743 if (supportedVectorVarShift(VT, Subtarget, ShiftOpc) || Subtarget.hasXOP())
29744 return SDValue();
29746 // Attempt to fold as:
29747 // fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z & (bw-1))) >> bw.
29748 // fshr(x,y,z) -> (((aext(x) << bw) | zext(y)) >> (z & (bw-1))).
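// E.g. for i8 elements fshl(0x12, 0x34, 4) concatenates to 0x..1234 in the
// wide lane; shifting left by 4 and then right by 8 leaves 0x23 in the low
// byte, i.e. (0x12 << 4) | (0x34 >> 4).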
29749 if (supportedVectorVarShift(WideVT, Subtarget, ShiftOpc) &&
29750 supportedVectorShiftWithImm(WideVT, Subtarget, ShiftOpc)) {
29751 Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, WideVT, Op0);
29752 Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Op1);
29753 AmtMod = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, AmtMod);
29754 Op0 = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, WideVT, Op0,
29755 EltSizeInBits, DAG);
29756 SDValue Res = DAG.getNode(ISD::OR, DL, WideVT, Op0, Op1);
29757 Res = DAG.getNode(ShiftOpc, DL, WideVT, Res, AmtMod);
29758 if (!IsFSHR)
29759 Res = getTargetVShiftByConstNode(X86ISD::VSRLI, DL, WideVT, Res,
29760 EltSizeInBits, DAG);
29761 return DAG.getNode(ISD::TRUNCATE, DL, VT, Res);
29764 // Attempt to fold per-element (ExtVT) shift as unpack(y,x) << zext(z)
29765 if (((IsCst || !Subtarget.hasAVX512()) && !IsFSHR && EltSizeInBits <= 16) ||
29766 supportedVectorVarShift(ExtVT, Subtarget, ShiftOpc)) {
29767 SDValue Z = DAG.getConstant(0, DL, VT);
29768 SDValue RLo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, Op1, Op0));
29769 SDValue RHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, Op1, Op0));
29770 SDValue ALo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, AmtMod, Z));
29771 SDValue AHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, AmtMod, Z));
29772 SDValue Lo = DAG.getNode(ShiftOpc, DL, ExtVT, RLo, ALo);
29773 SDValue Hi = DAG.getNode(ShiftOpc, DL, ExtVT, RHi, AHi);
29774 return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !IsFSHR);
29777 // Fallback to generic expansion.
29778 return SDValue();
29780 assert(
29781 (VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
29782 "Unexpected funnel shift type!");
29784 // Expand slow SHLD/SHRD cases if we are not optimizing for size.
29785 bool OptForSize = DAG.shouldOptForSize();
29786 bool ExpandFunnel = !OptForSize && Subtarget.isSHLDSlow();
29788 // fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z & (bw-1))) >> bw.
29789 // fshr(x,y,z) -> (((aext(x) << bw) | zext(y)) >> (z & (bw-1))).
29790 if ((VT == MVT::i8 || (ExpandFunnel && VT == MVT::i16)) &&
29791 !isa<ConstantSDNode>(Amt)) {
29792 SDValue Mask = DAG.getConstant(EltSizeInBits - 1, DL, Amt.getValueType());
29793 SDValue HiShift = DAG.getConstant(EltSizeInBits, DL, Amt.getValueType());
29794 Op0 = DAG.getAnyExtOrTrunc(Op0, DL, MVT::i32);
29795 Op1 = DAG.getZExtOrTrunc(Op1, DL, MVT::i32);
29796 Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt, Mask);
29797 SDValue Res = DAG.getNode(ISD::SHL, DL, MVT::i32, Op0, HiShift);
29798 Res = DAG.getNode(ISD::OR, DL, MVT::i32, Res, Op1);
29799 if (IsFSHR) {
29800 Res = DAG.getNode(ISD::SRL, DL, MVT::i32, Res, Amt);
29801 } else {
29802 Res = DAG.getNode(ISD::SHL, DL, MVT::i32, Res, Amt);
29803 Res = DAG.getNode(ISD::SRL, DL, MVT::i32, Res, HiShift);
29805 return DAG.getZExtOrTrunc(Res, DL, VT);
29808 if (VT == MVT::i8 || ExpandFunnel)
29809 return SDValue();
29811 // i16 needs to modulo the shift amount, but i32/i64 have implicit modulo.
29812 if (VT == MVT::i16) {
29813 Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt,
29814 DAG.getConstant(15, DL, Amt.getValueType()));
29815 unsigned FSHOp = (IsFSHR ? X86ISD::FSHR : X86ISD::FSHL);
29816 return DAG.getNode(FSHOp, DL, VT, Op0, Op1, Amt);
29819 return Op;
29822 static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
29823 SelectionDAG &DAG) {
29824 MVT VT = Op.getSimpleValueType();
29825 assert(VT.isVector() && "Custom lowering only for vector rotates!");
29827 SDLoc DL(Op);
29828 SDValue R = Op.getOperand(0);
29829 SDValue Amt = Op.getOperand(1);
29830 unsigned Opcode = Op.getOpcode();
29831 unsigned EltSizeInBits = VT.getScalarSizeInBits();
29832 int NumElts = VT.getVectorNumElements();
29833 bool IsROTL = Opcode == ISD::ROTL;
29835 // Check for constant splat rotation amount.
29836 APInt CstSplatValue;
29837 bool IsCstSplat = X86::isConstantSplat(Amt, CstSplatValue);
29839 // Check for splat rotate by zero.
29840 if (IsCstSplat && CstSplatValue.urem(EltSizeInBits) == 0)
29841 return R;
29843 // AVX512 implicitly uses modulo rotation amounts.
29844 if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
29845 // Attempt to rotate by immediate.
29846 if (IsCstSplat) {
29847 unsigned RotOpc = IsROTL ? X86ISD::VROTLI : X86ISD::VROTRI;
29848 uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
29849 return DAG.getNode(RotOpc, DL, VT, R,
29850 DAG.getTargetConstant(RotAmt, DL, MVT::i8));
29853 // Else, fall-back on VPROLV/VPRORV.
29854 return Op;
29857 // AVX512 VBMI2 vXi16 - lower to funnel shifts.
29858 if (Subtarget.hasVBMI2() && 16 == EltSizeInBits) {
29859 unsigned FunnelOpc = IsROTL ? ISD::FSHL : ISD::FSHR;
29860 return DAG.getNode(FunnelOpc, DL, VT, R, R, Amt);
29863 SDValue Z = DAG.getConstant(0, DL, VT);
29865 if (!IsROTL) {
29866 // If the ISD::ROTR amount is constant, we're always better off converting to
29867 // ISD::ROTL.
29868 if (SDValue NegAmt = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, {Z, Amt}))
29869 return DAG.getNode(ISD::ROTL, DL, VT, R, NegAmt);
29871 // XOP targets always prefer ISD::ROTL.
29872 if (Subtarget.hasXOP())
29873 return DAG.getNode(ISD::ROTL, DL, VT, R,
29874 DAG.getNode(ISD::SUB, DL, VT, Z, Amt));
29877 // Split 256-bit integers on XOP/pre-AVX2 targets.
29878 if (VT.is256BitVector() && (Subtarget.hasXOP() || !Subtarget.hasAVX2()))
29879 return splitVectorIntBinary(Op, DAG);
29881 // XOP has 128-bit vector variable + immediate rotates.
29882 // +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
29883 // XOP implicitly uses modulo rotation amounts.
29884 if (Subtarget.hasXOP()) {
29885 assert(IsROTL && "Only ROTL expected");
29886 assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
29888 // Attempt to rotate by immediate.
29889 if (IsCstSplat) {
29890 uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
29891 return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
29892 DAG.getTargetConstant(RotAmt, DL, MVT::i8));
29895 // Use general rotate by variable (per-element).
29896 return Op;
29899 // Rotate by a uniform constant - expand back to shifts.
29900 // TODO: Can't use generic expansion as UNDEF amt elements can be converted
29901 // to other values when folded to shift amounts, losing the splat.
29902 if (IsCstSplat) {
29903 uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
29904 uint64_t ShlAmt = IsROTL ? RotAmt : (EltSizeInBits - RotAmt);
29905 uint64_t SrlAmt = IsROTL ? (EltSizeInBits - RotAmt) : RotAmt;
29906 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, R,
29907 DAG.getShiftAmountConstant(ShlAmt, VT, DL));
29908 SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, R,
29909 DAG.getShiftAmountConstant(SrlAmt, VT, DL));
29910 return DAG.getNode(ISD::OR, DL, VT, Shl, Srl);
29913 // Split 512-bit integers on non 512-bit BWI targets.
29914 if (VT.is512BitVector() && !Subtarget.useBWIRegs())
29915 return splitVectorIntBinary(Op, DAG);
29917 assert(
29918 (VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
29919 ((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
29920 Subtarget.hasAVX2()) ||
29921 ((VT == MVT::v32i16 || VT == MVT::v64i8) && Subtarget.useBWIRegs())) &&
29922 "Only vXi32/vXi16/vXi8 vector rotates supported");
29924 MVT ExtSVT = MVT::getIntegerVT(2 * EltSizeInBits);
29925 MVT ExtVT = MVT::getVectorVT(ExtSVT, NumElts / 2);
29927 SDValue AmtMask = DAG.getConstant(EltSizeInBits - 1, DL, VT);
29928 SDValue AmtMod = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
29930 // Attempt to fold as unpack(x,x) << zext(splat(y)):
29931 // rotl(x,y) -> (unpack(x,x) << (y & (bw-1))) >> bw.
29932 // rotr(x,y) -> (unpack(x,x) >> (y & (bw-1))).
29933 if (EltSizeInBits == 8 || EltSizeInBits == 16 || EltSizeInBits == 32) {
29934 int BaseRotAmtIdx = -1;
29935 if (SDValue BaseRotAmt = DAG.getSplatSourceVector(AmtMod, BaseRotAmtIdx)) {
29936 if (EltSizeInBits == 16 && Subtarget.hasSSE41()) {
29937 unsigned FunnelOpc = IsROTL ? ISD::FSHL : ISD::FSHR;
29938 return DAG.getNode(FunnelOpc, DL, VT, R, R, Amt);
29940 unsigned ShiftX86Opc = IsROTL ? X86ISD::VSHLI : X86ISD::VSRLI;
29941 SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
29942 SDValue Hi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, R, R));
29943 Lo = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Lo, BaseRotAmt,
29944 BaseRotAmtIdx, Subtarget, DAG);
29945 Hi = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Hi, BaseRotAmt,
29946 BaseRotAmtIdx, Subtarget, DAG);
29947 return getPack(DAG, Subtarget, DL, VT, Lo, Hi, IsROTL);
29951 bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
29952 unsigned ShiftOpc = IsROTL ? ISD::SHL : ISD::SRL;
29954 // Attempt to fold as unpack(x,x) << zext(y):
29955 // rotl(x,y) -> (unpack(x,x) << (y & (bw-1))) >> bw.
29956 // rotr(x,y) -> (unpack(x,x) >> (y & (bw-1))).
29957 // Const vXi16/vXi32 are excluded in favor of MUL-based lowering.
29958 if (!(ConstantAmt && EltSizeInBits != 8) &&
29959 !supportedVectorVarShift(VT, Subtarget, ShiftOpc) &&
29960 (ConstantAmt || supportedVectorVarShift(ExtVT, Subtarget, ShiftOpc))) {
29961 SDValue RLo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
29962 SDValue RHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, R, R));
29963 SDValue ALo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, AmtMod, Z));
29964 SDValue AHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, AmtMod, Z));
29965 SDValue Lo = DAG.getNode(ShiftOpc, DL, ExtVT, RLo, ALo);
29966 SDValue Hi = DAG.getNode(ShiftOpc, DL, ExtVT, RHi, AHi);
29967 return getPack(DAG, Subtarget, DL, VT, Lo, Hi, IsROTL);
29970 // v16i8/v32i8/v64i8: Split the rotation into rot4/rot2/rot1 stages and select
29971 // by the corresponding bit of the rotation amount.
29972 // TODO: We're doing nothing here that we couldn't do for funnel shifts.
29973 if (EltSizeInBits == 8) {
29974 MVT WideVT =
29975 MVT::getVectorVT(Subtarget.hasBWI() ? MVT::i16 : MVT::i32, NumElts);
29977 // Attempt to fold as:
29978 // rotl(x,y) -> (((aext(x) << bw) | zext(x)) << (y & (bw-1))) >> bw.
29979 // rotr(x,y) -> (((aext(x) << bw) | zext(x)) >> (y & (bw-1))).
29980 if (supportedVectorVarShift(WideVT, Subtarget, ShiftOpc) &&
29981 supportedVectorShiftWithImm(WideVT, Subtarget, ShiftOpc)) {
29983 // If we're rotating by a constant, just use default promotion.
29983 if (ConstantAmt)
29984 return SDValue();
29985 // See if we can perform this by widening to vXi16 or vXi32.
29986 R = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, R);
29987 R = DAG.getNode(
29988 ISD::OR, DL, WideVT, R,
29989 getTargetVShiftByConstNode(X86ISD::VSHLI, DL, WideVT, R, 8, DAG));
29990 Amt = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, AmtMod);
29991 R = DAG.getNode(ShiftOpc, DL, WideVT, R, Amt);
29992 if (IsROTL)
29993 R = getTargetVShiftByConstNode(X86ISD::VSRLI, DL, WideVT, R, 8, DAG);
29994 return DAG.getNode(ISD::TRUNCATE, DL, VT, R);
29997 // We don't need ModuloAmt here as we just peek at individual bits.
29998 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
29999 if (Subtarget.hasSSE41()) {
30000 // On SSE41 targets we can use PBLENDVB which selects bytes based just
30001 // on the sign bit.
30002 V0 = DAG.getBitcast(VT, V0);
30003 V1 = DAG.getBitcast(VT, V1);
30004 Sel = DAG.getBitcast(VT, Sel);
30005 return DAG.getBitcast(SelVT,
30006 DAG.getNode(X86ISD::BLENDV, DL, VT, Sel, V0, V1));
30008 // On pre-SSE41 targets we test for the sign bit by comparing to
30009 // zero - a negative value will set all bits of the lanes to true
30010 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
30011 SDValue Z = DAG.getConstant(0, DL, SelVT);
30012 SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
30013 return DAG.getSelect(DL, SelVT, C, V0, V1);
30016 // ISD::ROTR is currently only profitable on AVX512 targets with VPTERNLOG.
30017 if (!IsROTL && !useVPTERNLOG(Subtarget, VT)) {
30018 Amt = DAG.getNode(ISD::SUB, DL, VT, Z, Amt);
30019 IsROTL = true;
30022 unsigned ShiftLHS = IsROTL ? ISD::SHL : ISD::SRL;
30023 unsigned ShiftRHS = IsROTL ? ISD::SRL : ISD::SHL;
30025 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
30026 // We can safely do this using i16 shifts as we're only interested in
30027 // the 3 lower bits of each byte.
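// For example (illustrative), a byte amount of 3 (0b00000011) becomes
// 0b01100000 after the shift; the successive sign-bit tests below then see the
// "rotate by 4", "rotate by 2" and "rotate by 1" bits in turn.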
30028 Amt = DAG.getBitcast(ExtVT, Amt);
30029 Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
30030 Amt = DAG.getBitcast(VT, Amt);
30032 // r = VSELECT(r, rot(r, 4), a);
30033 SDValue M;
30034 M = DAG.getNode(
30035 ISD::OR, DL, VT,
30036 DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(4, DL, VT)),
30037 DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(4, DL, VT)));
30038 R = SignBitSelect(VT, Amt, M, R);
30040 // a += a
30041 Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
30043 // r = VSELECT(r, rot(r, 2), a);
30044 M = DAG.getNode(
30045 ISD::OR, DL, VT,
30046 DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(2, DL, VT)),
30047 DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(6, DL, VT)));
30048 R = SignBitSelect(VT, Amt, M, R);
30050 // a += a
30051 Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
30053 // return VSELECT(r, rot(r, 1), a);
30054 M = DAG.getNode(
30055 ISD::OR, DL, VT,
30056 DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(1, DL, VT)),
30057 DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(7, DL, VT)));
30058 return SignBitSelect(VT, Amt, M, R);
30061 bool IsSplatAmt = DAG.isSplatValue(Amt);
30062 bool LegalVarShifts = supportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
30063 supportedVectorVarShift(VT, Subtarget, ISD::SRL);
30065 // Fallback for splats + all supported variable shifts.
30066 // Fallback for non-constant AVX2 vXi16 amounts as well.
30067 if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
30068 Amt = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
30069 SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
30070 AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
30071 SDValue SHL = DAG.getNode(IsROTL ? ISD::SHL : ISD::SRL, DL, VT, R, Amt);
30072 SDValue SRL = DAG.getNode(IsROTL ? ISD::SRL : ISD::SHL, DL, VT, R, AmtR);
30073 return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
30076 // Everything below assumes ISD::ROTL.
30077 if (!IsROTL) {
30078 Amt = DAG.getNode(ISD::SUB, DL, VT, Z, Amt);
30079 IsROTL = true;
30082 // ISD::ROT* uses modulo rotate amounts.
30083 Amt = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
30085 assert(IsROTL && "Only ROTL supported");
30087 // As with shifts, attempt to convert the rotation amount to a multiplication
30088 // factor; otherwise fall back to general expansion.
30089 SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
30090 if (!Scale)
30091 return SDValue();
30093 // v8i16/v16i16: perform unsigned multiply hi/lo and OR the results.
30094 if (EltSizeInBits == 16) {
30095 SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale);
30096 SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale);
30097 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
30100 // v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
30101 // to v2i64 results at a time. The upper 32-bits contain the wrapped bits
30102 // that can then be OR'd with the lower 32-bits.
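// Below, Res02 holds the products for lanes 0 and 2 and Res13 those for lanes
// 1 and 3; the final shuffles gather the low halves (the shifted-left part) and
// the high halves (the wrapped-around part) back into v4i32 before OR'ing.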
30103 assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected");
30104 static const int OddMask[] = {1, -1, 3, -1};
30105 SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
30106 SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask);
30108 SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
30109 DAG.getBitcast(MVT::v2i64, R),
30110 DAG.getBitcast(MVT::v2i64, Scale));
30111 SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
30112 DAG.getBitcast(MVT::v2i64, R13),
30113 DAG.getBitcast(MVT::v2i64, Scale13));
30114 Res02 = DAG.getBitcast(VT, Res02);
30115 Res13 = DAG.getBitcast(VT, Res13);
30117 return DAG.getNode(ISD::OR, DL, VT,
30118 DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
30119 DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
30122 /// Returns true if the operand type is exactly twice the native width, and
30123 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
30124 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
30125 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
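/// For example (assuming the usual subtarget features), a 64-bit atomic
/// operation on a 32-bit target maps to cmpxchg8b and a 128-bit operation on a
/// 64-bit target maps to cmpxchg16b.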
30126 bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
30127 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
30129 if (OpWidth == 64)
30130 return Subtarget.canUseCMPXCHG8B() && !Subtarget.is64Bit();
30131 if (OpWidth == 128)
30132 return Subtarget.canUseCMPXCHG16B();
30134 return false;
30137 TargetLoweringBase::AtomicExpansionKind
30138 X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
30139 Type *MemType = SI->getValueOperand()->getType();
30141 bool NoImplicitFloatOps =
30142 SI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
30143 if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
30144 !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
30145 (Subtarget.hasSSE1() || Subtarget.hasX87()))
30146 return AtomicExpansionKind::None;
30148 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::Expand
30149 : AtomicExpansionKind::None;
30152 // Note: this turns large loads into lock cmpxchg8b/16b.
30153 // TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
30154 TargetLowering::AtomicExpansionKind
30155 X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
30156 Type *MemType = LI->getType();
30158 // If this is a 64-bit atomic load on a 32-bit target and SSE2 is enabled, we
30159 // can use movq to do the load. If we have X87 we can load into an 80-bit
30160 // X87 register and store it to a stack temporary.
30161 bool NoImplicitFloatOps =
30162 LI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
30163 if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
30164 !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
30165 (Subtarget.hasSSE1() || Subtarget.hasX87()))
30166 return AtomicExpansionKind::None;
30168 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
30169 : AtomicExpansionKind::None;
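// Classifies how a value changes exactly one bit: a power-of-2 constant
// (ConstantBit), the NOT of one (NotConstantBit), a (1 << X) shift (ShiftBit),
// or the NOT of such a shift (NotShiftBit).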
30172 enum BitTestKind : unsigned {
30173 UndefBit,
30174 ConstantBit,
30175 NotConstantBit,
30176 ShiftBit,
30177 NotShiftBit
30180 static std::pair<Value *, BitTestKind> FindSingleBitChange(Value *V) {
30181 using namespace llvm::PatternMatch;
30182 BitTestKind BTK = UndefBit;
30183 auto *C = dyn_cast<ConstantInt>(V);
30184 if (C) {
30185 // Check if V is a power of 2 or the NOT of a power of 2.
30186 if (isPowerOf2_64(C->getZExtValue()))
30187 BTK = ConstantBit;
30188 else if (isPowerOf2_64((~C->getValue()).getZExtValue()))
30189 BTK = NotConstantBit;
30190 return {V, BTK};
30193 // Check if V is some power-of-2 pattern known to be non-zero.
30194 auto *I = dyn_cast<Instruction>(V);
30195 if (I) {
30196 bool Not = false;
30197 // Check if we have a NOT
30198 Value *PeekI;
30199 if (match(I, m_c_Xor(m_Value(PeekI), m_AllOnes())) ||
30200 match(I, m_Sub(m_AllOnes(), m_Value(PeekI)))) {
30201 Not = true;
30202 I = dyn_cast<Instruction>(PeekI);
30204 // If I is constant, it will fold and we can evaluate later. If it's an
30205 // argument or something of that nature, we can't analyze.
30206 if (I == nullptr)
30207 return {nullptr, UndefBit};
30209 // We can only use 1 << X without more sophisticated analysis. C << X where
30210 // C is a power of 2 but not 1 can result in zero which cannot be translated
30211 // to bittest. Likewise any C >> X (either arith or logical) can be zero.
30212 if (I->getOpcode() == Instruction::Shl) {
30213 // Todo(1): The cmpxchg case is pretty costly so matching `BLSI(X)`, `X &
30214 // -X` and some other provable power of 2 patterns that we can use CTZ on
30215 // may be profitable.
30216 // Todo(2): It may be possible in some cases to prove that Shl(C, X) is
30217 // non-zero even where C != 1. Likewise LShr(C, X) and AShr(C, X) may also
30218 // be provably a non-zero power of 2.
30219 // Todo(3): ROTL and ROTR patterns on a power of 2 C should also be
30220 // transformable to bittest.
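// For example (illustrative IR), atomicrmw with a (shl i32 1, %n) operand can
// become a bit-test, whereas (shl i32 2, %n) may evaluate to zero and cannot.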
30221 auto *ShiftVal = dyn_cast<ConstantInt>(I->getOperand(0));
30222 if (!ShiftVal)
30223 return {nullptr, UndefBit};
30224 if (ShiftVal->equalsInt(1))
30225 BTK = Not ? NotShiftBit : ShiftBit;
30227 if (BTK == UndefBit)
30228 return {nullptr, UndefBit};
30230 Value *BitV = I->getOperand(1);
30232 Value *AndOp;
30233 const APInt *AndC;
30234 if (match(BitV, m_c_And(m_Value(AndOp), m_APInt(AndC)))) {
30235 // Read past a shift-mask instruction to find the shift count.
30236 if (*AndC == (I->getType()->getPrimitiveSizeInBits() - 1))
30237 BitV = AndOp;
30239 return {BitV, BTK};
30242 return {nullptr, UndefBit};
30245 TargetLowering::AtomicExpansionKind
30246 X86TargetLowering::shouldExpandLogicAtomicRMWInIR(AtomicRMWInst *AI) const {
30247 using namespace llvm::PatternMatch;
30248 // If the atomicrmw's result isn't actually used, we can just add a "lock"
30249 // prefix to a normal instruction for these operations.
30250 if (AI->use_empty())
30251 return AtomicExpansionKind::None;
30253 if (AI->getOperation() == AtomicRMWInst::Xor) {
30254 // A ^ SignBit -> A + SignBit. This allows us to use `xadd` which is
30255 // preferable to both `cmpxchg` and `btc`.
30256 if (match(AI->getOperand(1), m_SignMask()))
30257 return AtomicExpansionKind::None;
30260 // If the atomicrmw's result is used by a single-bit AND, we may be able to use
30261 // a bts/btr/btc instruction for these operations.
30262 // Note: InstCombinePass can cause a de-optimization here. It replaces the
30263 // SETCC(And(AtomicRMW(P, power_of_2), power_of_2)) with LShr and Xor
30264 // (depending on CC). This pattern can only use bts/btr/btc but we don't
30265 // detect it.
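// Illustrative example (hypothetical IR):
//   %old = atomicrmw or ptr %p, i32 8 seq_cst
//   %res = and i32 %old, 8
// can be emitted as a `lock bts` of bit 3 plus a test of the returned bit,
// instead of a cmpxchg loop.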
30266 Instruction *I = AI->user_back();
30267 auto BitChange = FindSingleBitChange(AI->getValOperand());
30268 if (BitChange.second == UndefBit || !AI->hasOneUse() ||
30269 I->getOpcode() != Instruction::And ||
30270 AI->getType()->getPrimitiveSizeInBits() == 8 ||
30271 AI->getParent() != I->getParent())
30272 return AtomicExpansionKind::CmpXChg;
30274 unsigned OtherIdx = I->getOperand(0) == AI ? 1 : 0;
30277 // This is a redundant AND; it should get cleaned up elsewhere.
30277 if (AI == I->getOperand(OtherIdx))
30278 return AtomicExpansionKind::CmpXChg;
30280 // The following instruction must be an AND of a single bit.
30281 if (BitChange.second == ConstantBit || BitChange.second == NotConstantBit) {
30282 auto *C1 = cast<ConstantInt>(AI->getValOperand());
30283 auto *C2 = dyn_cast<ConstantInt>(I->getOperand(OtherIdx));
30284 if (!C2 || !isPowerOf2_64(C2->getZExtValue())) {
30285 return AtomicExpansionKind::CmpXChg;
30287 if (AI->getOperation() == AtomicRMWInst::And) {
30288 return ~C1->getValue() == C2->getValue()
30289 ? AtomicExpansionKind::BitTestIntrinsic
30290 : AtomicExpansionKind::CmpXChg;
30292 return C1 == C2 ? AtomicExpansionKind::BitTestIntrinsic
30293 : AtomicExpansionKind::CmpXChg;
30296 assert(BitChange.second == ShiftBit || BitChange.second == NotShiftBit);
30298 auto BitTested = FindSingleBitChange(I->getOperand(OtherIdx));
30299 if (BitTested.second != ShiftBit && BitTested.second != NotShiftBit)
30300 return AtomicExpansionKind::CmpXChg;
30302 assert(BitChange.first != nullptr && BitTested.first != nullptr);
30304 // If shift amounts are not the same we can't use BitTestIntrinsic.
30305 if (BitChange.first != BitTested.first)
30306 return AtomicExpansionKind::CmpXChg;
30308 // For an atomic AND, the RMW operand must clear exactly one bit (~(1 << X))
30309 // and the following AND must test that same bit (1 << X).
30310 if (AI->getOperation() == AtomicRMWInst::And)
30311 return (BitChange.second == NotShiftBit && BitTested.second == ShiftBit)
30312 ? AtomicExpansionKind::BitTestIntrinsic
30313 : AtomicExpansionKind::CmpXChg;
30315 // For an atomic XOR/OR, the RMW operand must set/toggle the same bit that the AND tests.
30316 return (BitChange.second == ShiftBit && BitTested.second == ShiftBit)
30317 ? AtomicExpansionKind::BitTestIntrinsic
30318 : AtomicExpansionKind::CmpXChg;
30321 void X86TargetLowering::emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
30322 IRBuilder<> Builder(AI);
30323 Builder.CollectMetadataToCopy(AI, {LLVMContext::MD_pcsections});
30324 Intrinsic::ID IID_C = Intrinsic::not_intrinsic;
30325 Intrinsic::ID IID_I = Intrinsic::not_intrinsic;
30326 switch (AI->getOperation()) {
30327 default:
30328 llvm_unreachable("Unknown atomic operation");
30329 case AtomicRMWInst::Or:
30330 IID_C = Intrinsic::x86_atomic_bts;
30331 IID_I = Intrinsic::x86_atomic_bts_rm;
30332 break;
30333 case AtomicRMWInst::Xor:
30334 IID_C = Intrinsic::x86_atomic_btc;
30335 IID_I = Intrinsic::x86_atomic_btc_rm;
30336 break;
30337 case AtomicRMWInst::And:
30338 IID_C = Intrinsic::x86_atomic_btr;
30339 IID_I = Intrinsic::x86_atomic_btr_rm;
30340 break;
30342 Instruction *I = AI->user_back();
30343 LLVMContext &Ctx = AI->getContext();
30344 Value *Addr = Builder.CreatePointerCast(AI->getPointerOperand(),
30345 PointerType::getUnqual(Ctx));
30346 Function *BitTest = nullptr;
30347 Value *Result = nullptr;
30348 auto BitTested = FindSingleBitChange(AI->getValOperand());
30349 assert(BitTested.first != nullptr);
30351 if (BitTested.second == ConstantBit || BitTested.second == NotConstantBit) {
30352 auto *C = cast<ConstantInt>(I->getOperand(I->getOperand(0) == AI ? 1 : 0));
30354 BitTest = Intrinsic::getDeclaration(AI->getModule(), IID_C, AI->getType());
30356 unsigned Imm = llvm::countr_zero(C->getZExtValue());
30357 Result = Builder.CreateCall(BitTest, {Addr, Builder.getInt8(Imm)});
30358 } else {
30359 BitTest = Intrinsic::getDeclaration(AI->getModule(), IID_I, AI->getType());
30361 assert(BitTested.second == ShiftBit || BitTested.second == NotShiftBit);
30363 Value *SI = BitTested.first;
30364 assert(SI != nullptr);
30366 // BT{S|R|C} on a memory operand doesn't modulo the bit position, so we need
30367 // to mask it.
30368 unsigned ShiftBits = SI->getType()->getPrimitiveSizeInBits();
30369 Value *BitPos =
30370 Builder.CreateAnd(SI, Builder.getIntN(ShiftBits, ShiftBits - 1));
30371 // Todo(1): In many cases it may be provable that SI is less than
30372 // ShiftBits, in which case this mask is unnecessary.
30373 // Todo(2): In the fairly idiomatic case of P[X / sizeof_bits(X)] OP 1
30374 // << (X % sizeof_bits(X)) we can drop the shift mask and AGEN in
30375 // favor of just a raw BT{S|R|C}.
30377 Result = Builder.CreateCall(BitTest, {Addr, BitPos});
30378 Result = Builder.CreateZExtOrTrunc(Result, AI->getType());
30380 // If the result is only used for zero/non-zero status then we don't need to
30381 // shift the value back. Otherwise do so.
30382 for (auto It = I->user_begin(); It != I->user_end(); ++It) {
30383 if (auto *ICmp = dyn_cast<ICmpInst>(*It)) {
30384 if (ICmp->isEquality()) {
30385 auto *C0 = dyn_cast<ConstantInt>(ICmp->getOperand(0));
30386 auto *C1 = dyn_cast<ConstantInt>(ICmp->getOperand(1));
30387 if (C0 || C1) {
30388 assert(C0 == nullptr || C1 == nullptr);
30389 if ((C0 ? C0 : C1)->isZero())
30390 continue;
30394 Result = Builder.CreateShl(Result, BitPos);
30395 break;
30399 I->replaceAllUsesWith(Result);
30400 I->eraseFromParent();
30401 AI->eraseFromParent();
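// Returns true if the atomicrmw's only use is a comparison that can instead be
// computed from the flags of a `lock`-prefixed arithmetic instruction. A sketch
// of the Add case (illustrative IR):
//   %neg = sub i32 0, %v
//   %old = atomicrmw add ptr %p, i32 %v seq_cst
//   %cmp = icmp eq i32 %old, %neg   ; (old + v) == 0, i.e. ZF of `lock add`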
30404 static bool shouldExpandCmpArithRMWInIR(AtomicRMWInst *AI) {
30405 using namespace llvm::PatternMatch;
30406 if (!AI->hasOneUse())
30407 return false;
30409 Value *Op = AI->getOperand(1);
30410 ICmpInst::Predicate Pred;
30411 Instruction *I = AI->user_back();
30412 AtomicRMWInst::BinOp Opc = AI->getOperation();
30413 if (Opc == AtomicRMWInst::Add) {
30414 if (match(I, m_c_ICmp(Pred, m_Sub(m_ZeroInt(), m_Specific(Op)), m_Value())))
30415 return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE;
30416 if (match(I, m_OneUse(m_c_Add(m_Specific(Op), m_Value())))) {
30417 if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
30418 return Pred == CmpInst::ICMP_SLT;
30419 if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
30420 return Pred == CmpInst::ICMP_SGT;
30422 return false;
30424 if (Opc == AtomicRMWInst::Sub) {
30425 if (match(I, m_c_ICmp(Pred, m_Specific(Op), m_Value())))
30426 return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE;
30427 if (match(I, m_OneUse(m_Sub(m_Value(), m_Specific(Op))))) {
30428 if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
30429 return Pred == CmpInst::ICMP_SLT;
30430 if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
30431 return Pred == CmpInst::ICMP_SGT;
30433 return false;
30435 if ((Opc == AtomicRMWInst::Or &&
30436 match(I, m_OneUse(m_c_Or(m_Specific(Op), m_Value())))) ||
30437 (Opc == AtomicRMWInst::And &&
30438 match(I, m_OneUse(m_c_And(m_Specific(Op), m_Value()))))) {
30439 if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
30440 return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE ||
30441 Pred == CmpInst::ICMP_SLT;
30442 if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
30443 return Pred == CmpInst::ICMP_SGT;
30444 return false;
30446 if (Opc == AtomicRMWInst::Xor) {
30447 if (match(I, m_c_ICmp(Pred, m_Specific(Op), m_Value())))
30448 return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE;
30449 if (match(I, m_OneUse(m_c_Xor(m_Specific(Op), m_Value())))) {
30450 if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
30451 return Pred == CmpInst::ICMP_SLT;
30452 if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
30453 return Pred == CmpInst::ICMP_SGT;
30455 return false;
30458 return false;
30461 void X86TargetLowering::emitCmpArithAtomicRMWIntrinsic(
30462 AtomicRMWInst *AI) const {
30463 IRBuilder<> Builder(AI);
30464 Builder.CollectMetadataToCopy(AI, {LLVMContext::MD_pcsections});
30465 Instruction *TempI = nullptr;
30466 LLVMContext &Ctx = AI->getContext();
30467 ICmpInst *ICI = dyn_cast<ICmpInst>(AI->user_back());
30468 if (!ICI) {
30469 TempI = AI->user_back();
30470 assert(TempI->hasOneUse() && "Must have one use");
30471 ICI = cast<ICmpInst>(TempI->user_back());
30473 X86::CondCode CC = X86::COND_INVALID;
30474 ICmpInst::Predicate Pred = ICI->getPredicate();
30475 switch (Pred) {
30476 default:
30477 llvm_unreachable("Not supported Pred");
30478 case CmpInst::ICMP_EQ:
30479 CC = X86::COND_E;
30480 break;
30481 case CmpInst::ICMP_NE:
30482 CC = X86::COND_NE;
30483 break;
30484 case CmpInst::ICMP_SLT:
30485 CC = X86::COND_S;
30486 break;
30487 case CmpInst::ICMP_SGT:
30488 CC = X86::COND_NS;
30489 break;
30491 Intrinsic::ID IID = Intrinsic::not_intrinsic;
30492 switch (AI->getOperation()) {
30493 default:
30494 llvm_unreachable("Unknown atomic operation");
30495 case AtomicRMWInst::Add:
30496 IID = Intrinsic::x86_atomic_add_cc;
30497 break;
30498 case AtomicRMWInst::Sub:
30499 IID = Intrinsic::x86_atomic_sub_cc;
30500 break;
30501 case AtomicRMWInst::Or:
30502 IID = Intrinsic::x86_atomic_or_cc;
30503 break;
30504 case AtomicRMWInst::And:
30505 IID = Intrinsic::x86_atomic_and_cc;
30506 break;
30507 case AtomicRMWInst::Xor:
30508 IID = Intrinsic::x86_atomic_xor_cc;
30509 break;
30511 Function *CmpArith =
30512 Intrinsic::getDeclaration(AI->getModule(), IID, AI->getType());
30513 Value *Addr = Builder.CreatePointerCast(AI->getPointerOperand(),
30514 PointerType::getUnqual(Ctx));
30515 Value *Call = Builder.CreateCall(
30516 CmpArith, {Addr, AI->getValOperand(), Builder.getInt32((unsigned)CC)});
30517 Value *Result = Builder.CreateTrunc(Call, Type::getInt1Ty(Ctx));
30518 ICI->replaceAllUsesWith(Result);
30519 ICI->eraseFromParent();
30520 if (TempI)
30521 TempI->eraseFromParent();
30522 AI->eraseFromParent();
30525 TargetLowering::AtomicExpansionKind
30526 X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
30527 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
30528 Type *MemType = AI->getType();
30530 // If the operand is too big, we must see if cmpxchg8/16b is available
30531 // and default to library calls otherwise.
30532 if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
30533 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
30534 : AtomicExpansionKind::None;
30537 AtomicRMWInst::BinOp Op = AI->getOperation();
30538 switch (Op) {
30539 case AtomicRMWInst::Xchg:
30540 return AtomicExpansionKind::None;
30541 case AtomicRMWInst::Add:
30542 case AtomicRMWInst::Sub:
30543 if (shouldExpandCmpArithRMWInIR(AI))
30544 return AtomicExpansionKind::CmpArithIntrinsic;
30545 // It's better to use xadd, xsub or xchg for these in other cases.
30546 return AtomicExpansionKind::None;
30547 case AtomicRMWInst::Or:
30548 case AtomicRMWInst::And:
30549 case AtomicRMWInst::Xor:
30550 if (shouldExpandCmpArithRMWInIR(AI))
30551 return AtomicExpansionKind::CmpArithIntrinsic;
30552 return shouldExpandLogicAtomicRMWInIR(AI);
30553 case AtomicRMWInst::Nand:
30554 case AtomicRMWInst::Max:
30555 case AtomicRMWInst::Min:
30556 case AtomicRMWInst::UMax:
30557 case AtomicRMWInst::UMin:
30558 case AtomicRMWInst::FAdd:
30559 case AtomicRMWInst::FSub:
30560 case AtomicRMWInst::FMax:
30561 case AtomicRMWInst::FMin:
30562 case AtomicRMWInst::UIncWrap:
30563 case AtomicRMWInst::UDecWrap:
30564 default:
30565 // These always require a non-trivial set of data operations on x86. We must
30566 // use a cmpxchg loop.
30567 return AtomicExpansionKind::CmpXChg;
30571 LoadInst *
30572 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
30573 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
30574 Type *MemType = AI->getType();
30575 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
30576 // there is no benefit in turning such RMWs into loads, and it is actually
30577 // harmful as it introduces an mfence.
30578 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
30579 return nullptr;
30581 // If this is a canonical idempotent atomicrmw w/no uses, we have a better
30582 // lowering available in lowerAtomicArith.
30583 // TODO: push more cases through this path.
30584 if (auto *C = dyn_cast<ConstantInt>(AI->getValOperand()))
30585 if (AI->getOperation() == AtomicRMWInst::Or && C->isZero() &&
30586 AI->use_empty())
30587 return nullptr;
30589 IRBuilder<> Builder(AI);
30590 Builder.CollectMetadataToCopy(AI, {LLVMContext::MD_pcsections});
30591 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
30592 auto SSID = AI->getSyncScopeID();
30593 // We must restrict the ordering to avoid generating loads with Release or
30594 // ReleaseAcquire orderings.
30595 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
30597 // Before the load we need a fence. Here is an example lifted from
30598 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
30599 // is required:
30600 // Thread 0:
30601 // x.store(1, relaxed);
30602 // r1 = y.fetch_add(0, release);
30603 // Thread 1:
30604 // y.fetch_add(42, acquire);
30605 // r2 = x.load(relaxed);
30606 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
30607 // lowered to just a load without a fence. A mfence flushes the store buffer,
30608 // making the optimization clearly correct.
30609 // FIXME: the fence is required if isReleaseOrStronger(Order), but it is not
30610 // clear that it is otherwise; we might be able to be more aggressive on
30611 // relaxed idempotent rmw. In practice, they do not look useful, so we don't
30612 // try to be especially clever.
30613 if (SSID == SyncScope::SingleThread)
30614 // FIXME: we could just insert an ISD::MEMBARRIER here, except we are at
30615 // the IR level, so we must wrap it in an intrinsic.
30616 return nullptr;
30618 if (!Subtarget.hasMFence())
30619 // FIXME: it might make sense to use a locked operation here but on a
30620 // different cache-line to prevent cache-line bouncing. In practice it
30621 // is probably a small win, and x86 processors without mfence are rare
30622 // enough that we do not bother.
30623 return nullptr;
30625 Function *MFence =
30626 llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
30627 Builder.CreateCall(MFence, {});
30629 // Finally we can emit the atomic load.
30630 LoadInst *Loaded = Builder.CreateAlignedLoad(
30631 AI->getType(), AI->getPointerOperand(), AI->getAlign());
30632 Loaded->setAtomic(Order, SSID);
30633 AI->replaceAllUsesWith(Loaded);
30634 AI->eraseFromParent();
30635 return Loaded;
30638 /// Emit a locked operation on a stack location which does not change any
30639 /// memory location, but does involve a lock prefix. Location is chosen to be
30640 /// a) very likely accessed only by a single thread to minimize cache traffic,
30641 /// and b) definitely dereferenceable. Returns the new Chain result.
30642 static SDValue emitLockedStackOp(SelectionDAG &DAG,
30643 const X86Subtarget &Subtarget, SDValue Chain,
30644 const SDLoc &DL) {
30645 // Implementation notes:
30646 // 1) LOCK prefix creates a full read/write reordering barrier for memory
30647 // operations issued by the current processor. As such, the location
30648 // referenced is not relevant for the ordering properties of the instruction.
30649 // See: Intel® 64 and IA-32 Architectures Software Developer’s Manual,
30650 // 8.2.3.9 Loads and Stores Are Not Reordered with Locked Instructions
30651 // 2) Using an immediate operand appears to be the best encoding choice
30652 // here since it doesn't require an extra register.
30653 // 3) OR appears to be very slightly faster than ADD. (Though, the difference
30654 // is small enough it might just be measurement noise.)
30655 // 4) When choosing offsets, there are several contributing factors:
30656 // a) If there's no redzone, we default to TOS. (We could allocate a cache
30657 // line aligned stack object to improve this case.)
30658 // b) To minimize our chances of introducing a false dependence, we prefer
30659 // to offset the stack usage from TOS slightly.
30660 // c) To minimize concerns about cross thread stack usage - in particular,
30661 // the idiomatic MyThreadPool.run([&StackVars]() {...}) pattern which
30662 // captures state in the TOS frame and accesses it from many threads -
30663 // we want to use an offset such that the offset is in a distinct cache
30664 // line from the TOS frame.
30666 // For a general discussion of the tradeoffs and benchmark results, see:
30667 // https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
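// The emitted operation is roughly `lock orl $0, (%esp/%rsp)`, or
// `lock orl $0, -64(%rsp)` when a 128-byte red zone is present.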
30669 auto &MF = DAG.getMachineFunction();
30670 auto &TFL = *Subtarget.getFrameLowering();
30671 const unsigned SPOffset = TFL.has128ByteRedZone(MF) ? -64 : 0;
30673 if (Subtarget.is64Bit()) {
30674 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
30675 SDValue Ops[] = {
30676 DAG.getRegister(X86::RSP, MVT::i64), // Base
30677 DAG.getTargetConstant(1, DL, MVT::i8), // Scale
30678 DAG.getRegister(0, MVT::i64), // Index
30679 DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
30680 DAG.getRegister(0, MVT::i16), // Segment.
30681 Zero,
30682 Chain};
30683 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
30684 MVT::Other, Ops);
30685 return SDValue(Res, 1);
30688 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
30689 SDValue Ops[] = {
30690 DAG.getRegister(X86::ESP, MVT::i32), // Base
30691 DAG.getTargetConstant(1, DL, MVT::i8), // Scale
30692 DAG.getRegister(0, MVT::i32), // Index
30693 DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
30694 DAG.getRegister(0, MVT::i16), // Segment.
30695 Zero,
30696 Chain
30698 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
30699 MVT::Other, Ops);
30700 return SDValue(Res, 1);
30703 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
30704 SelectionDAG &DAG) {
30705 SDLoc dl(Op);
30706 AtomicOrdering FenceOrdering =
30707 static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
30708 SyncScope::ID FenceSSID =
30709 static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));
30711 // The only fence that needs an instruction is a sequentially-consistent
30712 // cross-thread fence.
30713 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
30714 FenceSSID == SyncScope::System) {
30715 if (Subtarget.hasMFence())
30716 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
30718 SDValue Chain = Op.getOperand(0);
30719 return emitLockedStackOp(DAG, Subtarget, Chain, dl);
30722 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
30723 return DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
30726 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
30727 SelectionDAG &DAG) {
30728 MVT T = Op.getSimpleValueType();
30729 SDLoc DL(Op);
30730 unsigned Reg = 0;
30731 unsigned size = 0;
30732 switch(T.SimpleTy) {
30733 default: llvm_unreachable("Invalid value type!");
30734 case MVT::i8: Reg = X86::AL; size = 1; break;
30735 case MVT::i16: Reg = X86::AX; size = 2; break;
30736 case MVT::i32: Reg = X86::EAX; size = 4; break;
30737 case MVT::i64:
30738 assert(Subtarget.is64Bit() && "Node not type legal!");
30739 Reg = X86::RAX; size = 8;
30740 break;
30742 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
30743 Op.getOperand(2), SDValue());
30744 SDValue Ops[] = { cpIn.getValue(0),
30745 Op.getOperand(1),
30746 Op.getOperand(3),
30747 DAG.getTargetConstant(size, DL, MVT::i8),
30748 cpIn.getValue(1) };
30749 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
30750 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
30751 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
30752 Ops, T, MMO);
30754 SDValue cpOut =
30755 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
30756 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
30757 MVT::i32, cpOut.getValue(2));
30758 SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);
30760 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
30761 cpOut, Success, EFLAGS.getValue(1));
30764 // Create MOVMSKB, taking into account whether we need to split for AVX1.
30765 static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG,
30766 const X86Subtarget &Subtarget) {
30767 MVT InVT = V.getSimpleValueType();
30769 if (InVT == MVT::v64i8) {
30770 SDValue Lo, Hi;
30771 std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
30772 Lo = getPMOVMSKB(DL, Lo, DAG, Subtarget);
30773 Hi = getPMOVMSKB(DL, Hi, DAG, Subtarget);
30774 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Lo);
30775 Hi = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Hi);
30776 Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
30777 DAG.getConstant(32, DL, MVT::i8));
30778 return DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
30780 if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) {
30781 SDValue Lo, Hi;
30782 std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
30783 Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
30784 Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
30785 Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
30786 DAG.getConstant(16, DL, MVT::i8));
30787 return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi);
30790 return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
30793 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
30794 SelectionDAG &DAG) {
30795 SDValue Src = Op.getOperand(0);
30796 MVT SrcVT = Src.getSimpleValueType();
30797 MVT DstVT = Op.getSimpleValueType();
30799 // Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each
30800 // half to v32i1 and concatenating the result.
30801 if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) {
30802 assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
30803 assert(Subtarget.hasBWI() && "Expected BWI target");
30804 SDLoc dl(Op);
30805 SDValue Lo, Hi;
30806 std::tie(Lo, Hi) = DAG.SplitScalar(Src, dl, MVT::i32, MVT::i32);
30807 Lo = DAG.getBitcast(MVT::v32i1, Lo);
30808 Hi = DAG.getBitcast(MVT::v32i1, Hi);
30809 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
30812 // Use MOVMSK for vector to scalar conversion to prevent scalarization.
30813 if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) {
30814 assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512");
30815 MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8;
30816 SDLoc DL(Op);
30817 SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT);
30818 V = getPMOVMSKB(DL, V, DAG, Subtarget);
30819 return DAG.getZExtOrTrunc(V, DL, DstVT);
30822 assert((SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
30823 SrcVT == MVT::i64) && "Unexpected VT!");
30825 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
30826 if (!(DstVT == MVT::f64 && SrcVT == MVT::i64) &&
30827 !(DstVT == MVT::x86mmx && SrcVT.isVector()))
30828 // This conversion needs to be expanded.
30829 return SDValue();
30831 SDLoc dl(Op);
30832 if (SrcVT.isVector()) {
30833 // Widen the vector in input in the case of MVT::v2i32.
30834 // Example: from MVT::v2i32 to MVT::v4i32.
30835 MVT NewVT = MVT::getVectorVT(SrcVT.getVectorElementType(),
30836 SrcVT.getVectorNumElements() * 2);
30837 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewVT, Src,
30838 DAG.getUNDEF(SrcVT));
30839 } else {
30840 assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
30841 "Unexpected source type in LowerBITCAST");
30842 Src = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
30845 MVT V2X64VT = DstVT == MVT::f64 ? MVT::v2f64 : MVT::v2i64;
30846 Src = DAG.getNode(ISD::BITCAST, dl, V2X64VT, Src);
30848 if (DstVT == MVT::x86mmx)
30849 return DAG.getNode(X86ISD::MOVDQ2Q, dl, DstVT, Src);
30851 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, DstVT, Src,
30852 DAG.getIntPtrConstant(0, dl));
30855 /// Compute the horizontal sum of bytes in V for the elements of VT.
30857 /// Requires V to be a byte vector and VT to be an integer vector type with
30858 /// wider elements than V's type. The width of the elements of VT determines
30859 /// how many bytes of V are summed horizontally to produce each element of the
30860 /// result.
30861 static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
30862 const X86Subtarget &Subtarget,
30863 SelectionDAG &DAG) {
30864 SDLoc DL(V);
30865 MVT ByteVecVT = V.getSimpleValueType();
30866 MVT EltVT = VT.getVectorElementType();
30867 assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
30868 "Expected value to have byte element type.");
30869 assert(EltVT != MVT::i8 &&
30870 "Horizontal byte sum only makes sense for wider elements!");
30871 unsigned VecSize = VT.getSizeInBits();
30872 assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
30874 // The PSADBW instruction horizontally adds all bytes and leaves the result in
30875 // i64 chunks, thus directly computing the pop count for v2i64 and v4i64.
30876 if (EltVT == MVT::i64) {
30877 SDValue Zeros = DAG.getConstant(0, DL, ByteVecVT);
30878 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
30879 V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
30880 return DAG.getBitcast(VT, V);
30883 if (EltVT == MVT::i32) {
30884 // We unpack the low half and high half into i32s interleaved with zeros so
30885 // that we can use PSADBW to horizontally sum them. The most useful part of
30886 // this is that it lines up the results of two PSADBW instructions to be
30887 // two v2i64 vectors which concatenated are the 4 population counts. We can
30888 // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
30889 SDValue Zeros = DAG.getConstant(0, DL, VT);
30890 SDValue V32 = DAG.getBitcast(VT, V);
30891 SDValue Low = getUnpackl(DAG, DL, VT, V32, Zeros);
30892 SDValue High = getUnpackh(DAG, DL, VT, V32, Zeros);
30894 // Do the horizontal sums into two v2i64s.
30895 Zeros = DAG.getConstant(0, DL, ByteVecVT);
30896 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
30897 Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
30898 DAG.getBitcast(ByteVecVT, Low), Zeros);
30899 High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
30900 DAG.getBitcast(ByteVecVT, High), Zeros);
30902 // Merge them together.
30903 MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
30904 V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
30905 DAG.getBitcast(ShortVecVT, Low),
30906 DAG.getBitcast(ShortVecVT, High));
30908 return DAG.getBitcast(VT, V);
30911 // The only element type left is i16.
30912 assert(EltVT == MVT::i16 && "Unknown how to handle type");
30914 // To obtain the pop count for each i16 element starting from the pop counts
30915 // of the i8 elements, shift the i16s left by 8, sum as i8s, and then shift the
30916 // i16s right by 8. It is important to shift as i16s, as an i8 vector shift
30917 // isn't directly supported.
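// Worked example for one i16 element whose bytes hold pop counts lo and hi:
// the i16 shift left by 8 moves lo into the high byte, the i8 add then makes
// the high byte lo+hi, and the final i16 shift right by 8 leaves lo+hi in the
// element with the high byte cleared.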
30918 SDValue ShifterV = DAG.getConstant(8, DL, VT);
30919 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
30920 V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
30921 DAG.getBitcast(ByteVecVT, V));
30922 return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
30925 static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
30926 const X86Subtarget &Subtarget,
30927 SelectionDAG &DAG) {
30928 MVT VT = Op.getSimpleValueType();
30929 MVT EltVT = VT.getVectorElementType();
30930 int NumElts = VT.getVectorNumElements();
30931 (void)EltVT;
30932 assert(EltVT == MVT::i8 && "Only vXi8 vector CTPOP lowering supported.");
30934 // Implement a lookup table in register by using an algorithm based on:
30935 // http://wm.ite.pl/articles/sse-popcount.html
30937 // The general idea is that every nibble of each byte in the input vector is an
30938 // index into an in-register pre-computed pop count table. We then split the
30939 // input vector into two new ones: (1) a vector with only the shifted-right
30940 // higher nibbles for each byte and (2) a vector with the lower nibbles (and
30941 // masked-out higher ones) for each byte. PSHUFB is used separately with both
30942 // to index the in-register table. Next, both are added and the result is an
30943 // i8 vector where each element contains the pop count for its input byte.
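// For example, the byte 0x5C has high nibble 0x5 and low nibble 0xC;
// LUT[0x5] = 2 and LUT[0xC] = 2, so the summed pop count is 4.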
30944 const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
30945 /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
30946 /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
30947 /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
30949 SmallVector<SDValue, 64> LUTVec;
30950 for (int i = 0; i < NumElts; ++i)
30951 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
30952 SDValue InRegLUT = DAG.getBuildVector(VT, DL, LUTVec);
30953 SDValue M0F = DAG.getConstant(0x0F, DL, VT);
30955 // High nibbles
30956 SDValue FourV = DAG.getConstant(4, DL, VT);
30957 SDValue HiNibbles = DAG.getNode(ISD::SRL, DL, VT, Op, FourV);
30959 // Low nibbles
30960 SDValue LoNibbles = DAG.getNode(ISD::AND, DL, VT, Op, M0F);
30962 // The input vector is used as the shuffle mask that indexes elements into the
30963 // LUT. After counting the low and high nibbles, add the two results to obtain
30964 // the final pop count per i8 element.
30965 SDValue HiPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, HiNibbles);
30966 SDValue LoPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, LoNibbles);
30967 return DAG.getNode(ISD::ADD, DL, VT, HiPopCnt, LoPopCnt);
30970 // Please ensure that any codegen change from LowerVectorCTPOP is reflected in
30971 // updated cost models in X86TTIImpl::getIntrinsicInstrCost.
30972 static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
30973 SelectionDAG &DAG) {
30974 MVT VT = Op.getSimpleValueType();
30975 assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
30976 "Unknown CTPOP type to handle");
30977 SDLoc DL(Op.getNode());
30978 SDValue Op0 = Op.getOperand(0);
30980 // TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
30981 if (Subtarget.hasVPOPCNTDQ()) {
30982 unsigned NumElems = VT.getVectorNumElements();
30983 assert((VT.getVectorElementType() == MVT::i8 ||
30984 VT.getVectorElementType() == MVT::i16) && "Unexpected type");
30985 if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
30986 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
30987 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
30988 Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
30989 return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
30993 // Decompose 256-bit ops into smaller 128-bit ops.
30994 if (VT.is256BitVector() && !Subtarget.hasInt256())
30995 return splitVectorIntUnary(Op, DAG);
30997 // Decompose 512-bit ops into smaller 256-bit ops.
30998 if (VT.is512BitVector() && !Subtarget.hasBWI())
30999 return splitVectorIntUnary(Op, DAG);
31001 // For element types greater than i8, do vXi8 pop counts and a bytesum.
31002 if (VT.getScalarType() != MVT::i8) {
31003 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
31004 SDValue ByteOp = DAG.getBitcast(ByteVT, Op0);
31005 SDValue PopCnt8 = DAG.getNode(ISD::CTPOP, DL, ByteVT, ByteOp);
31006 return LowerHorizontalByteSum(PopCnt8, VT, Subtarget, DAG);
31009 // We can't use the fast LUT approach, so fall back on LegalizeDAG.
31010 if (!Subtarget.hasSSSE3())
31011 return SDValue();
31013 return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
31016 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
31017 SelectionDAG &DAG) {
31018 assert(Op.getSimpleValueType().isVector() &&
31019 "We only do custom lowering for vector population count.");
31020 return LowerVectorCTPOP(Op, Subtarget, DAG);
31023 static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
31024 MVT VT = Op.getSimpleValueType();
31025 SDValue In = Op.getOperand(0);
31026 SDLoc DL(Op);
31028 // For scalars, it's still beneficial to transfer to/from the SIMD unit to
31029 // perform the BITREVERSE.
31030 if (!VT.isVector()) {
31031 MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
31032 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
31033 Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
31034 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
31035 DAG.getIntPtrConstant(0, DL));
31038 int NumElts = VT.getVectorNumElements();
31039 int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
31041 // Decompose 256-bit ops into smaller 128-bit ops.
31042 if (VT.is256BitVector())
31043 return splitVectorIntUnary(Op, DAG);
31045 assert(VT.is128BitVector() &&
31046 "Only 128-bit vector bitreverse lowering supported.");
31048 // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
31049 // perform the BSWAP in the shuffle.
31050 // It's best to shuffle using the second operand, as this will implicitly allow
31051 // memory folding for multiple vectors.
31052 SmallVector<SDValue, 16> MaskElts;
31053 for (int i = 0; i != NumElts; ++i) {
31054 for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
31055 int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
31056 int PermuteByte = SourceByte | (2 << 5);
31057 MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
31061 SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
31062 SDValue Res = DAG.getBitcast(MVT::v16i8, In);
31063 Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
31064 Res, Mask);
31065 return DAG.getBitcast(VT, Res);
31068 static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
31069 SelectionDAG &DAG) {
31070 MVT VT = Op.getSimpleValueType();
31072 if (Subtarget.hasXOP() && !VT.is512BitVector())
31073 return LowerBITREVERSE_XOP(Op, DAG);
31075 assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
31077 SDValue In = Op.getOperand(0);
31078 SDLoc DL(Op);
31080 assert(VT.getScalarType() == MVT::i8 &&
31081 "Only byte vector BITREVERSE supported");
31083 // Split v64i8 without BWI so that we can still use the PSHUFB lowering.
31084 if (VT == MVT::v64i8 && !Subtarget.hasBWI())
31085 return splitVectorIntUnary(Op, DAG);
31087 // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
31088 if (VT == MVT::v32i8 && !Subtarget.hasInt256())
31089 return splitVectorIntUnary(Op, DAG);
31091 unsigned NumElts = VT.getVectorNumElements();
31093 // If we have GFNI, we can use GF2P8AFFINEQB to reverse the bits.
31094 if (Subtarget.hasGFNI()) {
31095 MVT MatrixVT = MVT::getVectorVT(MVT::i64, NumElts / 8);
31096 SDValue Matrix = DAG.getConstant(0x8040201008040201ULL, DL, MatrixVT);
31097 Matrix = DAG.getBitcast(VT, Matrix);
31098 return DAG.getNode(X86ISD::GF2P8AFFINEQB, DL, VT, In, Matrix,
31099 DAG.getTargetConstant(0, DL, MVT::i8));
31102 // Perform BITREVERSE using PSHUFB lookups. Each byte is split into its two
31103 // nibbles, and a PSHUFB lookup finds the bit-reverse of each 0-15 value
31104 // (moved to the other nibble).
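// For example, for the byte 0x1E: LoLUT[0xE] = 0x70 and HiLUT[0x1] = 0x08,
// which OR together to 0x78 == bitreverse(0x1E).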
31105 SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
31106 SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
31107 SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
31109 const int LoLUT[16] = {
31110 /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
31111 /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
31112 /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
31113 /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
31114 const int HiLUT[16] = {
31115 /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
31116 /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
31117 /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
31118 /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
31120 SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
31121 for (unsigned i = 0; i < NumElts; ++i) {
31122 LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
31123 HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
31126 SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
31127 SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
31128 Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
31129 Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
31130 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
31133 static SDValue LowerPARITY(SDValue Op, const X86Subtarget &Subtarget,
31134 SelectionDAG &DAG) {
31135 SDLoc DL(Op);
31136 SDValue X = Op.getOperand(0);
31137 MVT VT = Op.getSimpleValueType();
31139 // Special case. If the input fits in 8 bits, we can use a single 8-bit TEST.
31140 if (VT == MVT::i8 ||
31141 DAG.MaskedValueIsZero(X, APInt::getBitsSetFrom(VT.getSizeInBits(), 8))) {
31142 X = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
31143 SDValue Flags = DAG.getNode(X86ISD::CMP, DL, MVT::i32, X,
31144 DAG.getConstant(0, DL, MVT::i8));
31145 // Copy the inverse of the parity flag into a register with setcc.
31146 SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
31147 // Extend to the original type.
31148 return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Setnp);
31151 // If we have POPCNT, use the default expansion.
31152 if (Subtarget.hasPOPCNT())
31153 return SDValue();
31155 if (VT == MVT::i64) {
31156 // Xor the high and low 32 bits together using a 32-bit operation.
31157 SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
31158 DAG.getNode(ISD::SRL, DL, MVT::i64, X,
31159 DAG.getConstant(32, DL, MVT::i8)));
31160 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
31161 X = DAG.getNode(ISD::XOR, DL, MVT::i32, Lo, Hi);
31164 if (VT != MVT::i16) {
31165 // Xor the high and low 16-bits together using a 32-bit operation.
31166 SDValue Hi16 = DAG.getNode(ISD::SRL, DL, MVT::i32, X,
31167 DAG.getConstant(16, DL, MVT::i8));
31168 X = DAG.getNode(ISD::XOR, DL, MVT::i32, X, Hi16);
31169 } else {
31170 // If the input is 16-bits, we need to extend to use an i32 shift below.
31171 X = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, X);
31174 // Finally xor the low 2 bytes together and use an 8-bit flag-setting xor.
31175 // This should allow an h-reg to be used to save a shift.
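// For an i32 input the tail of the sequence is roughly (illustrative asm,
// register choice arbitrary):
//   movl %eax, %ecx
//   shrl $16, %ecx
//   xorl %eax, %ecx
//   xorb %ch, %cl
//   setnp %al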
31176 SDValue Hi = DAG.getNode(
31177 ISD::TRUNCATE, DL, MVT::i8,
31178 DAG.getNode(ISD::SRL, DL, MVT::i32, X, DAG.getConstant(8, DL, MVT::i8)));
31179 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
31180 SDVTList VTs = DAG.getVTList(MVT::i8, MVT::i32);
31181 SDValue Flags = DAG.getNode(X86ISD::XOR, DL, VTs, Lo, Hi).getValue(1);
31183 // Copy the inverse of the parity flag into a register with setcc.
31184 SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
31185 // Extend to the original type.
31186 return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Setnp);
31189 static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
31190 const X86Subtarget &Subtarget) {
31191 unsigned NewOpc = 0;
31192 switch (N->getOpcode()) {
31193 case ISD::ATOMIC_LOAD_ADD:
31194 NewOpc = X86ISD::LADD;
31195 break;
31196 case ISD::ATOMIC_LOAD_SUB:
31197 NewOpc = X86ISD::LSUB;
31198 break;
31199 case ISD::ATOMIC_LOAD_OR:
31200 NewOpc = X86ISD::LOR;
31201 break;
31202 case ISD::ATOMIC_LOAD_XOR:
31203 NewOpc = X86ISD::LXOR;
31204 break;
31205 case ISD::ATOMIC_LOAD_AND:
31206 NewOpc = X86ISD::LAND;
31207 break;
31208 default:
31209 llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
31212 MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
31214 return DAG.getMemIntrinsicNode(
31215 NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
31216 {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
31217 /*MemVT=*/N->getSimpleValueType(0), MMO);
31220 /// Lower atomic_load_ops into LOCK-prefixed operations.
31221 static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
31222 const X86Subtarget &Subtarget) {
31223 AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
31224 SDValue Chain = N->getOperand(0);
31225 SDValue LHS = N->getOperand(1);
31226 SDValue RHS = N->getOperand(2);
31227 unsigned Opc = N->getOpcode();
31228 MVT VT = N->getSimpleValueType(0);
31229 SDLoc DL(N);
31231 // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
31232 // can only be lowered when the result is unused. They should have already
31233 // been transformed into a cmpxchg loop in AtomicExpand.
31234 if (N->hasAnyUseOfValue(0)) {
31235 // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
31236 // select LXADD if LOCK_SUB can't be selected.
31237 // Handle (atomic_load_xor p, SignBit) as (atomic_load_add p, SignBit) so we
31238 // can use LXADD as opposed to cmpxchg.
31239 if (Opc == ISD::ATOMIC_LOAD_SUB ||
31240 (Opc == ISD::ATOMIC_LOAD_XOR && isMinSignedConstant(RHS))) {
31241 RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
31242 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS, RHS,
31243 AN->getMemOperand());
31245 assert(Opc == ISD::ATOMIC_LOAD_ADD &&
31246 "Used AtomicRMW ops other than Add should have been expanded!");
31247 return N;
31250 // Specialized lowering for the canonical form of an idempotent atomicrmw.
31251 // The core idea here is that since the memory location isn't actually
31252 // changing, all we need is a lowering for the *ordering* impacts of the
31253 // atomicrmw. As such, we can choose a different operation and memory
31254 // location to minimize impact on other code.
31255 // The above holds unless the node is marked volatile in which
31256 // case it needs to be preserved according to the langref.
31257 if (Opc == ISD::ATOMIC_LOAD_OR && isNullConstant(RHS) && !AN->isVolatile()) {
31258 // On X86, the only ordering which actually requires an instruction is
31259 // seq_cst which isn't SingleThread; everything else just needs to be preserved
31260 // during codegen and then dropped. Note that we expect (but don't assume)
31261 // that orderings other than seq_cst and acq_rel have been canonicalized to
31262 // a store or load.
31263 if (AN->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent &&
31264 AN->getSyncScopeID() == SyncScope::System) {
31265 // Prefer a locked operation against a stack location to minimize cache
31266 // traffic. This assumes that stack locations are very likely to be
31267 // accessed only by the owning thread.
31268 SDValue NewChain = emitLockedStackOp(DAG, Subtarget, Chain, DL);
31269 assert(!N->hasAnyUseOfValue(0));
31270 // NOTE: The getUNDEF is needed to give something for the unused result 0.
31271 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
31272 DAG.getUNDEF(VT), NewChain);
31274 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
31275 SDValue NewChain = DAG.getNode(ISD::MEMBARRIER, DL, MVT::Other, Chain);
31276 assert(!N->hasAnyUseOfValue(0));
31277 // NOTE: The getUNDEF is needed to give something for the unused result 0.
31278 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
31279 DAG.getUNDEF(VT), NewChain);
31282 SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget);
31283 // RAUW the chain, but don't worry about the result, as it's unused.
31284 assert(!N->hasAnyUseOfValue(0));
31285 // NOTE: The getUNDEF is needed to give something for the unused result 0.
31286 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
31287 DAG.getUNDEF(VT), LockOp.getValue(1));
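// Illustrative sketch of what the lowering above aims for (actual instruction
// selection depends on the subtarget and operand sizes):
//   atomicrmw add ptr %p, i32 1 seq_cst        ; result unused -> lock add
//   %v = atomicrmw add ptr %p, i32 1 seq_cst   ; result used   -> lock xadd
//   %v = atomicrmw sub ptr %p, i32 5 seq_cst   ; result used   -> negate, lock xadd
//   atomicrmw or ptr %p, i32 0 seq_cst         ; idempotent    -> locked op on a
//                                              ;   stack slot (cheap fence)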
31290 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG,
31291 const X86Subtarget &Subtarget) {
31292 auto *Node = cast<AtomicSDNode>(Op.getNode());
31293 SDLoc dl(Node);
31294 EVT VT = Node->getMemoryVT();
31296 bool IsSeqCst =
31297 Node->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent;
31298 bool IsTypeLegal = DAG.getTargetLoweringInfo().isTypeLegal(VT);
31300 // If this store is not sequentially consistent and the type is legal
31301 // we can just keep it.
31302 if (!IsSeqCst && IsTypeLegal)
31303 return Op;
31305 if (VT == MVT::i64 && !IsTypeLegal) {
31306 // For illegal i64 atomic_stores, we can try to use MOVQ or MOVLPS if SSE
31307 // is enabled.
31308 bool NoImplicitFloatOps =
31309 DAG.getMachineFunction().getFunction().hasFnAttribute(
31310 Attribute::NoImplicitFloat);
31311 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
31312 SDValue Chain;
31313 if (Subtarget.hasSSE1()) {
31314 SDValue SclToVec =
31315 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Node->getVal());
31316 MVT StVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32;
31317 SclToVec = DAG.getBitcast(StVT, SclToVec);
31318 SDVTList Tys = DAG.getVTList(MVT::Other);
31319 SDValue Ops[] = {Node->getChain(), SclToVec, Node->getBasePtr()};
31320 Chain = DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops,
31321 MVT::i64, Node->getMemOperand());
31322 } else if (Subtarget.hasX87()) {
31323 // First load this into an 80-bit X87 register using a stack temporary.
31324 // This will put the whole integer into the significand.
31325 SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
31326 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
31327 MachinePointerInfo MPI =
31328 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
31329 Chain = DAG.getStore(Node->getChain(), dl, Node->getVal(), StackPtr,
31330 MPI, MaybeAlign(), MachineMemOperand::MOStore);
31331 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
31332 SDValue LdOps[] = {Chain, StackPtr};
31333 SDValue Value = DAG.getMemIntrinsicNode(
31334 X86ISD::FILD, dl, Tys, LdOps, MVT::i64, MPI,
31335 /*Align*/ std::nullopt, MachineMemOperand::MOLoad);
31336 Chain = Value.getValue(1);
31338 // Now use an FIST to do the atomic store.
31339 SDValue StoreOps[] = {Chain, Value, Node->getBasePtr()};
31340 Chain =
31341 DAG.getMemIntrinsicNode(X86ISD::FIST, dl, DAG.getVTList(MVT::Other),
31342 StoreOps, MVT::i64, Node->getMemOperand());
31345 if (Chain) {
31346 // If this is a sequentially consistent store, also emit an appropriate
31347 // barrier.
31348 if (IsSeqCst)
31349 Chain = emitLockedStackOp(DAG, Subtarget, Chain, dl);
31351 return Chain;
31356 // Convert seq_cst store -> xchg
31357 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
31358 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
31359 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl, Node->getMemoryVT(),
31360 Node->getOperand(0), Node->getOperand(2),
31361 Node->getOperand(1), Node->getMemOperand());
31362 return Swap.getValue(1);
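// Rough examples of the results expected from the lowering above, assuming a
// 32-bit target where i64 is illegal (exact sequences are chosen later during
// instruction selection):
//   store atomic i64 %v, ptr %p release  + SSE2 -> movq xmm, [p]
//   store atomic i64 %v, ptr %p seq_cst  + SSE2 -> movq xmm, [p], then a locked
//                                                  op on a stack slot as the fence
//   same store, x87 only                        -> fild from a stack temp, fistp to [p]
// When the type is legal but the store is seq_cst, it becomes an xchg instead.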
31365 static SDValue LowerADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) {
31366 SDNode *N = Op.getNode();
31367 MVT VT = N->getSimpleValueType(0);
31368 unsigned Opc = Op.getOpcode();
31370 // Let legalize expand this if it isn't a legal type yet.
31371 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
31372 return SDValue();
31374 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
31375 SDLoc DL(N);
31377 // Set the carry flag.
31378 SDValue Carry = Op.getOperand(2);
31379 EVT CarryVT = Carry.getValueType();
31380 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
31381 Carry, DAG.getAllOnesConstant(DL, CarryVT));
31383 bool IsAdd = Opc == ISD::UADDO_CARRY || Opc == ISD::SADDO_CARRY;
31384 SDValue Sum = DAG.getNode(IsAdd ? X86ISD::ADC : X86ISD::SBB, DL, VTs,
31385 Op.getOperand(0), Op.getOperand(1),
31386 Carry.getValue(1));
31388 bool IsSigned = Opc == ISD::SADDO_CARRY || Opc == ISD::SSUBO_CARRY;
31389 SDValue SetCC = getSETCC(IsSigned ? X86::COND_O : X86::COND_B,
31390 Sum.getValue(1), DL, DAG);
31391 if (N->getValueType(1) == MVT::i1)
31392 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
31394 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
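// The carry-in trick above: X86ISD::ADD of the incoming carry with all-ones
// sets EFLAGS.CF exactly when the carry is non-zero (1 + 0xFF..FF wraps to 0
// with CF=1; 0 + 0xFF..FF gives -1 with CF=0), so the ADC/SBB can consume it
// directly. Roughly, (uaddo_carry a, b, c) becomes:
//   addb $-1, %cl    ; materialize c into CF
//   adcl %esi, %edi  ; a + b + CF
//   setb %al         ; carry-out (seto for the signed variants)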
31397 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
31398 SelectionDAG &DAG) {
31399 assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
31401 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
31402 // which returns the values as { float, float } (in XMM0) or
31403 // { double, double } (which is returned in XMM0, XMM1).
31404 SDLoc dl(Op);
31405 SDValue Arg = Op.getOperand(0);
31406 EVT ArgVT = Arg.getValueType();
31407 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
31409 TargetLowering::ArgListTy Args;
31410 TargetLowering::ArgListEntry Entry;
31412 Entry.Node = Arg;
31413 Entry.Ty = ArgTy;
31414 Entry.IsSExt = false;
31415 Entry.IsZExt = false;
31416 Args.push_back(Entry);
31418 bool isF64 = ArgVT == MVT::f64;
31419 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
31420 // the small struct {f32, f32} is returned in (eax, edx). For f64,
31421 // the results are returned via SRet in memory.
31422 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
31423 RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
31424 const char *LibcallName = TLI.getLibcallName(LC);
31425 SDValue Callee =
31426 DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
31428 Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
31429 : (Type *)FixedVectorType::get(ArgTy, 4);
31431 TargetLowering::CallLoweringInfo CLI(DAG);
31432 CLI.setDebugLoc(dl)
31433 .setChain(DAG.getEntryNode())
31434 .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
31436 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
31438 if (isF64)
31439 // Returned in xmm0 and xmm1.
31440 return CallResult.first;
31442 // Returned in bits 0:31 and 32:63 of xmm0.
31443 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
31444 CallResult.first, DAG.getIntPtrConstant(0, dl));
31445 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
31446 CallResult.first, DAG.getIntPtrConstant(1, dl));
31447 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
31448 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
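// Example of the resulting call (hypothetical IR; the symbol names come from
// the runtime library info, typically __sincosf_stret / __sincos_stret on
// Darwin):
//   float:  %r = call <4 x float> @__sincosf_stret(float %x)
//           ; sin extracted from lane 0, cos from lane 1 of xmm0
//   double: %r = call { double, double } @__sincos_stret(double %x)
//           ; returned directly in xmm0 / xmm1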
31451 /// Widen a vector input to a vector of NVT. The
31452 /// input vector must have the same element type as NVT.
31453 static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
31454 bool FillWithZeroes = false) {
31455 // Check if InOp already has the right width.
31456 MVT InVT = InOp.getSimpleValueType();
31457 if (InVT == NVT)
31458 return InOp;
31460 if (InOp.isUndef())
31461 return DAG.getUNDEF(NVT);
31463 assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
31464 "input and widen element type must match");
31466 unsigned InNumElts = InVT.getVectorNumElements();
31467 unsigned WidenNumElts = NVT.getVectorNumElements();
31468 assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
31469 "Unexpected request for vector widening");
31471 SDLoc dl(InOp);
31472 if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
31473 InOp.getNumOperands() == 2) {
31474 SDValue N1 = InOp.getOperand(1);
31475 if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
31476 N1.isUndef()) {
31477 InOp = InOp.getOperand(0);
31478 InVT = InOp.getSimpleValueType();
31479 InNumElts = InVT.getVectorNumElements();
31482 if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
31483 ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
31484 SmallVector<SDValue, 16> Ops;
31485 for (unsigned i = 0; i < InNumElts; ++i)
31486 Ops.push_back(InOp.getOperand(i));
31488 EVT EltVT = InOp.getOperand(0).getValueType();
31490 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
31491 DAG.getUNDEF(EltVT);
31492 for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
31493 Ops.push_back(FillVal);
31494 return DAG.getBuildVector(NVT, dl, Ops);
31496 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
31497 DAG.getUNDEF(NVT);
31498 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
31499 InOp, DAG.getIntPtrConstant(0, dl));
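// For example, ExtendToType(v2i32 X, v8i32, DAG, /*FillWithZeroes=*/true)
// yields (insert_subvector (v8i32 zero), X, 0): the two original lanes
// followed by six zero lanes. With FillWithZeroes=false the padding lanes are
// undef instead, and constant build_vectors are widened element by element.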
31502 static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
31503 SelectionDAG &DAG) {
31504 assert(Subtarget.hasAVX512() &&
31505 "MGATHER/MSCATTER are supported on AVX-512 arch only");
31507 MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
31508 SDValue Src = N->getValue();
31509 MVT VT = Src.getSimpleValueType();
31510 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
31511 SDLoc dl(Op);
31513 SDValue Scale = N->getScale();
31514 SDValue Index = N->getIndex();
31515 SDValue Mask = N->getMask();
31516 SDValue Chain = N->getChain();
31517 SDValue BasePtr = N->getBasePtr();
31519 if (VT == MVT::v2f32 || VT == MVT::v2i32) {
31520 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
31521 // If the index is v2i64 and we have VLX we can use xmm for data and index.
31522 if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
31523 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
31524 EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
31525 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Src, DAG.getUNDEF(VT));
31526 SDVTList VTs = DAG.getVTList(MVT::Other);
31527 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
31528 return DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
31529 N->getMemoryVT(), N->getMemOperand());
31531 return SDValue();
31534 MVT IndexVT = Index.getSimpleValueType();
31536 // If the index is v2i32, we're being called by type legalization and we
31537 // should just let the default handling take care of it.
31538 if (IndexVT == MVT::v2i32)
31539 return SDValue();
31541 // If we don't have VLX and neither the data nor the index is 512 bits, we
31542 // need to widen until one of them is.
31543 if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
31544 !Index.getSimpleValueType().is512BitVector()) {
31545 // Determine how much we need to widen by to get a 512-bit type.
31546 unsigned Factor = std::min(512/VT.getSizeInBits(),
31547 512/IndexVT.getSizeInBits());
31548 unsigned NumElts = VT.getVectorNumElements() * Factor;
31550 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
31551 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
31552 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
31554 Src = ExtendToType(Src, VT, DAG);
31555 Index = ExtendToType(Index, IndexVT, DAG);
31556 Mask = ExtendToType(Mask, MaskVT, DAG, true);
31559 SDVTList VTs = DAG.getVTList(MVT::Other);
31560 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
31561 return DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
31562 N->getMemoryVT(), N->getMemOperand());
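// Widening example for the non-VLX path above: scattering v8f32 data with a
// v8i32 index gives Factor = min(512/256, 512/256) = 2, so data, index and
// mask are widened to v16f32 / v16i32 / v16i1, with the extra mask lanes
// zeroed so they stay inactive. A v8i64 index is already 512 bits wide and
// needs no widening.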
31565 static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
31566 SelectionDAG &DAG) {
31568 MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
31569 MVT VT = Op.getSimpleValueType();
31570 MVT ScalarVT = VT.getScalarType();
31571 SDValue Mask = N->getMask();
31572 MVT MaskVT = Mask.getSimpleValueType();
31573 SDValue PassThru = N->getPassThru();
31574 SDLoc dl(Op);
31576 // Handle AVX masked loads which don't support a passthru value other than 0.
31577 if (MaskVT.getVectorElementType() != MVT::i1) {
31578 // We also allow undef in the isel pattern.
31579 if (PassThru.isUndef() || ISD::isBuildVectorAllZeros(PassThru.getNode()))
31580 return Op;
31582 SDValue NewLoad = DAG.getMaskedLoad(
31583 VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
31584 getZeroVector(VT, Subtarget, DAG, dl), N->getMemoryVT(),
31585 N->getMemOperand(), N->getAddressingMode(), N->getExtensionType(),
31586 N->isExpandingLoad());
31587 // Emit a blend.
31588 SDValue Select = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru);
31589 return DAG.getMergeValues({ Select, NewLoad.getValue(1) }, dl);
31592 assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
31593 "Expanding masked load is supported on AVX-512 target only!");
31595 assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
31596 "Expanding masked load is supported for 32 and 64-bit types only!");
31598 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
31599 "Cannot lower masked load op.");
31601 assert((ScalarVT.getSizeInBits() >= 32 ||
31602 (Subtarget.hasBWI() &&
31603 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
31604 "Unsupported masked load op.");
31606 // This operation is legal for targets with VLX, but without
31607 // VLX the vector should be widened to 512 bits.
31608 unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
31609 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
31610 PassThru = ExtendToType(PassThru, WideDataVT, DAG);
31612 // Mask element has to be i1.
31613 assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
31614 "Unexpected mask type");
31616 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
31618 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
31619 SDValue NewLoad = DAG.getMaskedLoad(
31620 WideDataVT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
31621 PassThru, N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
31622 N->getExtensionType(), N->isExpandingLoad());
31624 SDValue Extract =
31625 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, NewLoad.getValue(0),
31626 DAG.getIntPtrConstant(0, dl));
31627 SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
31628 return DAG.getMergeValues(RetOps, dl);
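// Example for the AVX-512-without-VLX path: a masked load of v8i32 is widened
// to a v16i32 load whose v16i1 mask has its upper eight lanes zeroed, and the
// original v8i32 result is extracted from the low half afterwards.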
31631 static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
31632 SelectionDAG &DAG) {
31633 MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
31634 SDValue DataToStore = N->getValue();
31635 MVT VT = DataToStore.getSimpleValueType();
31636 MVT ScalarVT = VT.getScalarType();
31637 SDValue Mask = N->getMask();
31638 SDLoc dl(Op);
31640 assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
31641 "Compressing masked store is supported on AVX-512 targets only!");
31643 assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
31644 "Compressing masked store is supported for 32 and 64-bit types only!");
31646 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
31647 "Cannot lower masked store op.");
31649 assert((ScalarVT.getSizeInBits() >= 32 ||
31650 (Subtarget.hasBWI() &&
31651 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
31652 "Unsupported masked store op.");
31654 // This operation is legal for targets with VLX, but without
31655 // VLX the vector should be widened to 512 bits.
31656 unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
31657 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
31659 // Mask element has to be i1.
31660 assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
31661 "Unexpected mask type");
31663 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
31665 DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
31666 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
31667 return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
31668 N->getOffset(), Mask, N->getMemoryVT(),
31669 N->getMemOperand(), N->getAddressingMode(),
31670 N->isTruncatingStore(), N->isCompressingStore());
31673 static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
31674 SelectionDAG &DAG) {
31675 assert(Subtarget.hasAVX2() &&
31676 "MGATHER/MSCATTER are supported on AVX-512/AVX-2 arch only");
31678 MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
31679 SDLoc dl(Op);
31680 MVT VT = Op.getSimpleValueType();
31681 SDValue Index = N->getIndex();
31682 SDValue Mask = N->getMask();
31683 SDValue PassThru = N->getPassThru();
31684 MVT IndexVT = Index.getSimpleValueType();
31686 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
31688 // If the index is v2i32, we're being called by type legalization.
31689 if (IndexVT == MVT::v2i32)
31690 return SDValue();
31692 // If we don't have VLX and neither the passthru nor the index is 512 bits,
31693 // we need to widen until one of them is.
31694 MVT OrigVT = VT;
31695 if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
31696 !IndexVT.is512BitVector()) {
31697 // Determine how much we need to widen by to get a 512-bit type.
31698 unsigned Factor = std::min(512/VT.getSizeInBits(),
31699 512/IndexVT.getSizeInBits());
31701 unsigned NumElts = VT.getVectorNumElements() * Factor;
31703 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
31704 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
31705 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
31707 PassThru = ExtendToType(PassThru, VT, DAG);
31708 Index = ExtendToType(Index, IndexVT, DAG);
31709 Mask = ExtendToType(Mask, MaskVT, DAG, true);
31712 // Break dependency on the data register.
31713 if (PassThru.isUndef())
31714 PassThru = getZeroVector(VT, Subtarget, DAG, dl);
31716 SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
31717 N->getScale() };
31718 SDValue NewGather = DAG.getMemIntrinsicNode(
31719 X86ISD::MGATHER, dl, DAG.getVTList(VT, MVT::Other), Ops, N->getMemoryVT(),
31720 N->getMemOperand());
31721 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
31722 NewGather, DAG.getIntPtrConstant(0, dl));
31723 return DAG.getMergeValues({Extract, NewGather.getValue(1)}, dl);
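// Example: under AVX-512 without VLX, a v4f32 gather with a v4i64 index has
// Factor = min(512/128, 512/256) = 2, so it is widened to a v8f32 gather with
// a v8i64 index and a zero-padded v8i1 mask; the v4f32 result is then
// extracted from the low half.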
31726 static SDValue LowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) {
31727 SDLoc dl(Op);
31728 SDValue Src = Op.getOperand(0);
31729 MVT DstVT = Op.getSimpleValueType();
31731 AddrSpaceCastSDNode *N = cast<AddrSpaceCastSDNode>(Op.getNode());
31732 unsigned SrcAS = N->getSrcAddressSpace();
31734 assert(SrcAS != N->getDestAddressSpace() &&
31735 "addrspacecast must be between different address spaces");
31737 if (SrcAS == X86AS::PTR32_UPTR && DstVT == MVT::i64) {
31738 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Src);
31739 } else if (DstVT == MVT::i64) {
31740 Op = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Src);
31741 } else if (DstVT == MVT::i32) {
31742 Op = DAG.getNode(ISD::TRUNCATE, dl, DstVT, Src);
31743 } else {
31744 report_fatal_error("Bad address space in addrspacecast");
31746 return Op;
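// Sketch of the three supported cases (address space numbers as defined in
// X86AS, e.g. 270 = ptr32_sptr, 271 = ptr32_uptr, 272 = ptr64):
//   addrspacecast ptr addrspace(271) %p to ptr      -> zext  i32 to i64
//   addrspacecast ptr addrspace(270) %p to ptr      -> sext  i32 to i64
//   addrspacecast ptr %p to ptr addrspace(270/271)  -> trunc i64 to i32
// Any other combination hits the report_fatal_error above.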
31749 SDValue X86TargetLowering::LowerGC_TRANSITION(SDValue Op,
31750 SelectionDAG &DAG) const {
31751 // TODO: Eventually, the lowering of these nodes should be informed by or
31752 // deferred to the GC strategy for the function in which they appear. For
31753 // now, however, they must be lowered to something. Since they are logically
31754 // no-ops in the case of a null GC strategy (or a GC strategy which does not
31755 // require special handling for these nodes), lower them as literal NOOPs for
31756 // the time being.
31757 SmallVector<SDValue, 2> Ops;
31758 Ops.push_back(Op.getOperand(0));
31759 if (Op->getGluedNode())
31760 Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
31762 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
31763 return SDValue(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
31766 // Custom split CVTPS2PH with wide types.
31767 static SDValue LowerCVTPS2PH(SDValue Op, SelectionDAG &DAG) {
31768 SDLoc dl(Op);
31769 EVT VT = Op.getValueType();
31770 SDValue Lo, Hi;
31771 std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
31772 EVT LoVT, HiVT;
31773 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
31774 SDValue RC = Op.getOperand(1);
31775 Lo = DAG.getNode(X86ISD::CVTPS2PH, dl, LoVT, Lo, RC);
31776 Hi = DAG.getNode(X86ISD::CVTPS2PH, dl, HiVT, Hi, RC);
31777 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
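// For example, a CVTPS2PH whose result type is wider than a single register
// (say v32f32 -> v32f16 under AVX-512) is split into two half-width
// conversions that share the rounding-control operand and are concatenated
// back together.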
31780 static SDValue LowerPREFETCH(SDValue Op, const X86Subtarget &Subtarget,
31781 SelectionDAG &DAG) {
31782 unsigned IsData = Op.getConstantOperandVal(4);
31784 // We don't support non-data prefetch without PREFETCHI.
31785 // Just preserve the chain.
31786 if (!IsData && !Subtarget.hasPREFETCHI())
31787 return Op.getOperand(0);
31789 return Op;
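// E.g. an instruction prefetch (llvm.prefetch with a cache-type operand of 0)
// is dropped entirely on targets without PREFETCHI, leaving just the chain;
// data prefetches, and any prefetch on PREFETCHI-capable targets, are kept.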
31792 static StringRef getInstrStrFromOpNo(const SmallVectorImpl<StringRef> &AsmStrs,
31793 unsigned OpNo) {
31794 const APInt Operand(32, OpNo);
31795 std::string OpNoStr = llvm::toString(Operand, 10, false);
31796 std::string Str(" $");
31798 std::string OpNoStr1(Str + OpNoStr); // e.g. " $1" (OpNo=1)
31799 std::string OpNoStr2(Str + "{" + OpNoStr + ":"); // With modifier, e.g. ${1:P}
31801 auto I = StringRef::npos;
31802 for (auto &AsmStr : AsmStrs) {
31803 // Match the OpNo string. We should match exactly so we don't match a
31804 // sub-string, e.g. "$12" contains "$1".
31805 if (AsmStr.ends_with(OpNoStr1))
31806 I = AsmStr.size() - OpNoStr1.size();
31808 // Get the index of operand in AsmStr.
31809 if (I == StringRef::npos)
31810 I = AsmStr.find(OpNoStr1 + ",");
31811 if (I == StringRef::npos)
31812 I = AsmStr.find(OpNoStr2);
31814 if (I == StringRef::npos)
31815 continue;
31817 assert(I > 0 && "Unexpected inline asm string!");
31818 // Remove the operand string and label (if they exist).
31819 // For example:
31820 // ".L__MSASMLABEL_.${:uid}__l:call dword ptr ${0:P}"
31821 // ==>
31822 // ".L__MSASMLABEL_.${:uid}__l:call dword ptr "
31823 // ==>
31824 // "call dword ptr "
31825 auto TmpStr = AsmStr.substr(0, I);
31826 I = TmpStr.rfind(':');
31827 if (I != StringRef::npos)
31828 TmpStr = TmpStr.substr(I + 1);
31829 return TmpStr.take_while(llvm::isAlpha);
31832 return StringRef();
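// Example: with AsmStrs = {"jmp ${1:P}"} and OpNo = 1, the " ${1:" form
// matches, the label (if any) and the operand text are stripped, and "jmp" is
// returned. OpNo = 12 would not match a stray " $1" because only " $12" at
// the end of the string, " $12," or " ${12:" count as hits.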
31835 bool X86TargetLowering::isInlineAsmTargetBranch(
31836 const SmallVectorImpl<StringRef> &AsmStrs, unsigned OpNo) const {
31837 // In a __asm block, __asm inst foo where inst is CALL or JMP should be
31838 // changed from indirect TargetLowering::C_Memory to direct
31839 // TargetLowering::C_Address.
31840 // We don't need to special case LOOP* and Jcc, which cannot target a memory
31841 // location.
31842 StringRef Inst = getInstrStrFromOpNo(AsmStrs, OpNo);
31843 return Inst.equals_insensitive("call") || Inst.equals_insensitive("jmp");
31846 /// Provide custom lowering hooks for some operations.
31847 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
31848 switch (Op.getOpcode()) {
31849 default: llvm_unreachable("Should not custom lower this!");
31850 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
31851 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
31852 return LowerCMP_SWAP(Op, Subtarget, DAG);
31853 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
31854 case ISD::ATOMIC_LOAD_ADD:
31855 case ISD::ATOMIC_LOAD_SUB:
31856 case ISD::ATOMIC_LOAD_OR:
31857 case ISD::ATOMIC_LOAD_XOR:
31858 case ISD::ATOMIC_LOAD_AND: return lowerAtomicArith(Op, DAG, Subtarget);
31859 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG, Subtarget);
31860 case ISD::BITREVERSE: return LowerBITREVERSE(Op, Subtarget, DAG);
31861 case ISD::PARITY: return LowerPARITY(Op, Subtarget, DAG);
31862 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
31863 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
31864 case ISD::VECTOR_SHUFFLE: return lowerVECTOR_SHUFFLE(Op, Subtarget, DAG);
31865 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
31866 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
31867 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
31868 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
31869 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
31870 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
31871 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
31872 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
31873 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
31874 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
31875 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
31876 case ISD::SHL_PARTS:
31877 case ISD::SRA_PARTS:
31878 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
31879 case ISD::FSHL:
31880 case ISD::FSHR: return LowerFunnelShift(Op, Subtarget, DAG);
31881 case ISD::STRICT_SINT_TO_FP:
31882 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
31883 case ISD::STRICT_UINT_TO_FP:
31884 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
31885 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
31886 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
31887 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
31888 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
31889 case ISD::ZERO_EXTEND_VECTOR_INREG:
31890 case ISD::SIGN_EXTEND_VECTOR_INREG:
31891 return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
31892 case ISD::FP_TO_SINT:
31893 case ISD::STRICT_FP_TO_SINT:
31894 case ISD::FP_TO_UINT:
31895 case ISD::STRICT_FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
31896 case ISD::FP_TO_SINT_SAT:
31897 case ISD::FP_TO_UINT_SAT: return LowerFP_TO_INT_SAT(Op, DAG);
31898 case ISD::FP_EXTEND:
31899 case ISD::STRICT_FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
31900 case ISD::FP_ROUND:
31901 case ISD::STRICT_FP_ROUND: return LowerFP_ROUND(Op, DAG);
31902 case ISD::FP16_TO_FP:
31903 case ISD::STRICT_FP16_TO_FP: return LowerFP16_TO_FP(Op, DAG);
31904 case ISD::FP_TO_FP16:
31905 case ISD::STRICT_FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
31906 case ISD::FP_TO_BF16: return LowerFP_TO_BF16(Op, DAG);
31907 case ISD::LOAD: return LowerLoad(Op, Subtarget, DAG);
31908 case ISD::STORE: return LowerStore(Op, Subtarget, DAG);
31909 case ISD::FADD:
31910 case ISD::FSUB: return lowerFaddFsub(Op, DAG);
31911 case ISD::FROUND: return LowerFROUND(Op, DAG);
31912 case ISD::FABS:
31913 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
31914 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
31915 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
31916 case ISD::LRINT:
31917 case ISD::LLRINT: return LowerLRINT_LLRINT(Op, DAG);
31918 case ISD::SETCC:
31919 case ISD::STRICT_FSETCC:
31920 case ISD::STRICT_FSETCCS: return LowerSETCC(Op, DAG);
31921 case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG);
31922 case ISD::SELECT: return LowerSELECT(Op, DAG);
31923 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
31924 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
31925 case ISD::VASTART: return LowerVASTART(Op, DAG);
31926 case ISD::VAARG: return LowerVAARG(Op, DAG);
31927 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
31928 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
31929 case ISD::INTRINSIC_VOID:
31930 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
31931 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
31932 case ISD::ADDROFRETURNADDR: return LowerADDROFRETURNADDR(Op, DAG);
31933 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
31934 case ISD::FRAME_TO_ARGS_OFFSET:
31935 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
31936 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
31937 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
31938 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
31939 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
31940 case ISD::EH_SJLJ_SETUP_DISPATCH:
31941 return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
31942 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
31943 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
31944 case ISD::GET_ROUNDING: return LowerGET_ROUNDING(Op, DAG);
31945 case ISD::SET_ROUNDING: return LowerSET_ROUNDING(Op, DAG);
31946 case ISD::GET_FPENV_MEM: return LowerGET_FPENV_MEM(Op, DAG);
31947 case ISD::SET_FPENV_MEM: return LowerSET_FPENV_MEM(Op, DAG);
31948 case ISD::RESET_FPENV: return LowerRESET_FPENV(Op, DAG);
31949 case ISD::CTLZ:
31950 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ(Op, Subtarget, DAG);
31951 case ISD::CTTZ:
31952 case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, Subtarget, DAG);
31953 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
31954 case ISD::MULHS:
31955 case ISD::MULHU: return LowerMULH(Op, Subtarget, DAG);
31956 case ISD::ROTL:
31957 case ISD::ROTR: return LowerRotate(Op, Subtarget, DAG);
31958 case ISD::SRA:
31959 case ISD::SRL:
31960 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
31961 case ISD::SADDO:
31962 case ISD::UADDO:
31963 case ISD::SSUBO:
31964 case ISD::USUBO: return LowerXALUO(Op, DAG);
31965 case ISD::SMULO:
31966 case ISD::UMULO: return LowerMULO(Op, Subtarget, DAG);
31967 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
31968 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
31969 case ISD::SADDO_CARRY:
31970 case ISD::SSUBO_CARRY:
31971 case ISD::UADDO_CARRY:
31972 case ISD::USUBO_CARRY: return LowerADDSUBO_CARRY(Op, DAG);
31973 case ISD::ADD:
31974 case ISD::SUB: return lowerAddSub(Op, DAG, Subtarget);
31975 case ISD::UADDSAT:
31976 case ISD::SADDSAT:
31977 case ISD::USUBSAT:
31978 case ISD::SSUBSAT: return LowerADDSAT_SUBSAT(Op, DAG, Subtarget);
31979 case ISD::SMAX:
31980 case ISD::SMIN:
31981 case ISD::UMAX:
31982 case ISD::UMIN: return LowerMINMAX(Op, Subtarget, DAG);
31983 case ISD::FMINIMUM:
31984 case ISD::FMAXIMUM:
31985 return LowerFMINIMUM_FMAXIMUM(Op, Subtarget, DAG);
31986 case ISD::ABS: return LowerABS(Op, Subtarget, DAG);
31987 case ISD::ABDS:
31988 case ISD::ABDU: return LowerABD(Op, Subtarget, DAG);
31989 case ISD::AVGCEILU: return LowerAVG(Op, Subtarget, DAG);
31990 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
31991 case ISD::MLOAD: return LowerMLOAD(Op, Subtarget, DAG);
31992 case ISD::MSTORE: return LowerMSTORE(Op, Subtarget, DAG);
31993 case ISD::MGATHER: return LowerMGATHER(Op, Subtarget, DAG);
31994 case ISD::MSCATTER: return LowerMSCATTER(Op, Subtarget, DAG);
31995 case ISD::GC_TRANSITION_START:
31996 case ISD::GC_TRANSITION_END: return LowerGC_TRANSITION(Op, DAG);
31997 case ISD::ADDRSPACECAST: return LowerADDRSPACECAST(Op, DAG);
31998 case X86ISD::CVTPS2PH: return LowerCVTPS2PH(Op, DAG);
31999 case ISD::PREFETCH: return LowerPREFETCH(Op, Subtarget, DAG);
32003 /// Replace a node with an illegal result type with a new node built out of
32004 /// custom code.
32005 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
32006 SmallVectorImpl<SDValue>&Results,
32007 SelectionDAG &DAG) const {
32008 SDLoc dl(N);
32009 switch (N->getOpcode()) {
32010 default:
32011 #ifndef NDEBUG
32012 dbgs() << "ReplaceNodeResults: ";
32013 N->dump(&DAG);
32014 #endif
32015 llvm_unreachable("Do not know how to custom type legalize this operation!");
32016 case X86ISD::CVTPH2PS: {
32017 EVT VT = N->getValueType(0);
32018 SDValue Lo, Hi;
32019 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
32020 EVT LoVT, HiVT;
32021 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
32022 Lo = DAG.getNode(X86ISD::CVTPH2PS, dl, LoVT, Lo);
32023 Hi = DAG.getNode(X86ISD::CVTPH2PS, dl, HiVT, Hi);
32024 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
32025 Results.push_back(Res);
32026 return;
32028 case X86ISD::STRICT_CVTPH2PS: {
32029 EVT VT = N->getValueType(0);
32030 SDValue Lo, Hi;
32031 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 1);
32032 EVT LoVT, HiVT;
32033 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
32034 Lo = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {LoVT, MVT::Other},
32035 {N->getOperand(0), Lo});
32036 Hi = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {HiVT, MVT::Other},
32037 {N->getOperand(0), Hi});
32038 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
32039 Lo.getValue(1), Hi.getValue(1));
32040 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
32041 Results.push_back(Res);
32042 Results.push_back(Chain);
32043 return;
32045 case X86ISD::CVTPS2PH:
32046 Results.push_back(LowerCVTPS2PH(SDValue(N, 0), DAG));
32047 return;
32048 case ISD::CTPOP: {
32049 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
32050 // Use a v2i64 if possible.
32051 bool NoImplicitFloatOps =
32052 DAG.getMachineFunction().getFunction().hasFnAttribute(
32053 Attribute::NoImplicitFloat);
32054 if (isTypeLegal(MVT::v2i64) && !NoImplicitFloatOps) {
32055 SDValue Wide =
32056 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, N->getOperand(0));
32057 Wide = DAG.getNode(ISD::CTPOP, dl, MVT::v2i64, Wide);
32058 // The bit count should fit in 32 bits, so extract it as i32 and then zero
32059 // extend to i64. Otherwise we end up extracting bits 63:32 separately.
32060 Wide = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Wide);
32061 Wide = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Wide,
32062 DAG.getIntPtrConstant(0, dl));
32063 Wide = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Wide);
32064 Results.push_back(Wide);
32066 return;
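// E.g. on a 32-bit target with SSE2, ctpop i64 is handled as a v2i64 vector
// popcount; since the count fits in 32 bits, only element 0 of the v4i32 view
// is extracted and zero-extended back to i64, avoiding a second extract of
// bits 63:32.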
32068 case ISD::MUL: {
32069 EVT VT = N->getValueType(0);
32070 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32071 VT.getVectorElementType() == MVT::i8 && "Unexpected VT!");
32072 // Pre-promote these to vXi16 to avoid op legalization thinking all 16
32073 // elements are needed.
32074 MVT MulVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
32075 SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(0));
32076 SDValue Op1 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(1));
32077 SDValue Res = DAG.getNode(ISD::MUL, dl, MulVT, Op0, Op1);
32078 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
32079 unsigned NumConcats = 16 / VT.getVectorNumElements();
32080 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
32081 ConcatOps[0] = Res;
32082 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, ConcatOps);
32083 Results.push_back(Res);
32084 return;
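// E.g. a v2i8 multiply is pre-promoted to v2i16, multiplied, truncated back to
// v2i8, and then padded with undef up to v16i8 so the result has a legal type.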
32086 case ISD::SMULO:
32087 case ISD::UMULO: {
32088 EVT VT = N->getValueType(0);
32089 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32090 VT == MVT::v2i32 && "Unexpected VT!");
32091 bool IsSigned = N->getOpcode() == ISD::SMULO;
32092 unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
32093 SDValue Op0 = DAG.getNode(ExtOpc, dl, MVT::v2i64, N->getOperand(0));
32094 SDValue Op1 = DAG.getNode(ExtOpc, dl, MVT::v2i64, N->getOperand(1));
32095 SDValue Res = DAG.getNode(ISD::MUL, dl, MVT::v2i64, Op0, Op1);
32096 // Extract the high 32 bits from each result using PSHUFD.
32097 // TODO: Could use SRL+TRUNCATE but that doesn't become a PSHUFD.
32098 SDValue Hi = DAG.getBitcast(MVT::v4i32, Res);
32099 Hi = DAG.getVectorShuffle(MVT::v4i32, dl, Hi, Hi, {1, 3, -1, -1});
32100 Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Hi,
32101 DAG.getIntPtrConstant(0, dl));
32103 // Truncate the low bits of the result. This will become PSHUFD.
32104 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
32106 SDValue HiCmp;
32107 if (IsSigned) {
32108 // SMULO overflows if the high bits don't match the sign of the low.
32109 HiCmp = DAG.getNode(ISD::SRA, dl, VT, Res, DAG.getConstant(31, dl, VT));
32110 } else {
32111 // UMULO overflows if the high bits are non-zero.
32112 HiCmp = DAG.getConstant(0, dl, VT);
32114 SDValue Ovf = DAG.getSetCC(dl, N->getValueType(1), Hi, HiCmp, ISD::SETNE);
32116 // Widen the result by padding with undef.
32117 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Res,
32118 DAG.getUNDEF(VT));
32119 Results.push_back(Res);
32120 Results.push_back(Ovf);
32121 return;
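// In other words, (v2i32 umulo a, b) is computed as a full v2i64 multiply: the
// low 32 bits of each lane form the result, and overflow is (hi != 0) for
// umulo, or hi differing from the sign-spread of the low half (lo >>s 31) for
// smulo.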
32123 case X86ISD::VPMADDWD: {
32124 // Legalize types for X86ISD::VPMADDWD by widening.
32125 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
32127 EVT VT = N->getValueType(0);
32128 EVT InVT = N->getOperand(0).getValueType();
32129 assert(VT.getSizeInBits() < 128 && 128 % VT.getSizeInBits() == 0 &&
32130 "Expected a VT that divides into 128 bits.");
32131 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32132 "Unexpected type action!");
32133 unsigned NumConcat = 128 / InVT.getSizeInBits();
32135 EVT InWideVT = EVT::getVectorVT(*DAG.getContext(),
32136 InVT.getVectorElementType(),
32137 NumConcat * InVT.getVectorNumElements());
32138 EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
32139 VT.getVectorElementType(),
32140 NumConcat * VT.getVectorNumElements());
32142 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
32143 Ops[0] = N->getOperand(0);
32144 SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
32145 Ops[0] = N->getOperand(1);
32146 SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
32148 SDValue Res = DAG.getNode(N->getOpcode(), dl, WideVT, InVec0, InVec1);
32149 Results.push_back(Res);
32150 return;
32152 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
32153 case X86ISD::FMINC:
32154 case X86ISD::FMIN:
32155 case X86ISD::FMAXC:
32156 case X86ISD::FMAX: {
32157 EVT VT = N->getValueType(0);
32158 assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
32159 SDValue UNDEF = DAG.getUNDEF(VT);
32160 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
32161 N->getOperand(0), UNDEF);
32162 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
32163 N->getOperand(1), UNDEF);
32164 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
32165 return;
32167 case ISD::SDIV:
32168 case ISD::UDIV:
32169 case ISD::SREM:
32170 case ISD::UREM: {
32171 EVT VT = N->getValueType(0);
32172 if (VT.isVector()) {
32173 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32174 "Unexpected type action!");
32175 // If this RHS is a constant splat vector we can widen this and let
32176 // division/remainder by constant optimize it.
32177 // TODO: Can we do something for non-splat?
32178 APInt SplatVal;
32179 if (ISD::isConstantSplatVector(N->getOperand(1).getNode(), SplatVal)) {
32180 unsigned NumConcats = 128 / VT.getSizeInBits();
32181 SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
32182 Ops0[0] = N->getOperand(0);
32183 EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
32184 SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
32185 SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
32186 SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
32187 Results.push_back(Res);
32189 return;
32192 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
32193 Results.push_back(V);
32194 return;
32196 case ISD::TRUNCATE: {
32197 MVT VT = N->getSimpleValueType(0);
32198 if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
32199 return;
32201 // The generic legalizer will try to widen the input type to the same
32202 // number of elements as the widened result type. But this isn't always
32203 // the best thing so do some custom legalization to avoid some cases.
32204 MVT WidenVT = getTypeToTransformTo(*DAG.getContext(), VT).getSimpleVT();
32205 SDValue In = N->getOperand(0);
32206 EVT InVT = In.getValueType();
32207 EVT InEltVT = InVT.getVectorElementType();
32208 EVT EltVT = VT.getVectorElementType();
32209 unsigned MinElts = VT.getVectorNumElements();
32210 unsigned WidenNumElts = WidenVT.getVectorNumElements();
32211 unsigned InBits = InVT.getSizeInBits();
32213 // See if there are sufficient leading bits to perform a PACKUS/PACKSS.
32214 unsigned PackOpcode;
32215 if (SDValue Src =
32216 matchTruncateWithPACK(PackOpcode, VT, In, dl, DAG, Subtarget)) {
32217 if (SDValue Res = truncateVectorWithPACK(PackOpcode, VT, Src,
32218 dl, DAG, Subtarget)) {
32219 Res = widenSubVector(WidenVT, Res, false, Subtarget, DAG, dl);
32220 Results.push_back(Res);
32221 return;
32225 if (128 % InBits == 0) {
32226 // 128-bit and smaller inputs should avoid the truncate altogether and
32227 // just use a build_vector that will become a shuffle.
32228 // TODO: Widen and use a shuffle directly?
32229 SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
32230 // Use the original element count so we don't do more scalar opts than
32231 // necessary.
32232 for (unsigned i=0; i < MinElts; ++i) {
32233 SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, In,
32234 DAG.getIntPtrConstant(i, dl));
32235 Ops[i] = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Val);
32237 Results.push_back(DAG.getBuildVector(WidenVT, dl, Ops));
32238 return;
32241 // With AVX512 there are some cases that can use a target specific
32242 // truncate node to go from 256/512 to less than 128 with zeros in the
32243 // upper elements of the 128 bit result.
32244 if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
32246 // We can use VTRUNC directly for 256 bits with VLX or for any 512 bits.
32246 if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
32247 Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
32248 return;
32250 // There's one case we can widen to 512 bits and use VTRUNC.
32251 if (InVT == MVT::v4i64 && VT == MVT::v4i8 && isTypeLegal(MVT::v8i64)) {
32252 In = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i64, In,
32253 DAG.getUNDEF(MVT::v4i64));
32254 Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
32255 return;
32258 if (Subtarget.hasVLX() && InVT == MVT::v8i64 && VT == MVT::v8i8 &&
32259 getTypeAction(*DAG.getContext(), InVT) == TypeSplitVector &&
32260 isTypeLegal(MVT::v4i64)) {
32261 // Input needs to be split and output needs to be widened. Let's use two
32262 // VTRUNCs, and shuffle their results together into the wider type.
32263 SDValue Lo, Hi;
32264 std::tie(Lo, Hi) = DAG.SplitVector(In, dl);
32266 Lo = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Lo);
32267 Hi = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Hi);
32268 SDValue Res = DAG.getVectorShuffle(MVT::v16i8, dl, Lo, Hi,
32269 { 0, 1, 2, 3, 16, 17, 18, 19,
32270 -1, -1, -1, -1, -1, -1, -1, -1 });
32271 Results.push_back(Res);
32272 return;
32275 // Attempt to widen the truncation input vector to let LowerTRUNCATE handle
32276 // this via type legalization.
32277 if ((InEltVT == MVT::i16 || InEltVT == MVT::i32 || InEltVT == MVT::i64) &&
32278 (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32) &&
32279 (!Subtarget.hasSSSE3() ||
32280 (!isTypeLegal(InVT) &&
32281 !(MinElts <= 4 && InEltVT == MVT::i64 && EltVT == MVT::i8)))) {
32282 SDValue WidenIn = widenSubVector(In, false, Subtarget, DAG, dl,
32283 InEltVT.getSizeInBits() * WidenNumElts);
32284 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, WidenVT, WidenIn));
32285 return;
32288 return;
32290 case ISD::ANY_EXTEND:
32291 // Right now, only MVT::v8i8 has Custom action for an illegal type.
32292 // It's intended to custom handle the input type.
32293 assert(N->getValueType(0) == MVT::v8i8 &&
32294 "Do not know how to legalize this Node");
32295 return;
32296 case ISD::SIGN_EXTEND:
32297 case ISD::ZERO_EXTEND: {
32298 EVT VT = N->getValueType(0);
32299 SDValue In = N->getOperand(0);
32300 EVT InVT = In.getValueType();
32301 if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
32302 (InVT == MVT::v4i16 || InVT == MVT::v4i8)){
32303 assert(getTypeAction(*DAG.getContext(), InVT) == TypeWidenVector &&
32304 "Unexpected type action!");
32305 assert(N->getOpcode() == ISD::SIGN_EXTEND && "Unexpected opcode");
32306 // Custom split this so we can extend i8/i16->i32 invec. This is better
32307 // since sign_extend_inreg i8/i16->i64 requires an extend to i32 using
32308 // sra, followed by an extend from i32 to i64 using pcmpgt. By custom
32309 // splitting we allow the sra from the extend to i32 to be shared by the split.
32310 In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);
32312 // Fill a vector with sign bits for each element.
32313 SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
32314 SDValue SignBits = DAG.getSetCC(dl, MVT::v4i32, Zero, In, ISD::SETGT);
32316 // Create an unpackl and unpackh to interleave the sign bits then bitcast
32317 // to v2i64.
32318 SDValue Lo = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
32319 {0, 4, 1, 5});
32320 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Lo);
32321 SDValue Hi = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
32322 {2, 6, 3, 7});
32323 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Hi);
32325 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
32326 Results.push_back(Res);
32327 return;
32330 if (VT == MVT::v16i32 || VT == MVT::v8i64) {
32331 if (!InVT.is128BitVector()) {
32332 // Not a 128 bit vector, but maybe type legalization will promote
32333 // it to 128 bits.
32334 if (getTypeAction(*DAG.getContext(), InVT) != TypePromoteInteger)
32335 return;
32336 InVT = getTypeToTransformTo(*DAG.getContext(), InVT);
32337 if (!InVT.is128BitVector())
32338 return;
32340 // Promote the input to 128 bits. Type legalization will turn this into
32341 // zext_inreg/sext_inreg.
32342 In = DAG.getNode(N->getOpcode(), dl, InVT, In);
32345 // Perform custom splitting instead of the two stage extend we would get
32346 // by default.
32347 EVT LoVT, HiVT;
32348 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
32349 assert(isTypeLegal(LoVT) && "Split VT not legal?");
32351 SDValue Lo = getEXTEND_VECTOR_INREG(N->getOpcode(), dl, LoVT, In, DAG);
32353 // We need to shift the input over by half the number of elements.
32354 unsigned NumElts = InVT.getVectorNumElements();
32355 unsigned HalfNumElts = NumElts / 2;
32356 SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
32357 for (unsigned i = 0; i != HalfNumElts; ++i)
32358 ShufMask[i] = i + HalfNumElts;
32360 SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
32361 Hi = getEXTEND_VECTOR_INREG(N->getOpcode(), dl, HiVT, Hi, DAG);
32363 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
32364 Results.push_back(Res);
32366 return;
32368 case ISD::FP_TO_SINT:
32369 case ISD::STRICT_FP_TO_SINT:
32370 case ISD::FP_TO_UINT:
32371 case ISD::STRICT_FP_TO_UINT: {
32372 bool IsStrict = N->isStrictFPOpcode();
32373 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
32374 N->getOpcode() == ISD::STRICT_FP_TO_SINT;
32375 EVT VT = N->getValueType(0);
32376 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
32377 SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
32378 EVT SrcVT = Src.getValueType();
32380 SDValue Res;
32381 if (isSoftF16(SrcVT, Subtarget)) {
32382 EVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
32383 if (IsStrict) {
32384 Res =
32385 DAG.getNode(N->getOpcode(), dl, {VT, MVT::Other},
32386 {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
32387 {NVT, MVT::Other}, {Chain, Src})});
32388 Chain = Res.getValue(1);
32389 } else {
32390 Res = DAG.getNode(N->getOpcode(), dl, VT,
32391 DAG.getNode(ISD::FP_EXTEND, dl, NVT, Src));
32393 Results.push_back(Res);
32394 if (IsStrict)
32395 Results.push_back(Chain);
32397 return;
32400 if (VT.isVector() && Subtarget.hasFP16() &&
32401 SrcVT.getVectorElementType() == MVT::f16) {
32402 EVT EleVT = VT.getVectorElementType();
32403 EVT ResVT = EleVT == MVT::i32 ? MVT::v4i32 : MVT::v8i16;
32405 if (SrcVT != MVT::v8f16) {
32406 SDValue Tmp =
32407 IsStrict ? DAG.getConstantFP(0.0, dl, SrcVT) : DAG.getUNDEF(SrcVT);
32408 SmallVector<SDValue, 4> Ops(SrcVT == MVT::v2f16 ? 4 : 2, Tmp);
32409 Ops[0] = Src;
32410 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f16, Ops);
32413 if (IsStrict) {
32414 unsigned Opc =
32415 IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
32416 Res =
32417 DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {N->getOperand(0), Src});
32418 Chain = Res.getValue(1);
32419 } else {
32420 unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
32421 Res = DAG.getNode(Opc, dl, ResVT, Src);
32424 // TODO: Need to add exception check code for strict FP.
32425 if (EleVT.getSizeInBits() < 16) {
32426 MVT TmpVT = MVT::getVectorVT(EleVT.getSimpleVT(), 8);
32427 Res = DAG.getNode(ISD::TRUNCATE, dl, TmpVT, Res);
32429 // Now widen to 128 bits.
32430 unsigned NumConcats = 128 / TmpVT.getSizeInBits();
32431 MVT ConcatVT = MVT::getVectorVT(EleVT.getSimpleVT(), 8 * NumConcats);
32432 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(TmpVT));
32433 ConcatOps[0] = Res;
32434 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
32437 Results.push_back(Res);
32438 if (IsStrict)
32439 Results.push_back(Chain);
32441 return;
32444 if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
32445 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32446 "Unexpected type action!");
32448 // Try to create a 128 bit vector, but don't exceed a 32 bit element.
32449 unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
32450 MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
32451 VT.getVectorNumElements());
32452 SDValue Res;
32453 SDValue Chain;
32454 if (IsStrict) {
32455 Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {PromoteVT, MVT::Other},
32456 {N->getOperand(0), Src});
32457 Chain = Res.getValue(1);
32458 } else
32459 Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
32461 // Preserve what we know about the size of the original result. If the
32462 // result is v2i32, we have to manually widen the assert.
32463 if (PromoteVT == MVT::v2i32)
32464 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Res,
32465 DAG.getUNDEF(MVT::v2i32));
32467 Res = DAG.getNode(!IsSigned ? ISD::AssertZext : ISD::AssertSext, dl,
32468 Res.getValueType(), Res,
32469 DAG.getValueType(VT.getVectorElementType()));
32471 if (PromoteVT == MVT::v2i32)
32472 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
32473 DAG.getIntPtrConstant(0, dl));
32475 // Truncate back to the original width.
32476 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
32478 // Now widen to 128 bits.
32479 unsigned NumConcats = 128 / VT.getSizeInBits();
32480 MVT ConcatVT = MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
32481 VT.getVectorNumElements() * NumConcats);
32482 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
32483 ConcatOps[0] = Res;
32484 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
32485 Results.push_back(Res);
32486 if (IsStrict)
32487 Results.push_back(Chain);
32488 return;
32492 if (VT == MVT::v2i32) {
32493 assert((!IsStrict || IsSigned || Subtarget.hasAVX512()) &&
32494 "Strict unsigned conversion requires AVX512");
32495 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
32496 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32497 "Unexpected type action!");
32498 if (Src.getValueType() == MVT::v2f64) {
32499 if (!IsSigned && !Subtarget.hasAVX512()) {
32500 SDValue Res =
32501 expandFP_TO_UINT_SSE(MVT::v4i32, Src, dl, DAG, Subtarget);
32502 Results.push_back(Res);
32503 return;
32506 unsigned Opc;
32507 if (IsStrict)
32508 Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
32509 else
32510 Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
32512 // If we have VLX we can emit a target-specific FP_TO_UINT node.
32513 if (!IsSigned && !Subtarget.hasVLX()) {
32514 // Otherwise we can defer to the generic legalizer which will widen
32515 // the input as well. This will be further widened during op
32516 // legalization to v8i32<-v8f64.
32517 // For strict nodes we'll need to widen ourselves.
32518 // FIXME: Fix the type legalizer to safely widen strict nodes?
32519 if (!IsStrict)
32520 return;
32521 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f64, Src,
32522 DAG.getConstantFP(0.0, dl, MVT::v2f64));
32523 Opc = N->getOpcode();
32525 SDValue Res;
32526 SDValue Chain;
32527 if (IsStrict) {
32528 Res = DAG.getNode(Opc, dl, {MVT::v4i32, MVT::Other},
32529 {N->getOperand(0), Src});
32530 Chain = Res.getValue(1);
32531 } else {
32532 Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
32534 Results.push_back(Res);
32535 if (IsStrict)
32536 Results.push_back(Chain);
32537 return;
32540 // Custom widen strict v2f32->v2i32 by padding with zeros.
32541 // FIXME: Should generic type legalizer do this?
32542 if (Src.getValueType() == MVT::v2f32 && IsStrict) {
32543 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
32544 DAG.getConstantFP(0.0, dl, MVT::v2f32));
32545 SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4i32, MVT::Other},
32546 {N->getOperand(0), Src});
32547 Results.push_back(Res);
32548 Results.push_back(Res.getValue(1));
32549 return;
32552 // The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
32553 // so early out here.
32554 return;
32557 assert(!VT.isVector() && "Vectors should have been handled above!");
32559 if ((Subtarget.hasDQI() && VT == MVT::i64 &&
32560 (SrcVT == MVT::f32 || SrcVT == MVT::f64)) ||
32561 (Subtarget.hasFP16() && SrcVT == MVT::f16)) {
32562 assert(!Subtarget.is64Bit() && "i64 should be legal");
32563 unsigned NumElts = Subtarget.hasVLX() ? 2 : 8;
32564 // If we use a 128-bit result we might need to use a target specific node.
32565 unsigned SrcElts =
32566 std::max(NumElts, 128U / (unsigned)SrcVT.getSizeInBits());
32567 MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
32568 MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), SrcElts);
32569 unsigned Opc = N->getOpcode();
32570 if (NumElts != SrcElts) {
32571 if (IsStrict)
32572 Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
32573 else
32574 Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
32577 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
32578 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
32579 DAG.getConstantFP(0.0, dl, VecInVT), Src,
32580 ZeroIdx);
32581 SDValue Chain;
32582 if (IsStrict) {
32583 SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
32584 Res = DAG.getNode(Opc, SDLoc(N), Tys, N->getOperand(0), Res);
32585 Chain = Res.getValue(1);
32586 } else
32587 Res = DAG.getNode(Opc, SDLoc(N), VecVT, Res);
32588 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
32589 Results.push_back(Res);
32590 if (IsStrict)
32591 Results.push_back(Chain);
32592 return;
32595 if (VT == MVT::i128 && Subtarget.isTargetWin64()) {
32596 SDValue Chain;
32597 SDValue V = LowerWin64_FP_TO_INT128(SDValue(N, 0), DAG, Chain);
32598 Results.push_back(V);
32599 if (IsStrict)
32600 Results.push_back(Chain);
32601 return;
32604 if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, Chain)) {
32605 Results.push_back(V);
32606 if (IsStrict)
32607 Results.push_back(Chain);
32609 return;
32611 case ISD::LRINT:
32612 case ISD::LLRINT: {
32613 if (SDValue V = LRINT_LLRINTHelper(N, DAG))
32614 Results.push_back(V);
32615 return;
32618 case ISD::SINT_TO_FP:
32619 case ISD::STRICT_SINT_TO_FP:
32620 case ISD::UINT_TO_FP:
32621 case ISD::STRICT_UINT_TO_FP: {
32622 bool IsStrict = N->isStrictFPOpcode();
32623 bool IsSigned = N->getOpcode() == ISD::SINT_TO_FP ||
32624 N->getOpcode() == ISD::STRICT_SINT_TO_FP;
32625 EVT VT = N->getValueType(0);
32626 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
32627 if (VT.getVectorElementType() == MVT::f16 && Subtarget.hasFP16() &&
32628 Subtarget.hasVLX()) {
32629 if (Src.getValueType().getVectorElementType() == MVT::i16)
32630 return;
32632 if (VT == MVT::v2f16 && Src.getValueType() == MVT::v2i32)
32633 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
32634 IsStrict ? DAG.getConstant(0, dl, MVT::v2i32)
32635 : DAG.getUNDEF(MVT::v2i32));
32636 if (IsStrict) {
32637 unsigned Opc =
32638 IsSigned ? X86ISD::STRICT_CVTSI2P : X86ISD::STRICT_CVTUI2P;
32639 SDValue Res = DAG.getNode(Opc, dl, {MVT::v8f16, MVT::Other},
32640 {N->getOperand(0), Src});
32641 Results.push_back(Res);
32642 Results.push_back(Res.getValue(1));
32643 } else {
32644 unsigned Opc = IsSigned ? X86ISD::CVTSI2P : X86ISD::CVTUI2P;
32645 Results.push_back(DAG.getNode(Opc, dl, MVT::v8f16, Src));
32647 return;
32649 if (VT != MVT::v2f32)
32650 return;
32651 EVT SrcVT = Src.getValueType();
32652 if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
32653 if (IsStrict) {
32654 unsigned Opc = IsSigned ? X86ISD::STRICT_CVTSI2P
32655 : X86ISD::STRICT_CVTUI2P;
32656 SDValue Res = DAG.getNode(Opc, dl, {MVT::v4f32, MVT::Other},
32657 {N->getOperand(0), Src});
32658 Results.push_back(Res);
32659 Results.push_back(Res.getValue(1));
32660 } else {
32661 unsigned Opc = IsSigned ? X86ISD::CVTSI2P : X86ISD::CVTUI2P;
32662 Results.push_back(DAG.getNode(Opc, dl, MVT::v4f32, Src));
32664 return;
32666 if (SrcVT == MVT::v2i64 && !IsSigned && Subtarget.is64Bit() &&
32667 Subtarget.hasSSE41() && !Subtarget.hasAVX512()) {
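// Unsigned v2i64 -> v2f32 without AVX512: values with the sign bit set are
// first halved as (x >> 1) | (x & 1) (the OR keeps the low bit "sticky" so
// the final rounding is unaffected), converted as *signed* i64, and then
// doubled with an FADD.  A rough scalar sketch of the same idea (illustrative
// only, not part of the lowering):
//   float cvt(uint64_t x) {
//     if ((int64_t)x >= 0) return (float)(int64_t)x;
//     uint64_t Half = (x >> 1) | (x & 1);
//     float F = (float)(int64_t)Half;
//     return F + F;
//   }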
32668 SDValue Zero = DAG.getConstant(0, dl, SrcVT);
32669 SDValue One = DAG.getConstant(1, dl, SrcVT);
32670 SDValue Sign = DAG.getNode(ISD::OR, dl, SrcVT,
32671 DAG.getNode(ISD::SRL, dl, SrcVT, Src, One),
32672 DAG.getNode(ISD::AND, dl, SrcVT, Src, One));
32673 SDValue IsNeg = DAG.getSetCC(dl, MVT::v2i64, Src, Zero, ISD::SETLT);
32674 SDValue SignSrc = DAG.getSelect(dl, SrcVT, IsNeg, Sign, Src);
32675 SmallVector<SDValue, 4> SignCvts(4, DAG.getConstantFP(0.0, dl, MVT::f32));
32676 for (int i = 0; i != 2; ++i) {
32677 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
32678 SignSrc, DAG.getIntPtrConstant(i, dl));
32679 if (IsStrict)
32680 SignCvts[i] =
32681 DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {MVT::f32, MVT::Other},
32682 {N->getOperand(0), Elt});
32683 else
32684 SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Elt);
32686 SDValue SignCvt = DAG.getBuildVector(MVT::v4f32, dl, SignCvts);
32687 SDValue Slow, Chain;
32688 if (IsStrict) {
32689 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
32690 SignCvts[0].getValue(1), SignCvts[1].getValue(1));
32691 Slow = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::v4f32, MVT::Other},
32692 {Chain, SignCvt, SignCvt});
32693 Chain = Slow.getValue(1);
32694 } else {
32695 Slow = DAG.getNode(ISD::FADD, dl, MVT::v4f32, SignCvt, SignCvt);
32697 IsNeg = DAG.getBitcast(MVT::v4i32, IsNeg);
32698 IsNeg =
32699 DAG.getVectorShuffle(MVT::v4i32, dl, IsNeg, IsNeg, {1, 3, -1, -1});
32700 SDValue Cvt = DAG.getSelect(dl, MVT::v4f32, IsNeg, Slow, SignCvt);
32701 Results.push_back(Cvt);
32702 if (IsStrict)
32703 Results.push_back(Chain);
32704 return;
32707 if (SrcVT != MVT::v2i32)
32708 return;
32710 if (IsSigned || Subtarget.hasAVX512()) {
32711 if (!IsStrict)
32712 return;
32714 // Custom widen strict v2i32->v2f32 to avoid scalarization.
32715 // FIXME: Should generic type legalizer do this?
32716 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
32717 DAG.getConstant(0, dl, MVT::v2i32));
32718 SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
32719 {N->getOperand(0), Src});
32720 Results.push_back(Res);
32721 Results.push_back(Res.getValue(1));
32722 return;
32725 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
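// Classic bias trick for unsigned i32 -> f64: 0x4330000000000000 is the
// IEEE-754 encoding of 2^52 (biased exponent 1075, zero mantissa).  OR-ing a
// zero-extended 32-bit value into the low mantissa bits yields exactly
// 2^52 + x (no rounding, since x < 2^32), and subtracting 2^52 again leaves
// x as an exact double.  A minimal scalar sketch of the same idea:
//   double d = llvm::bit_cast<double>(0x4330000000000000ULL | (uint64_t)x);
//   double r = d - 0x1p52;   // == (double)x
// The v2f64 result is then narrowed to v2f32 with a VFPROUND.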
32726 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
32727 SDValue VBias = DAG.getConstantFP(
32728 llvm::bit_cast<double>(0x4330000000000000ULL), dl, MVT::v2f64);
32729 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
32730 DAG.getBitcast(MVT::v2i64, VBias));
32731 Or = DAG.getBitcast(MVT::v2f64, Or);
32732 if (IsStrict) {
32733 SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::v2f64, MVT::Other},
32734 {N->getOperand(0), Or, VBias});
32735 SDValue Res = DAG.getNode(X86ISD::STRICT_VFPROUND, dl,
32736 {MVT::v4f32, MVT::Other},
32737 {Sub.getValue(1), Sub});
32738 Results.push_back(Res);
32739 Results.push_back(Res.getValue(1));
32740 } else {
32741 // TODO: Are there any fast-math-flags to propagate here?
32742 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
32743 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
32745 return;
32747 case ISD::STRICT_FP_ROUND:
32748 case ISD::FP_ROUND: {
32749 bool IsStrict = N->isStrictFPOpcode();
32750 SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
32751 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
32752 SDValue Rnd = N->getOperand(IsStrict ? 2 : 1);
32753 EVT SrcVT = Src.getValueType();
32754 EVT VT = N->getValueType(0);
32755 SDValue V;
32756 if (VT == MVT::v2f16 && Src.getValueType() == MVT::v2f32) {
32757 SDValue Ext = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v2f32)
32758 : DAG.getUNDEF(MVT::v2f32);
32759 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src, Ext);
32761 if (!Subtarget.hasFP16() && VT.getVectorElementType() == MVT::f16) {
32762 assert(Subtarget.hasF16C() && "Cannot widen f16 without F16C");
32763 if (SrcVT.getVectorElementType() != MVT::f32)
32764 return;
32766 if (IsStrict)
32767 V = DAG.getNode(X86ISD::STRICT_CVTPS2PH, dl, {MVT::v8i16, MVT::Other},
32768 {Chain, Src, Rnd});
32769 else
32770 V = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Src, Rnd);
32772 Results.push_back(DAG.getBitcast(MVT::v8f16, V));
32773 if (IsStrict)
32774 Results.push_back(V.getValue(1));
32775 return;
32777 if (!isTypeLegal(Src.getValueType()))
32778 return;
32779 EVT NewVT = VT.getVectorElementType() == MVT::f16 ? MVT::v8f16 : MVT::v4f32;
32780 if (IsStrict)
32781 V = DAG.getNode(X86ISD::STRICT_VFPROUND, dl, {NewVT, MVT::Other},
32782 {Chain, Src});
32783 else
32784 V = DAG.getNode(X86ISD::VFPROUND, dl, NewVT, Src);
32785 Results.push_back(V);
32786 if (IsStrict)
32787 Results.push_back(V.getValue(1));
32788 return;
32790 case ISD::FP_EXTEND:
32791 case ISD::STRICT_FP_EXTEND: {
32792 // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
32793 // No other ValueType for FP_EXTEND should reach this point.
32794 assert(N->getValueType(0) == MVT::v2f32 &&
32795 "Do not know how to legalize this Node");
32796 if (!Subtarget.hasFP16() || !Subtarget.hasVLX())
32797 return;
32798 bool IsStrict = N->isStrictFPOpcode();
32799 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
32800 SDValue Ext = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v2f16)
32801 : DAG.getUNDEF(MVT::v2f16);
32802 SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f16, Src, Ext);
32803 if (IsStrict)
32804 V = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::v4f32, MVT::Other},
32805 {N->getOperand(0), V});
32806 else
32807 V = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, V);
32808 Results.push_back(V);
32809 if (IsStrict)
32810 Results.push_back(V.getValue(1));
32811 return;
32813 case ISD::INTRINSIC_W_CHAIN: {
32814 unsigned IntNo = N->getConstantOperandVal(1);
32815 switch (IntNo) {
32816 default : llvm_unreachable("Do not know how to custom type "
32817 "legalize this intrinsic operation!");
32818 case Intrinsic::x86_rdtsc:
32819 return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget,
32820 Results);
32821 case Intrinsic::x86_rdtscp:
32822 return getReadTimeStampCounter(N, dl, X86::RDTSCP, DAG, Subtarget,
32823 Results);
32824 case Intrinsic::x86_rdpmc:
32825 expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPMC, X86::ECX, Subtarget,
32826 Results);
32827 return;
32828 case Intrinsic::x86_rdpru:
32829 expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPRU, X86::ECX, Subtarget,
32830 Results);
32831 return;
32832 case Intrinsic::x86_xgetbv:
32833 expandIntrinsicWChainHelper(N, dl, DAG, X86::XGETBV, X86::ECX, Subtarget,
32834 Results);
32835 return;
32838 case ISD::READCYCLECOUNTER: {
32839 return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget, Results);
32841 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
32842 EVT T = N->getValueType(0);
32843 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
32844 bool Regs64bit = T == MVT::i128;
32845 assert((!Regs64bit || Subtarget.canUseCMPXCHG16B()) &&
32846 "64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS requires CMPXCHG16B");
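// CMPXCHG8B/CMPXCHG16B compare EDX:EAX (RDX:RAX) with the memory operand; on
// a match they store ECX:EBX (RCX:RBX) to memory and set ZF, otherwise they
// load the current memory value into EDX:EAX (RDX:RAX) and clear ZF.  The
// register copies below stage the expected and desired halves accordingly,
// and the result (plus EFLAGS) is read back afterwards.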
32847 MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
32848 SDValue cpInL, cpInH;
32849 std::tie(cpInL, cpInH) =
32850 DAG.SplitScalar(N->getOperand(2), dl, HalfT, HalfT);
32851 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
32852 Regs64bit ? X86::RAX : X86::EAX, cpInL, SDValue());
32853 cpInH =
32854 DAG.getCopyToReg(cpInL.getValue(0), dl, Regs64bit ? X86::RDX : X86::EDX,
32855 cpInH, cpInL.getValue(1));
32856 SDValue swapInL, swapInH;
32857 std::tie(swapInL, swapInH) =
32858 DAG.SplitScalar(N->getOperand(3), dl, HalfT, HalfT);
32859 swapInH =
32860 DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
32861 swapInH, cpInH.getValue(1));
32863 // In 64-bit mode we might need the base pointer in RBX, but we can't know
32864 // until later. So we keep the RBX input in a vreg and use a custom
32865 // inserter.
32866 // Since RBX will be a reserved register, the register allocator will not
32867 // ensure that its value is properly saved and restored around this
32868 // live range.

32869 SDValue Result;
32870 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
32871 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
32872 if (Regs64bit) {
32873 SDValue Ops[] = {swapInH.getValue(0), N->getOperand(1), swapInL,
32874 swapInH.getValue(1)};
32875 Result =
32876 DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG16_DAG, dl, Tys, Ops, T, MMO);
32877 } else {
32878 swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl, X86::EBX, swapInL,
32879 swapInH.getValue(1));
32880 SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
32881 swapInL.getValue(1)};
32882 Result =
32883 DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG8_DAG, dl, Tys, Ops, T, MMO);
32886 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
32887 Regs64bit ? X86::RAX : X86::EAX,
32888 HalfT, Result.getValue(1));
32889 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
32890 Regs64bit ? X86::RDX : X86::EDX,
32891 HalfT, cpOutL.getValue(2));
32892 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
32894 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
32895 MVT::i32, cpOutH.getValue(2));
32896 SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
32897 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
32899 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
32900 Results.push_back(Success);
32901 Results.push_back(EFLAGS.getValue(1));
32902 return;
32904 case ISD::ATOMIC_LOAD: {
32905 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
32906 bool NoImplicitFloatOps =
32907 DAG.getMachineFunction().getFunction().hasFnAttribute(
32908 Attribute::NoImplicitFloat);
32909 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
32910 auto *Node = cast<AtomicSDNode>(N);
32911 if (Subtarget.hasSSE1()) {
32912 // Use a VZEXT_LOAD which will be selected as MOVQ or XORPS+MOVLPS.
32913 // Then extract the lower 64-bits.
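// A naturally aligned 8-byte SSE load is a single memory access, which x86
// performs atomically, so this is a valid lowering for an atomic i64 load on
// 32-bit targets.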
32914 MVT LdVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32;
32915 SDVTList Tys = DAG.getVTList(LdVT, MVT::Other);
32916 SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
32917 SDValue Ld = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
32918 MVT::i64, Node->getMemOperand());
32919 if (Subtarget.hasSSE2()) {
32920 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Ld,
32921 DAG.getIntPtrConstant(0, dl));
32922 Results.push_back(Res);
32923 Results.push_back(Ld.getValue(1));
32924 return;
32926 // We use an alternative sequence for SSE1 that extracts as v2f32 and
32927 // then casts to i64. This avoids a 128-bit stack temporary being
32928 // created by type legalization if we were to cast v4f32->v2i64.
32929 SDValue Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2f32, Ld,
32930 DAG.getIntPtrConstant(0, dl));
32931 Res = DAG.getBitcast(MVT::i64, Res);
32932 Results.push_back(Res);
32933 Results.push_back(Ld.getValue(1));
32934 return;
32936 if (Subtarget.hasX87()) {
32937 // First load this into an 80-bit X87 register. This will put the whole
32938 // integer into the significand.
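// The 8-byte FILD below is a single naturally aligned quadword access, which
// x86 performs atomically, and the 80-bit format's 64-bit significand can
// represent any i64 exactly, so no bits are lost on the round trip.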
32939 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
32940 SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
32941 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::FILD,
32942 dl, Tys, Ops, MVT::i64,
32943 Node->getMemOperand());
32944 SDValue Chain = Result.getValue(1);
32946 // Now store the X87 register to a stack temporary and convert to i64.
32947 // This store is not atomic and doesn't need to be.
32948 // FIXME: We don't need a stack temporary if the result of the load
32949 // is already being stored. We could just directly store there.
32950 SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
32951 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
32952 MachinePointerInfo MPI =
32953 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
32954 SDValue StoreOps[] = { Chain, Result, StackPtr };
32955 Chain = DAG.getMemIntrinsicNode(
32956 X86ISD::FIST, dl, DAG.getVTList(MVT::Other), StoreOps, MVT::i64,
32957 MPI, std::nullopt /*Align*/, MachineMemOperand::MOStore);
32959 // Finally load the value back from the stack temporary and return it.
32960 // This load is not atomic and doesn't need to be.
32961 // This load will be further type legalized.
32962 Result = DAG.getLoad(MVT::i64, dl, Chain, StackPtr, MPI);
32963 Results.push_back(Result);
32964 Results.push_back(Result.getValue(1));
32965 return;
32968 // TODO: Use MOVLPS when SSE1 is available?
32969 // Delegate to generic TypeLegalization. Situations we can really handle
32970 // should have already been dealt with by AtomicExpandPass.cpp.
32971 break;
32973 case ISD::ATOMIC_SWAP:
32974 case ISD::ATOMIC_LOAD_ADD:
32975 case ISD::ATOMIC_LOAD_SUB:
32976 case ISD::ATOMIC_LOAD_AND:
32977 case ISD::ATOMIC_LOAD_OR:
32978 case ISD::ATOMIC_LOAD_XOR:
32979 case ISD::ATOMIC_LOAD_NAND:
32980 case ISD::ATOMIC_LOAD_MIN:
32981 case ISD::ATOMIC_LOAD_MAX:
32982 case ISD::ATOMIC_LOAD_UMIN:
32983 case ISD::ATOMIC_LOAD_UMAX:
32984 // Delegate to generic TypeLegalization. Situations we can really handle
32985 // should have already been dealt with by AtomicExpandPass.cpp.
32986 break;
32988 case ISD::BITCAST: {
32989 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
32990 EVT DstVT = N->getValueType(0);
32991 EVT SrcVT = N->getOperand(0).getValueType();
32993 // If this is a bitcast from a v64i1 k-register to an i64 on a 32-bit target
32994 // we can split using the k-register rather than memory.
32995 if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
32996 assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
32997 SDValue Lo, Hi;
32998 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
32999 Lo = DAG.getBitcast(MVT::i32, Lo);
33000 Hi = DAG.getBitcast(MVT::i32, Hi);
33001 SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
33002 Results.push_back(Res);
33003 return;
33006 if (DstVT.isVector() && SrcVT == MVT::x86mmx) {
33007 // FIXME: Use v4f32 for SSE1?
33008 assert(Subtarget.hasSSE2() && "Requires SSE2");
33009 assert(getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector &&
33010 "Unexpected type action!");
33011 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), DstVT);
33012 SDValue Res = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64,
33013 N->getOperand(0));
33014 Res = DAG.getBitcast(WideVT, Res);
33015 Results.push_back(Res);
33016 return;
33019 return;
33021 case ISD::MGATHER: {
33022 EVT VT = N->getValueType(0);
33023 if ((VT == MVT::v2f32 || VT == MVT::v2i32) &&
33024 (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
33025 auto *Gather = cast<MaskedGatherSDNode>(N);
33026 SDValue Index = Gather->getIndex();
33027 if (Index.getValueType() != MVT::v2i64)
33028 return;
33029 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33030 "Unexpected type action!");
33031 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
33032 SDValue Mask = Gather->getMask();
33033 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
33034 SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT,
33035 Gather->getPassThru(),
33036 DAG.getUNDEF(VT));
33037 if (!Subtarget.hasVLX()) {
33038 // We need to widen the mask, but the instruction will only use 2
33039 // of its elements. So we can use undef.
33040 Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
33041 DAG.getUNDEF(MVT::v2i1));
33042 Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
33044 SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
33045 Gather->getBasePtr(), Index, Gather->getScale() };
33046 SDValue Res = DAG.getMemIntrinsicNode(
33047 X86ISD::MGATHER, dl, DAG.getVTList(WideVT, MVT::Other), Ops,
33048 Gather->getMemoryVT(), Gather->getMemOperand());
33049 Results.push_back(Res);
33050 Results.push_back(Res.getValue(1));
33051 return;
33053 return;
33055 case ISD::LOAD: {
33056 // Use an f64/i64 load and a scalar_to_vector for v2f32/v2i32 loads. This
33057 // avoids scalarizing in 32-bit mode. In 64-bit mode this avoids an int->fp
33058 // cast since type legalization will try to use an i64 load.
33059 MVT VT = N->getSimpleValueType(0);
33060 assert(VT.isVector() && VT.getSizeInBits() == 64 && "Unexpected VT");
33061 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33062 "Unexpected type action!");
33063 if (!ISD::isNON_EXTLoad(N))
33064 return;
33065 auto *Ld = cast<LoadSDNode>(N);
33066 if (Subtarget.hasSSE2()) {
33067 MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64;
33068 SDValue Res = DAG.getLoad(LdVT, dl, Ld->getChain(), Ld->getBasePtr(),
33069 Ld->getPointerInfo(), Ld->getOriginalAlign(),
33070 Ld->getMemOperand()->getFlags());
33071 SDValue Chain = Res.getValue(1);
33072 MVT VecVT = MVT::getVectorVT(LdVT, 2);
33073 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Res);
33074 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
33075 Res = DAG.getBitcast(WideVT, Res);
33076 Results.push_back(Res);
33077 Results.push_back(Chain);
33078 return;
33080 assert(Subtarget.hasSSE1() && "Expected SSE");
33081 SDVTList Tys = DAG.getVTList(MVT::v4f32, MVT::Other);
33082 SDValue Ops[] = {Ld->getChain(), Ld->getBasePtr()};
33083 SDValue Res = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
33084 MVT::i64, Ld->getMemOperand());
33085 Results.push_back(Res);
33086 Results.push_back(Res.getValue(1));
33087 return;
33089 case ISD::ADDRSPACECAST: {
33090 SDValue V = LowerADDRSPACECAST(SDValue(N,0), DAG);
33091 Results.push_back(V);
33092 return;
33094 case ISD::BITREVERSE: {
33095 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
33096 assert(Subtarget.hasXOP() && "Expected XOP");
33097 // We can use VPPERM by copying to a vector register and back. We'll need
33098 // to move the scalar in two i32 pieces.
33099 Results.push_back(LowerBITREVERSE(SDValue(N, 0), Subtarget, DAG));
33100 return;
33102 case ISD::EXTRACT_VECTOR_ELT: {
33103 // f16 = extract vXf16 %vec, i64 %idx
33104 assert(N->getSimpleValueType(0) == MVT::f16 &&
33105 "Unexpected Value type of EXTRACT_VECTOR_ELT!");
33106 assert(Subtarget.hasFP16() && "Expected FP16");
33107 SDValue VecOp = N->getOperand(0);
33108 EVT ExtVT = VecOp.getValueType().changeVectorElementTypeToInteger();
33109 SDValue Split = DAG.getBitcast(ExtVT, N->getOperand(0));
33110 Split = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Split,
33111 N->getOperand(1));
33112 Split = DAG.getBitcast(MVT::f16, Split);
33113 Results.push_back(Split);
33114 return;
33119 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
33120 switch ((X86ISD::NodeType)Opcode) {
33121 case X86ISD::FIRST_NUMBER: break;
33122 #define NODE_NAME_CASE(NODE) case X86ISD::NODE: return "X86ISD::" #NODE;
33123 NODE_NAME_CASE(BSF)
33124 NODE_NAME_CASE(BSR)
33125 NODE_NAME_CASE(FSHL)
33126 NODE_NAME_CASE(FSHR)
33127 NODE_NAME_CASE(FAND)
33128 NODE_NAME_CASE(FANDN)
33129 NODE_NAME_CASE(FOR)
33130 NODE_NAME_CASE(FXOR)
33131 NODE_NAME_CASE(FILD)
33132 NODE_NAME_CASE(FIST)
33133 NODE_NAME_CASE(FP_TO_INT_IN_MEM)
33134 NODE_NAME_CASE(FLD)
33135 NODE_NAME_CASE(FST)
33136 NODE_NAME_CASE(CALL)
33137 NODE_NAME_CASE(CALL_RVMARKER)
33138 NODE_NAME_CASE(BT)
33139 NODE_NAME_CASE(CMP)
33140 NODE_NAME_CASE(FCMP)
33141 NODE_NAME_CASE(STRICT_FCMP)
33142 NODE_NAME_CASE(STRICT_FCMPS)
33143 NODE_NAME_CASE(COMI)
33144 NODE_NAME_CASE(UCOMI)
33145 NODE_NAME_CASE(CMPM)
33146 NODE_NAME_CASE(CMPMM)
33147 NODE_NAME_CASE(STRICT_CMPM)
33148 NODE_NAME_CASE(CMPMM_SAE)
33149 NODE_NAME_CASE(SETCC)
33150 NODE_NAME_CASE(SETCC_CARRY)
33151 NODE_NAME_CASE(FSETCC)
33152 NODE_NAME_CASE(FSETCCM)
33153 NODE_NAME_CASE(FSETCCM_SAE)
33154 NODE_NAME_CASE(CMOV)
33155 NODE_NAME_CASE(BRCOND)
33156 NODE_NAME_CASE(RET_GLUE)
33157 NODE_NAME_CASE(IRET)
33158 NODE_NAME_CASE(REP_STOS)
33159 NODE_NAME_CASE(REP_MOVS)
33160 NODE_NAME_CASE(GlobalBaseReg)
33161 NODE_NAME_CASE(Wrapper)
33162 NODE_NAME_CASE(WrapperRIP)
33163 NODE_NAME_CASE(MOVQ2DQ)
33164 NODE_NAME_CASE(MOVDQ2Q)
33165 NODE_NAME_CASE(MMX_MOVD2W)
33166 NODE_NAME_CASE(MMX_MOVW2D)
33167 NODE_NAME_CASE(PEXTRB)
33168 NODE_NAME_CASE(PEXTRW)
33169 NODE_NAME_CASE(INSERTPS)
33170 NODE_NAME_CASE(PINSRB)
33171 NODE_NAME_CASE(PINSRW)
33172 NODE_NAME_CASE(PSHUFB)
33173 NODE_NAME_CASE(ANDNP)
33174 NODE_NAME_CASE(BLENDI)
33175 NODE_NAME_CASE(BLENDV)
33176 NODE_NAME_CASE(HADD)
33177 NODE_NAME_CASE(HSUB)
33178 NODE_NAME_CASE(FHADD)
33179 NODE_NAME_CASE(FHSUB)
33180 NODE_NAME_CASE(CONFLICT)
33181 NODE_NAME_CASE(FMAX)
33182 NODE_NAME_CASE(FMAXS)
33183 NODE_NAME_CASE(FMAX_SAE)
33184 NODE_NAME_CASE(FMAXS_SAE)
33185 NODE_NAME_CASE(FMIN)
33186 NODE_NAME_CASE(FMINS)
33187 NODE_NAME_CASE(FMIN_SAE)
33188 NODE_NAME_CASE(FMINS_SAE)
33189 NODE_NAME_CASE(FMAXC)
33190 NODE_NAME_CASE(FMINC)
33191 NODE_NAME_CASE(FRSQRT)
33192 NODE_NAME_CASE(FRCP)
33193 NODE_NAME_CASE(EXTRQI)
33194 NODE_NAME_CASE(INSERTQI)
33195 NODE_NAME_CASE(TLSADDR)
33196 NODE_NAME_CASE(TLSBASEADDR)
33197 NODE_NAME_CASE(TLSCALL)
33198 NODE_NAME_CASE(EH_SJLJ_SETJMP)
33199 NODE_NAME_CASE(EH_SJLJ_LONGJMP)
33200 NODE_NAME_CASE(EH_SJLJ_SETUP_DISPATCH)
33201 NODE_NAME_CASE(EH_RETURN)
33202 NODE_NAME_CASE(TC_RETURN)
33203 NODE_NAME_CASE(FNSTCW16m)
33204 NODE_NAME_CASE(FLDCW16m)
33205 NODE_NAME_CASE(FNSTENVm)
33206 NODE_NAME_CASE(FLDENVm)
33207 NODE_NAME_CASE(LCMPXCHG_DAG)
33208 NODE_NAME_CASE(LCMPXCHG8_DAG)
33209 NODE_NAME_CASE(LCMPXCHG16_DAG)
33210 NODE_NAME_CASE(LCMPXCHG16_SAVE_RBX_DAG)
33211 NODE_NAME_CASE(LADD)
33212 NODE_NAME_CASE(LSUB)
33213 NODE_NAME_CASE(LOR)
33214 NODE_NAME_CASE(LXOR)
33215 NODE_NAME_CASE(LAND)
33216 NODE_NAME_CASE(LBTS)
33217 NODE_NAME_CASE(LBTC)
33218 NODE_NAME_CASE(LBTR)
33219 NODE_NAME_CASE(LBTS_RM)
33220 NODE_NAME_CASE(LBTC_RM)
33221 NODE_NAME_CASE(LBTR_RM)
33222 NODE_NAME_CASE(AADD)
33223 NODE_NAME_CASE(AOR)
33224 NODE_NAME_CASE(AXOR)
33225 NODE_NAME_CASE(AAND)
33226 NODE_NAME_CASE(VZEXT_MOVL)
33227 NODE_NAME_CASE(VZEXT_LOAD)
33228 NODE_NAME_CASE(VEXTRACT_STORE)
33229 NODE_NAME_CASE(VTRUNC)
33230 NODE_NAME_CASE(VTRUNCS)
33231 NODE_NAME_CASE(VTRUNCUS)
33232 NODE_NAME_CASE(VMTRUNC)
33233 NODE_NAME_CASE(VMTRUNCS)
33234 NODE_NAME_CASE(VMTRUNCUS)
33235 NODE_NAME_CASE(VTRUNCSTORES)
33236 NODE_NAME_CASE(VTRUNCSTOREUS)
33237 NODE_NAME_CASE(VMTRUNCSTORES)
33238 NODE_NAME_CASE(VMTRUNCSTOREUS)
33239 NODE_NAME_CASE(VFPEXT)
33240 NODE_NAME_CASE(STRICT_VFPEXT)
33241 NODE_NAME_CASE(VFPEXT_SAE)
33242 NODE_NAME_CASE(VFPEXTS)
33243 NODE_NAME_CASE(VFPEXTS_SAE)
33244 NODE_NAME_CASE(VFPROUND)
33245 NODE_NAME_CASE(STRICT_VFPROUND)
33246 NODE_NAME_CASE(VMFPROUND)
33247 NODE_NAME_CASE(VFPROUND_RND)
33248 NODE_NAME_CASE(VFPROUNDS)
33249 NODE_NAME_CASE(VFPROUNDS_RND)
33250 NODE_NAME_CASE(VSHLDQ)
33251 NODE_NAME_CASE(VSRLDQ)
33252 NODE_NAME_CASE(VSHL)
33253 NODE_NAME_CASE(VSRL)
33254 NODE_NAME_CASE(VSRA)
33255 NODE_NAME_CASE(VSHLI)
33256 NODE_NAME_CASE(VSRLI)
33257 NODE_NAME_CASE(VSRAI)
33258 NODE_NAME_CASE(VSHLV)
33259 NODE_NAME_CASE(VSRLV)
33260 NODE_NAME_CASE(VSRAV)
33261 NODE_NAME_CASE(VROTLI)
33262 NODE_NAME_CASE(VROTRI)
33263 NODE_NAME_CASE(VPPERM)
33264 NODE_NAME_CASE(CMPP)
33265 NODE_NAME_CASE(STRICT_CMPP)
33266 NODE_NAME_CASE(PCMPEQ)
33267 NODE_NAME_CASE(PCMPGT)
33268 NODE_NAME_CASE(PHMINPOS)
33269 NODE_NAME_CASE(ADD)
33270 NODE_NAME_CASE(SUB)
33271 NODE_NAME_CASE(ADC)
33272 NODE_NAME_CASE(SBB)
33273 NODE_NAME_CASE(SMUL)
33274 NODE_NAME_CASE(UMUL)
33275 NODE_NAME_CASE(OR)
33276 NODE_NAME_CASE(XOR)
33277 NODE_NAME_CASE(AND)
33278 NODE_NAME_CASE(BEXTR)
33279 NODE_NAME_CASE(BEXTRI)
33280 NODE_NAME_CASE(BZHI)
33281 NODE_NAME_CASE(PDEP)
33282 NODE_NAME_CASE(PEXT)
33283 NODE_NAME_CASE(MUL_IMM)
33284 NODE_NAME_CASE(MOVMSK)
33285 NODE_NAME_CASE(PTEST)
33286 NODE_NAME_CASE(TESTP)
33287 NODE_NAME_CASE(KORTEST)
33288 NODE_NAME_CASE(KTEST)
33289 NODE_NAME_CASE(KADD)
33290 NODE_NAME_CASE(KSHIFTL)
33291 NODE_NAME_CASE(KSHIFTR)
33292 NODE_NAME_CASE(PACKSS)
33293 NODE_NAME_CASE(PACKUS)
33294 NODE_NAME_CASE(PALIGNR)
33295 NODE_NAME_CASE(VALIGN)
33296 NODE_NAME_CASE(VSHLD)
33297 NODE_NAME_CASE(VSHRD)
33298 NODE_NAME_CASE(VSHLDV)
33299 NODE_NAME_CASE(VSHRDV)
33300 NODE_NAME_CASE(PSHUFD)
33301 NODE_NAME_CASE(PSHUFHW)
33302 NODE_NAME_CASE(PSHUFLW)
33303 NODE_NAME_CASE(SHUFP)
33304 NODE_NAME_CASE(SHUF128)
33305 NODE_NAME_CASE(MOVLHPS)
33306 NODE_NAME_CASE(MOVHLPS)
33307 NODE_NAME_CASE(MOVDDUP)
33308 NODE_NAME_CASE(MOVSHDUP)
33309 NODE_NAME_CASE(MOVSLDUP)
33310 NODE_NAME_CASE(MOVSD)
33311 NODE_NAME_CASE(MOVSS)
33312 NODE_NAME_CASE(MOVSH)
33313 NODE_NAME_CASE(UNPCKL)
33314 NODE_NAME_CASE(UNPCKH)
33315 NODE_NAME_CASE(VBROADCAST)
33316 NODE_NAME_CASE(VBROADCAST_LOAD)
33317 NODE_NAME_CASE(VBROADCASTM)
33318 NODE_NAME_CASE(SUBV_BROADCAST_LOAD)
33319 NODE_NAME_CASE(VPERMILPV)
33320 NODE_NAME_CASE(VPERMILPI)
33321 NODE_NAME_CASE(VPERM2X128)
33322 NODE_NAME_CASE(VPERMV)
33323 NODE_NAME_CASE(VPERMV3)
33324 NODE_NAME_CASE(VPERMI)
33325 NODE_NAME_CASE(VPTERNLOG)
33326 NODE_NAME_CASE(VFIXUPIMM)
33327 NODE_NAME_CASE(VFIXUPIMM_SAE)
33328 NODE_NAME_CASE(VFIXUPIMMS)
33329 NODE_NAME_CASE(VFIXUPIMMS_SAE)
33330 NODE_NAME_CASE(VRANGE)
33331 NODE_NAME_CASE(VRANGE_SAE)
33332 NODE_NAME_CASE(VRANGES)
33333 NODE_NAME_CASE(VRANGES_SAE)
33334 NODE_NAME_CASE(PMULUDQ)
33335 NODE_NAME_CASE(PMULDQ)
33336 NODE_NAME_CASE(PSADBW)
33337 NODE_NAME_CASE(DBPSADBW)
33338 NODE_NAME_CASE(VASTART_SAVE_XMM_REGS)
33339 NODE_NAME_CASE(VAARG_64)
33340 NODE_NAME_CASE(VAARG_X32)
33341 NODE_NAME_CASE(DYN_ALLOCA)
33342 NODE_NAME_CASE(MFENCE)
33343 NODE_NAME_CASE(SEG_ALLOCA)
33344 NODE_NAME_CASE(PROBED_ALLOCA)
33345 NODE_NAME_CASE(RDRAND)
33346 NODE_NAME_CASE(RDSEED)
33347 NODE_NAME_CASE(RDPKRU)
33348 NODE_NAME_CASE(WRPKRU)
33349 NODE_NAME_CASE(VPMADDUBSW)
33350 NODE_NAME_CASE(VPMADDWD)
33351 NODE_NAME_CASE(VPSHA)
33352 NODE_NAME_CASE(VPSHL)
33353 NODE_NAME_CASE(VPCOM)
33354 NODE_NAME_CASE(VPCOMU)
33355 NODE_NAME_CASE(VPERMIL2)
33356 NODE_NAME_CASE(FMSUB)
33357 NODE_NAME_CASE(STRICT_FMSUB)
33358 NODE_NAME_CASE(FNMADD)
33359 NODE_NAME_CASE(STRICT_FNMADD)
33360 NODE_NAME_CASE(FNMSUB)
33361 NODE_NAME_CASE(STRICT_FNMSUB)
33362 NODE_NAME_CASE(FMADDSUB)
33363 NODE_NAME_CASE(FMSUBADD)
33364 NODE_NAME_CASE(FMADD_RND)
33365 NODE_NAME_CASE(FNMADD_RND)
33366 NODE_NAME_CASE(FMSUB_RND)
33367 NODE_NAME_CASE(FNMSUB_RND)
33368 NODE_NAME_CASE(FMADDSUB_RND)
33369 NODE_NAME_CASE(FMSUBADD_RND)
33370 NODE_NAME_CASE(VFMADDC)
33371 NODE_NAME_CASE(VFMADDC_RND)
33372 NODE_NAME_CASE(VFCMADDC)
33373 NODE_NAME_CASE(VFCMADDC_RND)
33374 NODE_NAME_CASE(VFMULC)
33375 NODE_NAME_CASE(VFMULC_RND)
33376 NODE_NAME_CASE(VFCMULC)
33377 NODE_NAME_CASE(VFCMULC_RND)
33378 NODE_NAME_CASE(VFMULCSH)
33379 NODE_NAME_CASE(VFMULCSH_RND)
33380 NODE_NAME_CASE(VFCMULCSH)
33381 NODE_NAME_CASE(VFCMULCSH_RND)
33382 NODE_NAME_CASE(VFMADDCSH)
33383 NODE_NAME_CASE(VFMADDCSH_RND)
33384 NODE_NAME_CASE(VFCMADDCSH)
33385 NODE_NAME_CASE(VFCMADDCSH_RND)
33386 NODE_NAME_CASE(VPMADD52H)
33387 NODE_NAME_CASE(VPMADD52L)
33388 NODE_NAME_CASE(VRNDSCALE)
33389 NODE_NAME_CASE(STRICT_VRNDSCALE)
33390 NODE_NAME_CASE(VRNDSCALE_SAE)
33391 NODE_NAME_CASE(VRNDSCALES)
33392 NODE_NAME_CASE(VRNDSCALES_SAE)
33393 NODE_NAME_CASE(VREDUCE)
33394 NODE_NAME_CASE(VREDUCE_SAE)
33395 NODE_NAME_CASE(VREDUCES)
33396 NODE_NAME_CASE(VREDUCES_SAE)
33397 NODE_NAME_CASE(VGETMANT)
33398 NODE_NAME_CASE(VGETMANT_SAE)
33399 NODE_NAME_CASE(VGETMANTS)
33400 NODE_NAME_CASE(VGETMANTS_SAE)
33401 NODE_NAME_CASE(PCMPESTR)
33402 NODE_NAME_CASE(PCMPISTR)
33403 NODE_NAME_CASE(XTEST)
33404 NODE_NAME_CASE(COMPRESS)
33405 NODE_NAME_CASE(EXPAND)
33406 NODE_NAME_CASE(SELECTS)
33407 NODE_NAME_CASE(ADDSUB)
33408 NODE_NAME_CASE(RCP14)
33409 NODE_NAME_CASE(RCP14S)
33410 NODE_NAME_CASE(RCP28)
33411 NODE_NAME_CASE(RCP28_SAE)
33412 NODE_NAME_CASE(RCP28S)
33413 NODE_NAME_CASE(RCP28S_SAE)
33414 NODE_NAME_CASE(EXP2)
33415 NODE_NAME_CASE(EXP2_SAE)
33416 NODE_NAME_CASE(RSQRT14)
33417 NODE_NAME_CASE(RSQRT14S)
33418 NODE_NAME_CASE(RSQRT28)
33419 NODE_NAME_CASE(RSQRT28_SAE)
33420 NODE_NAME_CASE(RSQRT28S)
33421 NODE_NAME_CASE(RSQRT28S_SAE)
33422 NODE_NAME_CASE(FADD_RND)
33423 NODE_NAME_CASE(FADDS)
33424 NODE_NAME_CASE(FADDS_RND)
33425 NODE_NAME_CASE(FSUB_RND)
33426 NODE_NAME_CASE(FSUBS)
33427 NODE_NAME_CASE(FSUBS_RND)
33428 NODE_NAME_CASE(FMUL_RND)
33429 NODE_NAME_CASE(FMULS)
33430 NODE_NAME_CASE(FMULS_RND)
33431 NODE_NAME_CASE(FDIV_RND)
33432 NODE_NAME_CASE(FDIVS)
33433 NODE_NAME_CASE(FDIVS_RND)
33434 NODE_NAME_CASE(FSQRT_RND)
33435 NODE_NAME_CASE(FSQRTS)
33436 NODE_NAME_CASE(FSQRTS_RND)
33437 NODE_NAME_CASE(FGETEXP)
33438 NODE_NAME_CASE(FGETEXP_SAE)
33439 NODE_NAME_CASE(FGETEXPS)
33440 NODE_NAME_CASE(FGETEXPS_SAE)
33441 NODE_NAME_CASE(SCALEF)
33442 NODE_NAME_CASE(SCALEF_RND)
33443 NODE_NAME_CASE(SCALEFS)
33444 NODE_NAME_CASE(SCALEFS_RND)
33445 NODE_NAME_CASE(MULHRS)
33446 NODE_NAME_CASE(SINT_TO_FP_RND)
33447 NODE_NAME_CASE(UINT_TO_FP_RND)
33448 NODE_NAME_CASE(CVTTP2SI)
33449 NODE_NAME_CASE(CVTTP2UI)
33450 NODE_NAME_CASE(STRICT_CVTTP2SI)
33451 NODE_NAME_CASE(STRICT_CVTTP2UI)
33452 NODE_NAME_CASE(MCVTTP2SI)
33453 NODE_NAME_CASE(MCVTTP2UI)
33454 NODE_NAME_CASE(CVTTP2SI_SAE)
33455 NODE_NAME_CASE(CVTTP2UI_SAE)
33456 NODE_NAME_CASE(CVTTS2SI)
33457 NODE_NAME_CASE(CVTTS2UI)
33458 NODE_NAME_CASE(CVTTS2SI_SAE)
33459 NODE_NAME_CASE(CVTTS2UI_SAE)
33460 NODE_NAME_CASE(CVTSI2P)
33461 NODE_NAME_CASE(CVTUI2P)
33462 NODE_NAME_CASE(STRICT_CVTSI2P)
33463 NODE_NAME_CASE(STRICT_CVTUI2P)
33464 NODE_NAME_CASE(MCVTSI2P)
33465 NODE_NAME_CASE(MCVTUI2P)
33466 NODE_NAME_CASE(VFPCLASS)
33467 NODE_NAME_CASE(VFPCLASSS)
33468 NODE_NAME_CASE(MULTISHIFT)
33469 NODE_NAME_CASE(SCALAR_SINT_TO_FP)
33470 NODE_NAME_CASE(SCALAR_SINT_TO_FP_RND)
33471 NODE_NAME_CASE(SCALAR_UINT_TO_FP)
33472 NODE_NAME_CASE(SCALAR_UINT_TO_FP_RND)
33473 NODE_NAME_CASE(CVTPS2PH)
33474 NODE_NAME_CASE(STRICT_CVTPS2PH)
33475 NODE_NAME_CASE(CVTPS2PH_SAE)
33476 NODE_NAME_CASE(MCVTPS2PH)
33477 NODE_NAME_CASE(MCVTPS2PH_SAE)
33478 NODE_NAME_CASE(CVTPH2PS)
33479 NODE_NAME_CASE(STRICT_CVTPH2PS)
33480 NODE_NAME_CASE(CVTPH2PS_SAE)
33481 NODE_NAME_CASE(CVTP2SI)
33482 NODE_NAME_CASE(CVTP2UI)
33483 NODE_NAME_CASE(MCVTP2SI)
33484 NODE_NAME_CASE(MCVTP2UI)
33485 NODE_NAME_CASE(CVTP2SI_RND)
33486 NODE_NAME_CASE(CVTP2UI_RND)
33487 NODE_NAME_CASE(CVTS2SI)
33488 NODE_NAME_CASE(CVTS2UI)
33489 NODE_NAME_CASE(CVTS2SI_RND)
33490 NODE_NAME_CASE(CVTS2UI_RND)
33491 NODE_NAME_CASE(CVTNE2PS2BF16)
33492 NODE_NAME_CASE(CVTNEPS2BF16)
33493 NODE_NAME_CASE(MCVTNEPS2BF16)
33494 NODE_NAME_CASE(DPBF16PS)
33495 NODE_NAME_CASE(LWPINS)
33496 NODE_NAME_CASE(MGATHER)
33497 NODE_NAME_CASE(MSCATTER)
33498 NODE_NAME_CASE(VPDPBUSD)
33499 NODE_NAME_CASE(VPDPBUSDS)
33500 NODE_NAME_CASE(VPDPWSSD)
33501 NODE_NAME_CASE(VPDPWSSDS)
33502 NODE_NAME_CASE(VPSHUFBITQMB)
33503 NODE_NAME_CASE(GF2P8MULB)
33504 NODE_NAME_CASE(GF2P8AFFINEQB)
33505 NODE_NAME_CASE(GF2P8AFFINEINVQB)
33506 NODE_NAME_CASE(NT_CALL)
33507 NODE_NAME_CASE(NT_BRIND)
33508 NODE_NAME_CASE(UMWAIT)
33509 NODE_NAME_CASE(TPAUSE)
33510 NODE_NAME_CASE(ENQCMD)
33511 NODE_NAME_CASE(ENQCMDS)
33512 NODE_NAME_CASE(VP2INTERSECT)
33513 NODE_NAME_CASE(VPDPBSUD)
33514 NODE_NAME_CASE(VPDPBSUDS)
33515 NODE_NAME_CASE(VPDPBUUD)
33516 NODE_NAME_CASE(VPDPBUUDS)
33517 NODE_NAME_CASE(VPDPBSSD)
33518 NODE_NAME_CASE(VPDPBSSDS)
33519 NODE_NAME_CASE(AESENC128KL)
33520 NODE_NAME_CASE(AESDEC128KL)
33521 NODE_NAME_CASE(AESENC256KL)
33522 NODE_NAME_CASE(AESDEC256KL)
33523 NODE_NAME_CASE(AESENCWIDE128KL)
33524 NODE_NAME_CASE(AESDECWIDE128KL)
33525 NODE_NAME_CASE(AESENCWIDE256KL)
33526 NODE_NAME_CASE(AESDECWIDE256KL)
33527 NODE_NAME_CASE(CMPCCXADD)
33528 NODE_NAME_CASE(TESTUI)
33529 NODE_NAME_CASE(FP80_ADD)
33530 NODE_NAME_CASE(STRICT_FP80_ADD)
33532 return nullptr;
33533 #undef NODE_NAME_CASE
33536 /// Return true if the addressing mode represented by AM is legal for this
33537 /// target, for a load/store of the specified type.
33538 bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
33539 const AddrMode &AM, Type *Ty,
33540 unsigned AS,
33541 Instruction *I) const {
33542 // X86 supports extremely general addressing modes.
33543 CodeModel::Model M = getTargetMachine().getCodeModel();
33545 // X86 allows a sign-extended 32-bit immediate field as a displacement.
33546 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
33547 return false;
33549 if (AM.BaseGV) {
33550 unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
33552 // If a reference to this global requires an extra load, we can't fold it.
33553 if (isGlobalStubReference(GVFlags))
33554 return false;
33556 // If BaseGV requires a register for the PIC base, we cannot also have a
33557 // BaseReg specified.
33558 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
33559 return false;
33561 // If lower 4G is not available, then we must use rip-relative addressing.
33562 if ((M != CodeModel::Small || isPositionIndependent()) &&
33563 Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
33564 return false;
33567 switch (AM.Scale) {
33568 case 0:
33569 case 1:
33570 case 2:
33571 case 4:
33572 case 8:
33573 // These scales always work.
33574 break;
33575 case 3:
33576 case 5:
33577 case 9:
33578 // These scales are formed with basereg+scalereg. Only accept if there is
33579 // no basereg yet.
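// e.g. a scale of 9 is encoded as base + index*8 with the same register in
// both slots (lea (%reg,%reg,8)), so it consumes the base-register slot.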
33580 if (AM.HasBaseReg)
33581 return false;
33582 break;
33583 default: // Other stuff never works.
33584 return false;
33587 return true;
33590 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
33591 unsigned Bits = Ty->getScalarSizeInBits();
33593 // XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts.
33594 // Splitting for v32i8/v16i16 on XOP+AVX2 targets is still preferred.
33595 if (Subtarget.hasXOP() &&
33596 (Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64))
33597 return false;
33599 // AVX2 has vpsllv[dq] instructions (and other shifts) that make variable
33600 // shifts just as cheap as scalar ones.
33601 if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
33602 return false;
33604 // AVX512BW has shifts such as vpsllvw.
33605 if (Subtarget.hasBWI() && Bits == 16)
33606 return false;
33608 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
33609 // fully general vector.
33610 return true;
33613 bool X86TargetLowering::isBinOp(unsigned Opcode) const {
33614 switch (Opcode) {
33615 // These are non-commutative binops.
33616 // TODO: Add more X86ISD opcodes once we have test coverage.
33617 case X86ISD::ANDNP:
33618 case X86ISD::PCMPGT:
33619 case X86ISD::FMAX:
33620 case X86ISD::FMIN:
33621 case X86ISD::FANDN:
33622 case X86ISD::VPSHA:
33623 case X86ISD::VPSHL:
33624 case X86ISD::VSHLV:
33625 case X86ISD::VSRLV:
33626 case X86ISD::VSRAV:
33627 return true;
33630 return TargetLoweringBase::isBinOp(Opcode);
33633 bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
33634 switch (Opcode) {
33635 // TODO: Add more X86ISD opcodes once we have test coverage.
33636 case X86ISD::PCMPEQ:
33637 case X86ISD::PMULDQ:
33638 case X86ISD::PMULUDQ:
33639 case X86ISD::FMAXC:
33640 case X86ISD::FMINC:
33641 case X86ISD::FAND:
33642 case X86ISD::FOR:
33643 case X86ISD::FXOR:
33644 return true;
33647 return TargetLoweringBase::isCommutativeBinOp(Opcode);
33650 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
33651 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
33652 return false;
33653 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
33654 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
33655 return NumBits1 > NumBits2;
33658 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
33659 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
33660 return false;
33662 if (!isTypeLegal(EVT::getEVT(Ty1)))
33663 return false;
33665 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
33667 // Assuming the caller doesn't have a zeroext or signext return parameter,
33668 // truncation all the way down to i1 is valid.
33669 return true;
33672 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
33673 return isInt<32>(Imm);
33676 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
33677 // Can also use sub to handle negated immediates.
33678 return isInt<32>(Imm);
33681 bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const {
33682 return isInt<32>(Imm);
33685 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
33686 if (!VT1.isScalarInteger() || !VT2.isScalarInteger())
33687 return false;
33688 unsigned NumBits1 = VT1.getSizeInBits();
33689 unsigned NumBits2 = VT2.getSizeInBits();
33690 return NumBits1 > NumBits2;
33693 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
33694 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
33695 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
33698 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
33699 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
33700 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
33703 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
33704 EVT VT1 = Val.getValueType();
33705 if (isZExtFree(VT1, VT2))
33706 return true;
33708 if (Val.getOpcode() != ISD::LOAD)
33709 return false;
33711 if (!VT1.isSimple() || !VT1.isInteger() ||
33712 !VT2.isSimple() || !VT2.isInteger())
33713 return false;
33715 switch (VT1.getSimpleVT().SimpleTy) {
33716 default: break;
33717 case MVT::i8:
33718 case MVT::i16:
33719 case MVT::i32:
33720 // X86 has 8, 16, and 32-bit zero-extending loads.
33721 return true;
33724 return false;
33727 bool X86TargetLowering::shouldSinkOperands(Instruction *I,
33728 SmallVectorImpl<Use *> &Ops) const {
33729 using namespace llvm::PatternMatch;
33731 FixedVectorType *VTy = dyn_cast<FixedVectorType>(I->getType());
33732 if (!VTy)
33733 return false;
33735 if (I->getOpcode() == Instruction::Mul &&
33736 VTy->getElementType()->isIntegerTy(64)) {
33737 for (auto &Op : I->operands()) {
33738 // Make sure we are not already sinking this operand
33739 if (any_of(Ops, [&](Use *U) { return U->get() == Op; }))
33740 continue;
33742 // Look for PMULDQ pattern where the input is a sext_inreg from vXi32 or
33743 // the PMULUDQ pattern where the input is a zext_inreg from vXi32.
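// Roughly, in IR (illustrative, applied element-wise to the vector):
//   sext_inreg: ashr (shl %x, 32), 32
//   zext_inreg: and %x, 0xffffffff
// Sinking these next to the 64-bit multiply lets ISel form PMULDQ/PMULUDQ.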
33744 if (Subtarget.hasSSE41() &&
33745 match(Op.get(), m_AShr(m_Shl(m_Value(), m_SpecificInt(32)),
33746 m_SpecificInt(32)))) {
33747 Ops.push_back(&cast<Instruction>(Op)->getOperandUse(0));
33748 Ops.push_back(&Op);
33749 } else if (Subtarget.hasSSE2() &&
33750 match(Op.get(),
33751 m_And(m_Value(), m_SpecificInt(UINT64_C(0xffffffff))))) {
33752 Ops.push_back(&Op);
33756 return !Ops.empty();
33759 // A uniform shift amount in a vector shift or funnel shift may be much
33760 // cheaper than a generic variable vector shift, so make that pattern visible
33761 // to SDAG by sinking the shuffle instruction next to the shift.
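// e.g. (illustrative IR)
//   %amt = shufflevector <4 x i32> %a, <4 x i32> poison, <4 x i32> zeroinitializer
//   %r   = shl <4 x i32> %x, %amt
// With the splat visible next to the shift, ISel can use a shift-by-scalar
// form (e.g. PSLLD with the count in an XMM register) rather than a fully
// general per-element shift.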
33762 int ShiftAmountOpNum = -1;
33763 if (I->isShift())
33764 ShiftAmountOpNum = 1;
33765 else if (auto *II = dyn_cast<IntrinsicInst>(I)) {
33766 if (II->getIntrinsicID() == Intrinsic::fshl ||
33767 II->getIntrinsicID() == Intrinsic::fshr)
33768 ShiftAmountOpNum = 2;
33771 if (ShiftAmountOpNum == -1)
33772 return false;
33774 auto *Shuf = dyn_cast<ShuffleVectorInst>(I->getOperand(ShiftAmountOpNum));
33775 if (Shuf && getSplatIndex(Shuf->getShuffleMask()) >= 0 &&
33776 isVectorShiftByScalarCheap(I->getType())) {
33777 Ops.push_back(&I->getOperandUse(ShiftAmountOpNum));
33778 return true;
33781 return false;
33784 bool X86TargetLowering::shouldConvertPhiType(Type *From, Type *To) const {
33785 if (!Subtarget.is64Bit())
33786 return false;
33787 return TargetLowering::shouldConvertPhiType(From, To);
33790 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
33791 if (isa<MaskedLoadSDNode>(ExtVal.getOperand(0)))
33792 return false;
33794 EVT SrcVT = ExtVal.getOperand(0).getValueType();
33796 // There is no extending load for vXi1.
33797 if (SrcVT.getScalarType() == MVT::i1)
33798 return false;
33800 return true;
33803 bool X86TargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
33804 EVT VT) const {
33805 if (!Subtarget.hasAnyFMA())
33806 return false;
33808 VT = VT.getScalarType();
33810 if (!VT.isSimple())
33811 return false;
33813 switch (VT.getSimpleVT().SimpleTy) {
33814 case MVT::f16:
33815 return Subtarget.hasFP16();
33816 case MVT::f32:
33817 case MVT::f64:
33818 return true;
33819 default:
33820 break;
33823 return false;
33826 bool X86TargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
33827 // i16 instructions are longer (0x66 prefix) and potentially slower.
33828 return !(SrcVT == MVT::i32 && DestVT == MVT::i16);
33831 bool X86TargetLowering::shouldFoldSelectWithIdentityConstant(unsigned Opcode,
33832 EVT VT) const {
33833 // TODO: This is too general. There are cases where pre-AVX512 codegen would
33834 // benefit. The transform may also be profitable for scalar code.
33835 if (!Subtarget.hasAVX512())
33836 return false;
33837 if (!Subtarget.hasVLX() && !VT.is512BitVector())
33838 return false;
33839 if (!VT.isVector() || VT.getScalarType() == MVT::i1)
33840 return false;
33842 return true;
33845 /// Targets can use this to indicate that they only support *some*
33846 /// VECTOR_SHUFFLE operations, those with specific masks.
33847 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
33848 /// are assumed to be legal.
33849 bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const {
33850 if (!VT.isSimple())
33851 return false;
33853 // Not for i1 vectors
33854 if (VT.getSimpleVT().getScalarType() == MVT::i1)
33855 return false;
33857 // Very little shuffling can be done for 64-bit vectors right now.
33858 if (VT.getSimpleVT().getSizeInBits() == 64)
33859 return false;
33861 // We only care that the types being shuffled are legal. The lowering can
33862 // handle any possible shuffle mask that results.
33863 return isTypeLegal(VT.getSimpleVT());
33866 bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
33867 EVT VT) const {
33868 // Don't convert an 'and' into a shuffle that we don't directly support.
33869 // vpblendw and vpshufb for 256-bit vectors are not available on AVX1.
33870 if (!Subtarget.hasAVX2())
33871 if (VT == MVT::v32i8 || VT == MVT::v16i16)
33872 return false;
33874 // Just delegate to the generic legality, clear masks aren't special.
33875 return isShuffleMaskLegal(Mask, VT);
33878 bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
33879 // If the subtarget is using indirect-branch thunks, we must not generate jump tables.
33880 if (Subtarget.useIndirectThunkBranches())
33881 return false;
33883 // Otherwise, fallback on the generic logic.
33884 return TargetLowering::areJTsAllowed(Fn);
33887 MVT X86TargetLowering::getPreferredSwitchConditionType(LLVMContext &Context,
33888 EVT ConditionVT) const {
33889 // Avoid 8- and 16-bit types because they increase the chance of unnecessary
33890 // zero-extensions.
33891 if (ConditionVT.getSizeInBits() < 32)
33892 return MVT::i32;
33893 return TargetLoweringBase::getPreferredSwitchConditionType(Context,
33894 ConditionVT);
33897 //===----------------------------------------------------------------------===//
33898 // X86 Scheduler Hooks
33899 //===----------------------------------------------------------------------===//
33901 // Returns true if EFLAGS is consumed after this iterator in the rest of the
33902 // basic block or any successors of the basic block.
33903 static bool isEFLAGSLiveAfter(MachineBasicBlock::iterator Itr,
33904 MachineBasicBlock *BB) {
33905 // Scan forward through BB for a use/def of EFLAGS.
33906 for (const MachineInstr &mi : llvm::make_range(std::next(Itr), BB->end())) {
33907 if (mi.readsRegister(X86::EFLAGS))
33908 return true;
33909 // If we found a def, we can stop searching.
33910 if (mi.definesRegister(X86::EFLAGS))
33911 return false;
33914 // If we hit the end of the block, check whether EFLAGS is live into a
33915 // successor.
33916 for (MachineBasicBlock *Succ : BB->successors())
33917 if (Succ->isLiveIn(X86::EFLAGS))
33918 return true;
33920 return false;
33923 /// Utility function to emit xbegin specifying the start of an RTM region.
33924 static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
33925 const TargetInstrInfo *TII) {
33926 const MIMetadata MIMD(MI);
33928 const BasicBlock *BB = MBB->getBasicBlock();
33929 MachineFunction::iterator I = ++MBB->getIterator();
33931 // For the v = xbegin(), we generate
33933 // thisMBB:
33934 // xbegin sinkMBB
33936 // mainMBB:
33937 // s0 = -1
33939 // fallBB:
33940 // eax = # XABORT_DEF
33941 // s1 = eax
33943 // sinkMBB:
33944 // v = phi(s0/mainBB, s1/fallBB)
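// XBEGIN falls through into the transaction (mainMBB); on an abort, execution
// resumes at the fallback label (fallMBB) with the abort status in EAX.  The
// -1 produced in mainMBB matches _XBEGIN_STARTED, the value the intrinsic
// reports when the transaction starts successfully.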
33946 MachineBasicBlock *thisMBB = MBB;
33947 MachineFunction *MF = MBB->getParent();
33948 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
33949 MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
33950 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
33951 MF->insert(I, mainMBB);
33952 MF->insert(I, fallMBB);
33953 MF->insert(I, sinkMBB);
33955 if (isEFLAGSLiveAfter(MI, MBB)) {
33956 mainMBB->addLiveIn(X86::EFLAGS);
33957 fallMBB->addLiveIn(X86::EFLAGS);
33958 sinkMBB->addLiveIn(X86::EFLAGS);
33961 // Transfer the remainder of BB and its successor edges to sinkMBB.
33962 sinkMBB->splice(sinkMBB->begin(), MBB,
33963 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
33964 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
33966 MachineRegisterInfo &MRI = MF->getRegInfo();
33967 Register DstReg = MI.getOperand(0).getReg();
33968 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
33969 Register mainDstReg = MRI.createVirtualRegister(RC);
33970 Register fallDstReg = MRI.createVirtualRegister(RC);
33972 // thisMBB:
33973 // xbegin fallMBB
33974 // # fallthrough to mainMBB
33975 // # abort to fallMBB
33976 BuildMI(thisMBB, MIMD, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
33977 thisMBB->addSuccessor(mainMBB);
33978 thisMBB->addSuccessor(fallMBB);
33980 // mainMBB:
33981 // mainDstReg := -1
33982 BuildMI(mainMBB, MIMD, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
33983 BuildMI(mainMBB, MIMD, TII->get(X86::JMP_1)).addMBB(sinkMBB);
33984 mainMBB->addSuccessor(sinkMBB);
33986 // fallMBB:
33987 // ; pseudo instruction to model the EAX value the hardware defines on XABORT
33988 // EAX := XABORT_DEF
33989 // fallDstReg := EAX
33990 BuildMI(fallMBB, MIMD, TII->get(X86::XABORT_DEF));
33991 BuildMI(fallMBB, MIMD, TII->get(TargetOpcode::COPY), fallDstReg)
33992 .addReg(X86::EAX);
33993 fallMBB->addSuccessor(sinkMBB);
33995 // sinkMBB:
33996 // DstReg := phi(mainDstReg/mainBB, fallDstReg/fallBB)
33997 BuildMI(*sinkMBB, sinkMBB->begin(), MIMD, TII->get(X86::PHI), DstReg)
33998 .addReg(mainDstReg).addMBB(mainMBB)
33999 .addReg(fallDstReg).addMBB(fallMBB);
34001 MI.eraseFromParent();
34002 return sinkMBB;
34005 MachineBasicBlock *
34006 X86TargetLowering::EmitVAARGWithCustomInserter(MachineInstr &MI,
34007 MachineBasicBlock *MBB) const {
34008 // Emit va_arg instruction on X86-64.
34010 // Operands to this pseudo-instruction:
34011 // 0 ) Output : destination address (reg)
34012 // 1-5) Input : va_list address (addr, i64mem)
34013 // 6 ) ArgSize : Size (in bytes) of vararg type
34014 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
34015 // 8 ) Align : Alignment of type
34016 // 9 ) EFLAGS (implicit-def)
34018 assert(MI.getNumOperands() == 10 && "VAARG should have 10 operands!");
34019 static_assert(X86::AddrNumOperands == 5, "VAARG assumes 5 address operands");
34021 Register DestReg = MI.getOperand(0).getReg();
34022 MachineOperand &Base = MI.getOperand(1);
34023 MachineOperand &Scale = MI.getOperand(2);
34024 MachineOperand &Index = MI.getOperand(3);
34025 MachineOperand &Disp = MI.getOperand(4);
34026 MachineOperand &Segment = MI.getOperand(5);
34027 unsigned ArgSize = MI.getOperand(6).getImm();
34028 unsigned ArgMode = MI.getOperand(7).getImm();
34029 Align Alignment = Align(MI.getOperand(8).getImm());
34031 MachineFunction *MF = MBB->getParent();
34033 // Memory Reference
34034 assert(MI.hasOneMemOperand() && "Expected VAARG to have one memoperand");
34036 MachineMemOperand *OldMMO = MI.memoperands().front();
34038 // Clone the MMO into two separate MMOs for loading and storing
34039 MachineMemOperand *LoadOnlyMMO = MF->getMachineMemOperand(
34040 OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
34041 MachineMemOperand *StoreOnlyMMO = MF->getMachineMemOperand(
34042 OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);
34044 // Machine Information
34045 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
34046 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
34047 const TargetRegisterClass *AddrRegClass =
34048 getRegClassFor(getPointerTy(MBB->getParent()->getDataLayout()));
34049 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
34050 const MIMetadata MIMD(MI);
34052 // struct va_list {
34053 // i32 gp_offset
34054 // i32 fp_offset
34055 // i64 overflow_area (address)
34056 // i64 reg_save_area (address)
34057 // }
34058 // sizeof(va_list) = 24
34059 // alignment(va_list) = 8
34061 unsigned TotalNumIntRegs = 6;
34062 unsigned TotalNumXMMRegs = 8;
34063 bool UseGPOffset = (ArgMode == 1);
34064 bool UseFPOffset = (ArgMode == 2);
34065 unsigned MaxOffset = TotalNumIntRegs * 8 +
34066 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
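// In the SysV x86-64 register save area the 6 integer registers come first
// (6 * 8 = 48 bytes), followed by the 8 vector registers (8 * 16 = 128
// bytes), so MaxOffset is 48 when pulling a GP argument and 176 when pulling
// an FP argument.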
34068 /* Align ArgSize to a multiple of 8 */
34069 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
34070 bool NeedsAlign = (Alignment > 8);
34072 MachineBasicBlock *thisMBB = MBB;
34073 MachineBasicBlock *overflowMBB;
34074 MachineBasicBlock *offsetMBB;
34075 MachineBasicBlock *endMBB;
34077 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
34078 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
34079 unsigned OffsetReg = 0;
34081 if (!UseGPOffset && !UseFPOffset) {
34082 // If we only pull from the overflow region, we don't create a branch.
34083 // We don't need to alter control flow.
34084 OffsetDestReg = 0; // unused
34085 OverflowDestReg = DestReg;
34087 offsetMBB = nullptr;
34088 overflowMBB = thisMBB;
34089 endMBB = thisMBB;
34090 } else {
34091 // First emit code to check if gp_offset (or fp_offset) is below the bound.
34092 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
34093 // If not, pull from overflow_area. (branch to overflowMBB)
34095 // thisMBB
34096 // | .
34097 // | .
34098 // offsetMBB overflowMBB
34099 // | .
34100 // | .
34101 // endMBB
34103 // Registers for the PHI in endMBB
34104 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
34105 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
34107 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
34108 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34109 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34110 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34112 MachineFunction::iterator MBBIter = ++MBB->getIterator();
34114 // Insert the new basic blocks
34115 MF->insert(MBBIter, offsetMBB);
34116 MF->insert(MBBIter, overflowMBB);
34117 MF->insert(MBBIter, endMBB);
34119 // Transfer the remainder of MBB and its successor edges to endMBB.
34120 endMBB->splice(endMBB->begin(), thisMBB,
34121 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
34122 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
34124 // Make offsetMBB and overflowMBB successors of thisMBB
34125 thisMBB->addSuccessor(offsetMBB);
34126 thisMBB->addSuccessor(overflowMBB);
34128 // endMBB is a successor of both offsetMBB and overflowMBB
34129 offsetMBB->addSuccessor(endMBB);
34130 overflowMBB->addSuccessor(endMBB);
34132 // Load the offset value into a register
34133 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
34134 BuildMI(thisMBB, MIMD, TII->get(X86::MOV32rm), OffsetReg)
34135 .add(Base)
34136 .add(Scale)
34137 .add(Index)
34138 .addDisp(Disp, UseFPOffset ? 4 : 0)
34139 .add(Segment)
34140 .setMemRefs(LoadOnlyMMO);
34142 // Check if there is enough room left to pull this argument.
34143 BuildMI(thisMBB, MIMD, TII->get(X86::CMP32ri))
34144 .addReg(OffsetReg)
34145 .addImm(MaxOffset + 8 - ArgSizeA8);
34147 // Branch to "overflowMBB" if offset >= max
34148 // Fall through to "offsetMBB" otherwise
34149 BuildMI(thisMBB, MIMD, TII->get(X86::JCC_1))
34150 .addMBB(overflowMBB).addImm(X86::COND_AE);
34153 // In offsetMBB, emit code to use the reg_save_area.
34154 if (offsetMBB) {
34155 assert(OffsetReg != 0);
34157 // Read the reg_save_area address.
34158 Register RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
34159 BuildMI(
34160 offsetMBB, MIMD,
34161 TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64rm : X86::MOV32rm),
34162 RegSaveReg)
34163 .add(Base)
34164 .add(Scale)
34165 .add(Index)
34166 .addDisp(Disp, Subtarget.isTarget64BitLP64() ? 16 : 12)
34167 .add(Segment)
34168 .setMemRefs(LoadOnlyMMO);
34170 if (Subtarget.isTarget64BitLP64()) {
34171 // Zero-extend the offset
34172 Register OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
34173 BuildMI(offsetMBB, MIMD, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
34174 .addImm(0)
34175 .addReg(OffsetReg)
34176 .addImm(X86::sub_32bit);
34178 // Add the offset to the reg_save_area to get the final address.
34179 BuildMI(offsetMBB, MIMD, TII->get(X86::ADD64rr), OffsetDestReg)
34180 .addReg(OffsetReg64)
34181 .addReg(RegSaveReg);
34182 } else {
34183 // Add the offset to the reg_save_area to get the final address.
34184 BuildMI(offsetMBB, MIMD, TII->get(X86::ADD32rr), OffsetDestReg)
34185 .addReg(OffsetReg)
34186 .addReg(RegSaveReg);
34189 // Compute the offset for the next argument
34190 Register NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
34191 BuildMI(offsetMBB, MIMD, TII->get(X86::ADD32ri), NextOffsetReg)
34192 .addReg(OffsetReg)
34193 .addImm(UseFPOffset ? 16 : 8);
34195 // Store it back into the va_list.
34196 BuildMI(offsetMBB, MIMD, TII->get(X86::MOV32mr))
34197 .add(Base)
34198 .add(Scale)
34199 .add(Index)
34200 .addDisp(Disp, UseFPOffset ? 4 : 0)
34201 .add(Segment)
34202 .addReg(NextOffsetReg)
34203 .setMemRefs(StoreOnlyMMO);
34205 // Jump to endMBB
34206 BuildMI(offsetMBB, MIMD, TII->get(X86::JMP_1))
34207 .addMBB(endMBB);
34211 // Emit code to use overflow area
34214 // Load the overflow_area address into a register.
34215 Register OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
34216 BuildMI(overflowMBB, MIMD,
34217 TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64rm : X86::MOV32rm),
34218 OverflowAddrReg)
34219 .add(Base)
34220 .add(Scale)
34221 .add(Index)
34222 .addDisp(Disp, 8)
34223 .add(Segment)
34224 .setMemRefs(LoadOnlyMMO);
34226 // If we need to align it, do so. Otherwise, just copy the address
34227 // to OverflowDestReg.
34228 if (NeedsAlign) {
34229 // Align the overflow address
34230 Register TmpReg = MRI.createVirtualRegister(AddrRegClass);
34232 // aligned_addr = (addr + (align-1)) & ~(align-1)
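// e.g. addr = 0x1005 with a 16-byte alignment gives (0x1005 + 15) & ~15 = 0x1010.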
34233 BuildMI(
34234 overflowMBB, MIMD,
34235 TII->get(Subtarget.isTarget64BitLP64() ? X86::ADD64ri32 : X86::ADD32ri),
34236 TmpReg)
34237 .addReg(OverflowAddrReg)
34238 .addImm(Alignment.value() - 1);
34240 BuildMI(
34241 overflowMBB, MIMD,
34242 TII->get(Subtarget.isTarget64BitLP64() ? X86::AND64ri32 : X86::AND32ri),
34243 OverflowDestReg)
34244 .addReg(TmpReg)
34245 .addImm(~(uint64_t)(Alignment.value() - 1));
34246 } else {
34247 BuildMI(overflowMBB, MIMD, TII->get(TargetOpcode::COPY), OverflowDestReg)
34248 .addReg(OverflowAddrReg);
34251 // Compute the next overflow address after this argument.
34252 // (the overflow address should be kept 8-byte aligned)
34253 Register NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
34254 BuildMI(
34255 overflowMBB, MIMD,
34256 TII->get(Subtarget.isTarget64BitLP64() ? X86::ADD64ri32 : X86::ADD32ri),
34257 NextAddrReg)
34258 .addReg(OverflowDestReg)
34259 .addImm(ArgSizeA8);
34261 // Store the new overflow address.
34262 BuildMI(overflowMBB, MIMD,
34263 TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64mr : X86::MOV32mr))
34264 .add(Base)
34265 .add(Scale)
34266 .add(Index)
34267 .addDisp(Disp, 8)
34268 .add(Segment)
34269 .addReg(NextAddrReg)
34270 .setMemRefs(StoreOnlyMMO);
34272 // If we branched, emit the PHI to the front of endMBB.
34273 if (offsetMBB) {
34274 BuildMI(*endMBB, endMBB->begin(), MIMD,
34275 TII->get(X86::PHI), DestReg)
34276 .addReg(OffsetDestReg).addMBB(offsetMBB)
34277 .addReg(OverflowDestReg).addMBB(overflowMBB);
34280 // Erase the pseudo instruction
34281 MI.eraseFromParent();
34283 return endMBB;
34286 // The EFLAGS operand of SelectItr might be missing a kill marker
34287 // because there were multiple uses of EFLAGS, and ISel didn't know
34288 // which to mark. Figure out whether SelectItr should have had a
34289 // kill marker, and set it if it should. Returns the correct kill
34290 // marker value.
34291 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
34292 MachineBasicBlock* BB,
34293 const TargetRegisterInfo* TRI) {
34294 if (isEFLAGSLiveAfter(SelectItr, BB))
34295 return false;
34297 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
34298 // out. SelectMI should have a kill flag on EFLAGS.
34299 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
34300 return true;
34303 // Return true if it is OK for this CMOV pseudo-opcode to be cascaded
34304 // together with other CMOV pseudo-opcodes into a single basic-block with
34305 // conditional jump around it.
34306 static bool isCMOVPseudo(MachineInstr &MI) {
34307 switch (MI.getOpcode()) {
34308 case X86::CMOV_FR16:
34309 case X86::CMOV_FR16X:
34310 case X86::CMOV_FR32:
34311 case X86::CMOV_FR32X:
34312 case X86::CMOV_FR64:
34313 case X86::CMOV_FR64X:
34314 case X86::CMOV_GR8:
34315 case X86::CMOV_GR16:
34316 case X86::CMOV_GR32:
34317 case X86::CMOV_RFP32:
34318 case X86::CMOV_RFP64:
34319 case X86::CMOV_RFP80:
34320 case X86::CMOV_VR64:
34321 case X86::CMOV_VR128:
34322 case X86::CMOV_VR128X:
34323 case X86::CMOV_VR256:
34324 case X86::CMOV_VR256X:
34325 case X86::CMOV_VR512:
34326 case X86::CMOV_VK1:
34327 case X86::CMOV_VK2:
34328 case X86::CMOV_VK4:
34329 case X86::CMOV_VK8:
34330 case X86::CMOV_VK16:
34331 case X86::CMOV_VK32:
34332 case X86::CMOV_VK64:
34333 return true;
34335 default:
34336 return false;
34340 // Helper function, which inserts PHI functions into SinkMBB:
34341 // %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
34342 // where %FalseValue(i) and %TrueValue(i) are taken from the consecutive CMOVs
34343 // in the [MIItBegin, MIItEnd) range. It returns the MachineInstrBuilder for
34344 // the last PHI function inserted.
34345 static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
34346 MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd,
34347 MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
34348 MachineBasicBlock *SinkMBB) {
34349 MachineFunction *MF = TrueMBB->getParent();
34350 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
34351 const MIMetadata MIMD(*MIItBegin);
34353 X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
34354 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
34356 MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
34358 // As we are creating the PHIs, we have to be careful if there is more than
34359 // one. Later CMOVs may reference the results of earlier CMOVs, but later
34360 // PHIs have to reference the individual true/false inputs from earlier PHIs.
34361 // That also means that PHI construction must work forward from earlier to
34362 // later, and that the code must maintain a mapping from each earlier PHI's
34363 // destination register to the registers that went into that PHI.
34364 DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
34365 MachineInstrBuilder MIB;
34367 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
34368 Register DestReg = MIIt->getOperand(0).getReg();
34369 Register Op1Reg = MIIt->getOperand(1).getReg();
34370 Register Op2Reg = MIIt->getOperand(2).getReg();
34372 // If this CMOV we are generating is the opposite condition from
34373 // the jump we generated, then we have to swap the operands for the
34374 // PHI that is going to be generated.
34375 if (MIIt->getOperand(3).getImm() == OppCC)
34376 std::swap(Op1Reg, Op2Reg);
34378 if (RegRewriteTable.contains(Op1Reg))
34379 Op1Reg = RegRewriteTable[Op1Reg].first;
34381 if (RegRewriteTable.contains(Op2Reg))
34382 Op2Reg = RegRewriteTable[Op2Reg].second;
34384 MIB =
34385 BuildMI(*SinkMBB, SinkInsertionPoint, MIMD, TII->get(X86::PHI), DestReg)
34386 .addReg(Op1Reg)
34387 .addMBB(FalseMBB)
34388 .addReg(Op2Reg)
34389 .addMBB(TrueMBB);
34391 // Add this PHI to the rewrite table.
34392 RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
34395 return MIB;
34398 // Lower cascaded selects of the form (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2).
34399 MachineBasicBlock *
34400 X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
34401 MachineInstr &SecondCascadedCMOV,
34402 MachineBasicBlock *ThisMBB) const {
34403 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
34404 const MIMetadata MIMD(FirstCMOV);
34406 // We lower cascaded CMOVs such as
34408 // (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2)
34410 // to two successive branches.
34412 // Without this, we would add a PHI between the two jumps, which ends up
34413 // creating a few copies all around. For instance, for
34415 // (sitofp (zext (fcmp une)))
34417 // we would generate:
34419 // ucomiss %xmm1, %xmm0
34420 // movss <1.0f>, %xmm0
34421 // movaps %xmm0, %xmm1
34422 // jne .LBB5_2
34423 // xorps %xmm1, %xmm1
34424 // .LBB5_2:
34425 // jp .LBB5_4
34426 // movaps %xmm1, %xmm0
34427 // .LBB5_4:
34428 // retq
34430 // because this custom-inserter would have generated:
34432 // A
34433 // | \
34434 // | B
34435 // | /
34436 // C
34437 // | \
34438 // | D
34439 // | /
34440 // E
34442 // A: X = ...; Y = ...
34443 // B: empty
34444 // C: Z = PHI [X, A], [Y, B]
34445 // D: empty
34446 // E: PHI [X, C], [Z, D]
34448 // If we lower both CMOVs in a single step, we can instead generate:
34450 // A
34451 // | \
34452 // | C
34453 // | /|
34454 // |/ |
34455 // | |
34456 // | D
34457 // | /
34458 // E
34460 // A: X = ...; Y = ...
34461 // D: empty
34462 // E: PHI [X, A], [X, C], [Y, D]
34464 // Which, in our sitofp/fcmp example, gives us something like:
34466 // ucomiss %xmm1, %xmm0
34467 // movss <1.0f>, %xmm0
34468 // jne .LBB5_4
34469 // jp .LBB5_4
34470 // xorps %xmm0, %xmm0
34471 // .LBB5_4:
34472 // retq
34475 // We lower cascaded CMOV into two successive branches to the same block.
34476 // EFLAGS is used by both, so mark it as live in the second.
34477 const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
34478 MachineFunction *F = ThisMBB->getParent();
34479 MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
34480 MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
34481 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
34483 MachineFunction::iterator It = ++ThisMBB->getIterator();
34484 F->insert(It, FirstInsertedMBB);
34485 F->insert(It, SecondInsertedMBB);
34486 F->insert(It, SinkMBB);
34488 // For a cascaded CMOV, we lower it to two successive branches to
34489 // the same block (SinkMBB). EFLAGS is used by both, so mark it as live in
34490 // the FirstInsertedMBB.
34491 FirstInsertedMBB->addLiveIn(X86::EFLAGS);
34493 // If the EFLAGS register isn't dead in the terminator, then claim that it's
34494 // live into the sink and copy blocks.
34495 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
34496 if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
34497 !checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
34498 SecondInsertedMBB->addLiveIn(X86::EFLAGS);
34499 SinkMBB->addLiveIn(X86::EFLAGS);
34502 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
34503 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
34504 std::next(MachineBasicBlock::iterator(FirstCMOV)),
34505 ThisMBB->end());
34506 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
34508 // Fallthrough block for ThisMBB.
34509 ThisMBB->addSuccessor(FirstInsertedMBB);
34510 // The true block target of the first branch is always SinkMBB.
34511 ThisMBB->addSuccessor(SinkMBB);
34512 // Fallthrough block for FirstInsertedMBB.
34513 FirstInsertedMBB->addSuccessor(SecondInsertedMBB);
34514 // The true block for the branch of FirstInsertedMBB.
34515 FirstInsertedMBB->addSuccessor(SinkMBB);
34516 // This is fallthrough.
34517 SecondInsertedMBB->addSuccessor(SinkMBB);
34519 // Create the conditional branch instructions.
34520 X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
34521 BuildMI(ThisMBB, MIMD, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(FirstCC);
34523 X86::CondCode SecondCC =
34524 X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
34525 BuildMI(FirstInsertedMBB, MIMD, TII->get(X86::JCC_1))
34526 .addMBB(SinkMBB)
34527 .addImm(SecondCC);
34529 // SinkMBB:
34530 // %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ]
34531 Register DestReg = SecondCascadedCMOV.getOperand(0).getReg();
34532 Register Op1Reg = FirstCMOV.getOperand(1).getReg();
34533 Register Op2Reg = FirstCMOV.getOperand(2).getReg();
34534 MachineInstrBuilder MIB =
34535 BuildMI(*SinkMBB, SinkMBB->begin(), MIMD, TII->get(X86::PHI), DestReg)
34536 .addReg(Op1Reg)
34537 .addMBB(SecondInsertedMBB)
34538 .addReg(Op2Reg)
34539 .addMBB(ThisMBB);
34541 // SecondInsertedMBB provides the same incoming value as FirstInsertedMBB
34542 // (the true operand of the SELECT_CC/CMOV nodes).
34543 MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB);
34545 // Now remove the CMOVs.
34546 FirstCMOV.eraseFromParent();
34547 SecondCascadedCMOV.eraseFromParent();
34549 return SinkMBB;
34552 MachineBasicBlock *
34553 X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
34554 MachineBasicBlock *ThisMBB) const {
34555 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
34556 const MIMetadata MIMD(MI);
34558 // To "insert" a SELECT_CC instruction, we actually have to insert the
34559 // diamond control-flow pattern. The incoming instruction knows the
34560 // destination vreg to set, the condition code register to branch on, the
34561 // true/false values to select between and a branch opcode to use.
34563 // ThisMBB:
34564 // ...
34565 // TrueVal = ...
34566 // cmpTY ccX, r1, r2
34567 // bCC copy1MBB
34568 // fallthrough --> FalseMBB
34570 // This code lowers all pseudo-CMOV instructions. Generally it lowers these
34571 // as described above, by inserting a BB, and then making a PHI at the join
34572 // point to select the true and false operands of the CMOV in the PHI.
34574 // The code also handles two different cases of multiple CMOV opcodes
34575 // in a row.
34577 // Case 1:
34578 // In this case, there are multiple CMOVs in a row, all of which are based on
34579 // the same condition setting (or the exact opposite condition setting).
34580 // In this case we can lower all the CMOVs using a single inserted BB, and
34581 // then make a number of PHIs at the join point to model the CMOVs. The only
34582 // trickiness here is that in a case like:
34584 // t2 = CMOV cond1 t1, f1
34585 // t3 = CMOV cond1 t2, f2
34587 // when rewriting this into PHIs, we have to perform some renaming on the
34588 // temps since you cannot have a PHI operand refer to a PHI result earlier
34589 // in the same block. The "simple" but wrong lowering would be:
34591 // t2 = PHI t1(BB1), f1(BB2)
34592 // t3 = PHI t2(BB1), f2(BB2)
34594 // but clearly t2 is not defined in BB1, so that is incorrect. The proper
34595 // renaming is to note that on the path through BB1, t2 is really just a
34596 // copy of t1, and do that renaming, properly generating:
34598 // t2 = PHI t1(BB1), f1(BB2)
34599 // t3 = PHI t1(BB1), f2(BB2)
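// (This renaming is exactly what the RegRewriteTable in
// createPHIsForCMOVsInSinkBB implements.)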
34601 // Case 2:
34602 // CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate
34603 // function - EmitLoweredCascadedSelect.
34605 X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
34606 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
34607 MachineInstr *LastCMOV = &MI;
34608 MachineBasicBlock::iterator NextMIIt = MachineBasicBlock::iterator(MI);
34610 // Check for case 1, where there are multiple CMOVs with the same condition
34611 // first. Of the two cases of multiple CMOV lowerings, case 1 reduces the
34612 // number of jumps the most.
34614 if (isCMOVPseudo(MI)) {
34615 // See if we have a string of CMOVS with the same condition. Skip over
34616 // intervening debug insts.
34617 while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) &&
34618 (NextMIIt->getOperand(3).getImm() == CC ||
34619 NextMIIt->getOperand(3).getImm() == OppCC)) {
34620 LastCMOV = &*NextMIIt;
34621 NextMIIt = next_nodbg(NextMIIt, ThisMBB->end());
34625 // This checks for case 2, but only if we didn't already find case 1, as
34626 // indicated by LastCMOV still pointing at MI.
34627 if (LastCMOV == &MI && NextMIIt != ThisMBB->end() &&
34628 NextMIIt->getOpcode() == MI.getOpcode() &&
34629 NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
34630 NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
34631 NextMIIt->getOperand(1).isKill()) {
34632 return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB);
34635 const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
34636 MachineFunction *F = ThisMBB->getParent();
34637 MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
34638 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
34640 MachineFunction::iterator It = ++ThisMBB->getIterator();
34641 F->insert(It, FalseMBB);
34642 F->insert(It, SinkMBB);
34644 // Set the call frame size on entry to the new basic blocks.
34645 unsigned CallFrameSize = TII->getCallFrameSizeAt(MI);
34646 FalseMBB->setCallFrameSize(CallFrameSize);
34647 SinkMBB->setCallFrameSize(CallFrameSize);
34649 // If the EFLAGS register isn't dead in the terminator, then claim that it's
34650 // live into the sink and copy blocks.
34651 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
34652 if (!LastCMOV->killsRegister(X86::EFLAGS) &&
34653 !checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
34654 FalseMBB->addLiveIn(X86::EFLAGS);
34655 SinkMBB->addLiveIn(X86::EFLAGS);
34658 // Transfer any debug instructions inside the CMOV sequence to the sunk block.
34659 auto DbgRange = llvm::make_range(MachineBasicBlock::iterator(MI),
34660 MachineBasicBlock::iterator(LastCMOV));
34661 for (MachineInstr &MI : llvm::make_early_inc_range(DbgRange))
34662 if (MI.isDebugInstr())
34663 SinkMBB->push_back(MI.removeFromParent());
34665 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
34666 SinkMBB->splice(SinkMBB->end(), ThisMBB,
34667 std::next(MachineBasicBlock::iterator(LastCMOV)),
34668 ThisMBB->end());
34669 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
34671 // Fallthrough block for ThisMBB.
34672 ThisMBB->addSuccessor(FalseMBB);
34673 // The true block target of the first (or only) branch is always SinkMBB.
34674 ThisMBB->addSuccessor(SinkMBB);
34675 // Fallthrough block for FalseMBB.
34676 FalseMBB->addSuccessor(SinkMBB);
34678 // Create the conditional branch instruction.
34679 BuildMI(ThisMBB, MIMD, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);
34681 // SinkMBB:
34682 // %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ]
34683 // ...
34684 MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
34685 MachineBasicBlock::iterator MIItEnd =
34686 std::next(MachineBasicBlock::iterator(LastCMOV));
34687 createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB);
34689 // Now remove the CMOV(s).
34690 ThisMBB->erase(MIItBegin, MIItEnd);
34692 return SinkMBB;
34695 static unsigned getSUBriOpcode(bool IsLP64) {
34696 if (IsLP64)
34697 return X86::SUB64ri32;
34698 else
34699 return X86::SUB32ri;
34702 MachineBasicBlock *
34703 X86TargetLowering::EmitLoweredProbedAlloca(MachineInstr &MI,
34704 MachineBasicBlock *MBB) const {
34705 MachineFunction *MF = MBB->getParent();
34706 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
34707 const X86FrameLowering &TFI = *Subtarget.getFrameLowering();
34708 const MIMetadata MIMD(MI);
34709 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
34711 const unsigned ProbeSize = getStackProbeSize(*MF);
34713 MachineRegisterInfo &MRI = MF->getRegInfo();
34714 MachineBasicBlock *testMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34715 MachineBasicBlock *tailMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34716 MachineBasicBlock *blockMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34718 MachineFunction::iterator MBBIter = ++MBB->getIterator();
34719 MF->insert(MBBIter, testMBB);
34720 MF->insert(MBBIter, blockMBB);
34721 MF->insert(MBBIter, tailMBB);
34723 Register sizeVReg = MI.getOperand(1).getReg();
34725 Register physSPReg = TFI.Uses64BitFramePtr ? X86::RSP : X86::ESP;
34727 Register TmpStackPtr = MRI.createVirtualRegister(
34728 TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);
34729 Register FinalStackPtr = MRI.createVirtualRegister(
34730 TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);
34732 BuildMI(*MBB, {MI}, MIMD, TII->get(TargetOpcode::COPY), TmpStackPtr)
34733 .addReg(physSPReg);
34735 const unsigned Opc = TFI.Uses64BitFramePtr ? X86::SUB64rr : X86::SUB32rr;
34736 BuildMI(*MBB, {MI}, MIMD, TII->get(Opc), FinalStackPtr)
34737 .addReg(TmpStackPtr)
34738 .addReg(sizeVReg);
34741 // test rsp size
34743 BuildMI(testMBB, MIMD,
34744 TII->get(TFI.Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
34745 .addReg(FinalStackPtr)
34746 .addReg(physSPReg);
34748 BuildMI(testMBB, MIMD, TII->get(X86::JCC_1))
34749 .addMBB(tailMBB)
34750 .addImm(X86::COND_GE);
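// Leave the probing loop once the stack pointer has been moved down to (or
// below) the final value, i.e. once FinalStackPtr >= SP in the signed
// comparison above.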
34751 testMBB->addSuccessor(blockMBB);
34752 testMBB->addSuccessor(tailMBB);
34754 // Touch the block, then extend it. This is the opposite order from a static
34755 // probe, where we allocate and then touch; it avoids having to probe the
34756 // tail of the static alloca. Possible scenarios are:
34758 // + ---- <- ------------ <- ------------- <- ------------ +
34759 // | |
34760 // [free probe] -> [page alloc] -> [alloc probe] -> [tail alloc] + -> [dyn probe] -> [page alloc] -> [dyn probe] -> [tail alloc] +
34761 // | |
34762 // + <- ----------- <- ------------ <- ----------- <- ------------ +
34764 // The property we want to enforce is to never have more than [page alloc] between two probes.
34766 const unsigned XORMIOpc =
34767 TFI.Uses64BitFramePtr ? X86::XOR64mi32 : X86::XOR32mi;
34768 addRegOffset(BuildMI(blockMBB, MIMD, TII->get(XORMIOpc)), physSPReg, false, 0)
34769 .addImm(0);
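// XOR-ing an immediate 0 into [SP] leaves the memory unchanged but performs a
// read-modify-write of the location, which is what actually touches the page.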
34771 BuildMI(blockMBB, MIMD, TII->get(getSUBriOpcode(TFI.Uses64BitFramePtr)),
34772 physSPReg)
34773 .addReg(physSPReg)
34774 .addImm(ProbeSize);
34776 BuildMI(blockMBB, MIMD, TII->get(X86::JMP_1)).addMBB(testMBB);
34777 blockMBB->addSuccessor(testMBB);
34779 // Replace original instruction by the expected stack ptr
34780 BuildMI(tailMBB, MIMD, TII->get(TargetOpcode::COPY),
34781 MI.getOperand(0).getReg())
34782 .addReg(FinalStackPtr);
34784 tailMBB->splice(tailMBB->end(), MBB,
34785 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
34786 tailMBB->transferSuccessorsAndUpdatePHIs(MBB);
34787 MBB->addSuccessor(testMBB);
34789 // Delete the original pseudo instruction.
34790 MI.eraseFromParent();
34792 // And we're done.
34793 return tailMBB;
34796 MachineBasicBlock *
34797 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
34798 MachineBasicBlock *BB) const {
34799 MachineFunction *MF = BB->getParent();
34800 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
34801 const MIMetadata MIMD(MI);
34802 const BasicBlock *LLVM_BB = BB->getBasicBlock();
34804 assert(MF->shouldSplitStack());
34806 const bool Is64Bit = Subtarget.is64Bit();
34807 const bool IsLP64 = Subtarget.isTarget64BitLP64();
34809 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
34810 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
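// These offsets select the TCB slot that holds the current stacklet's limit
// in the split-stack scheme (e.g. %fs:0x70 on LP64, %gs:0x30 on 32-bit); the
// compare below checks the prospective stack pointer against that limit.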
34812 // BB:
34813 // ... [Till the alloca]
34814 // If stacklet is not large enough, jump to mallocMBB
34816 // bumpMBB:
34817 // Allocate by subtracting from RSP
34818 // Jump to continueMBB
34820 // mallocMBB:
34821 // Allocate by call to runtime
34823 // continueMBB:
34824 // ...
34825 // [rest of original BB]
34828 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34829 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34830 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34832 MachineRegisterInfo &MRI = MF->getRegInfo();
34833 const TargetRegisterClass *AddrRegClass =
34834 getRegClassFor(getPointerTy(MF->getDataLayout()));
34836 Register mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
34837 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
34838 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
34839 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
34840 sizeVReg = MI.getOperand(1).getReg(),
34841 physSPReg =
34842 IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
34844 MachineFunction::iterator MBBIter = ++BB->getIterator();
34846 MF->insert(MBBIter, bumpMBB);
34847 MF->insert(MBBIter, mallocMBB);
34848 MF->insert(MBBIter, continueMBB);
34850 continueMBB->splice(continueMBB->begin(), BB,
34851 std::next(MachineBasicBlock::iterator(MI)), BB->end());
34852 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
34854 // Add code to the main basic block to check if the stack limit has been hit,
34855 // and if so, jump to mallocMBB otherwise to bumpMBB.
34856 BuildMI(BB, MIMD, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
34857 BuildMI(BB, MIMD, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
34858 .addReg(tmpSPVReg).addReg(sizeVReg);
34859 BuildMI(BB, MIMD, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
34860 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
34861 .addReg(SPLimitVReg);
34862 BuildMI(BB, MIMD, TII->get(X86::JCC_1)).addMBB(mallocMBB).addImm(X86::COND_G);
34864 // bumpMBB simply decreases the stack pointer, since we know the current
34865 // stacklet has enough space.
34866 BuildMI(bumpMBB, MIMD, TII->get(TargetOpcode::COPY), physSPReg)
34867 .addReg(SPLimitVReg);
34868 BuildMI(bumpMBB, MIMD, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
34869 .addReg(SPLimitVReg);
34870 BuildMI(bumpMBB, MIMD, TII->get(X86::JMP_1)).addMBB(continueMBB);
34872 // Calls into a routine in libgcc to allocate more space from the heap.
34873 const uint32_t *RegMask =
34874 Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
34875 if (IsLP64) {
34876 BuildMI(mallocMBB, MIMD, TII->get(X86::MOV64rr), X86::RDI)
34877 .addReg(sizeVReg);
34878 BuildMI(mallocMBB, MIMD, TII->get(X86::CALL64pcrel32))
34879 .addExternalSymbol("__morestack_allocate_stack_space")
34880 .addRegMask(RegMask)
34881 .addReg(X86::RDI, RegState::Implicit)
34882 .addReg(X86::RAX, RegState::ImplicitDefine);
34883 } else if (Is64Bit) {
34884 BuildMI(mallocMBB, MIMD, TII->get(X86::MOV32rr), X86::EDI)
34885 .addReg(sizeVReg);
34886 BuildMI(mallocMBB, MIMD, TII->get(X86::CALL64pcrel32))
34887 .addExternalSymbol("__morestack_allocate_stack_space")
34888 .addRegMask(RegMask)
34889 .addReg(X86::EDI, RegState::Implicit)
34890 .addReg(X86::EAX, RegState::ImplicitDefine);
34891 } else {
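// 32-bit path: pass the size on the stack. The SUB of 12 plus the 4-byte PUSH
// adjust the stack by 16 bytes in total (presumably to keep it 16-byte aligned
// across the call); the ADD of 16 below undoes both.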
34892 BuildMI(mallocMBB, MIMD, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
34893 .addImm(12);
34894 BuildMI(mallocMBB, MIMD, TII->get(X86::PUSH32r)).addReg(sizeVReg);
34895 BuildMI(mallocMBB, MIMD, TII->get(X86::CALLpcrel32))
34896 .addExternalSymbol("__morestack_allocate_stack_space")
34897 .addRegMask(RegMask)
34898 .addReg(X86::EAX, RegState::ImplicitDefine);
34901 if (!Is64Bit)
34902 BuildMI(mallocMBB, MIMD, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
34903 .addImm(16);
34905 BuildMI(mallocMBB, MIMD, TII->get(TargetOpcode::COPY), mallocPtrVReg)
34906 .addReg(IsLP64 ? X86::RAX : X86::EAX);
34907 BuildMI(mallocMBB, MIMD, TII->get(X86::JMP_1)).addMBB(continueMBB);
34909 // Set up the CFG correctly.
34910 BB->addSuccessor(bumpMBB);
34911 BB->addSuccessor(mallocMBB);
34912 mallocMBB->addSuccessor(continueMBB);
34913 bumpMBB->addSuccessor(continueMBB);
34915 // Take care of the PHI nodes.
34916 BuildMI(*continueMBB, continueMBB->begin(), MIMD, TII->get(X86::PHI),
34917 MI.getOperand(0).getReg())
34918 .addReg(mallocPtrVReg)
34919 .addMBB(mallocMBB)
34920 .addReg(bumpSPPtrVReg)
34921 .addMBB(bumpMBB);
34923 // Delete the original pseudo instruction.
34924 MI.eraseFromParent();
34926 // And we're done.
34927 return continueMBB;
34930 MachineBasicBlock *
34931 X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
34932 MachineBasicBlock *BB) const {
34933 MachineFunction *MF = BB->getParent();
34934 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
34935 MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
34936 const MIMetadata MIMD(MI);
34938 assert(!isAsynchronousEHPersonality(
34939 classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
34940 "SEH does not use catchret!");
34942 // Only 32-bit EH needs to worry about manually restoring stack pointers.
34943 if (!Subtarget.is32Bit())
34944 return BB;
34946 // C++ EH creates a new target block to hold the restore code, and wires up
34947 // the new block to the return destination with a normal JMP_4.
34948 MachineBasicBlock *RestoreMBB =
34949 MF->CreateMachineBasicBlock(BB->getBasicBlock());
34950 assert(BB->succ_size() == 1);
34951 MF->insert(std::next(BB->getIterator()), RestoreMBB);
34952 RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
34953 BB->addSuccessor(RestoreMBB);
34954 MI.getOperand(0).setMBB(RestoreMBB);
34956 // Marking this as an EH pad but not a funclet entry block causes PEI to
34957 // restore stack pointers in the block.
34958 RestoreMBB->setIsEHPad(true);
34960 auto RestoreMBBI = RestoreMBB->begin();
34961 BuildMI(*RestoreMBB, RestoreMBBI, MIMD, TII.get(X86::JMP_4)).addMBB(TargetMBB);
34962 return BB;
34965 MachineBasicBlock *
34966 X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
34967 MachineBasicBlock *BB) const {
34968 // So, here we replace TLSADDR with the sequence:
34969 // adjust_stackdown -> TLSADDR -> adjust_stackup.
34970 // We need this because TLSADDR is lowered into a call
34971 // inside MC; without the two markers, shrink-wrapping
34972 // may push the prologue/epilogue past them.
34973 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
34974 const MIMetadata MIMD(MI);
34975 MachineFunction &MF = *BB->getParent();
34977 // Emit CALLSEQ_START right before the instruction.
34978 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
34979 MachineInstrBuilder CallseqStart =
34980 BuildMI(MF, MIMD, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
34981 BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
34983 // Emit CALLSEQ_END right after the instruction.
34984 // We don't call erase from parent because we want to keep the
34985 // original instruction around.
34986 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
34987 MachineInstrBuilder CallseqEnd =
34988 BuildMI(MF, MIMD, TII.get(AdjStackUp)).addImm(0).addImm(0);
34989 BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);
34991 return BB;
34994 MachineBasicBlock *
34995 X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
34996 MachineBasicBlock *BB) const {
34997 // This is pretty easy. We're taking the value that we received from
34998 // our load from the relocation, sticking it in either RDI (x86-64)
34999 // or EAX and doing an indirect call. The return value will then
35000 // be in the normal return register.
35001 MachineFunction *F = BB->getParent();
35002 const X86InstrInfo *TII = Subtarget.getInstrInfo();
35003 const MIMetadata MIMD(MI);
35005 assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
35006 assert(MI.getOperand(3).isGlobal() && "This should be a global");
35008 // Get a register mask for the lowered call.
35009 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
35010 // proper register mask.
35011 const uint32_t *RegMask =
35012 Subtarget.is64Bit() ?
35013 Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
35014 Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
35015 if (Subtarget.is64Bit()) {
35016 MachineInstrBuilder MIB =
35017 BuildMI(*BB, MI, MIMD, TII->get(X86::MOV64rm), X86::RDI)
35018 .addReg(X86::RIP)
35019 .addImm(0)
35020 .addReg(0)
35021 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
35022 MI.getOperand(3).getTargetFlags())
35023 .addReg(0);
35024 MIB = BuildMI(*BB, MI, MIMD, TII->get(X86::CALL64m));
35025 addDirectMem(MIB, X86::RDI);
35026 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
35027 } else if (!isPositionIndependent()) {
35028 MachineInstrBuilder MIB =
35029 BuildMI(*BB, MI, MIMD, TII->get(X86::MOV32rm), X86::EAX)
35030 .addReg(0)
35031 .addImm(0)
35032 .addReg(0)
35033 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
35034 MI.getOperand(3).getTargetFlags())
35035 .addReg(0);
35036 MIB = BuildMI(*BB, MI, MIMD, TII->get(X86::CALL32m));
35037 addDirectMem(MIB, X86::EAX);
35038 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
35039 } else {
35040 MachineInstrBuilder MIB =
35041 BuildMI(*BB, MI, MIMD, TII->get(X86::MOV32rm), X86::EAX)
35042 .addReg(TII->getGlobalBaseReg(F))
35043 .addImm(0)
35044 .addReg(0)
35045 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
35046 MI.getOperand(3).getTargetFlags())
35047 .addReg(0);
35048 MIB = BuildMI(*BB, MI, MIMD, TII->get(X86::CALL32m));
35049 addDirectMem(MIB, X86::EAX);
35050 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
35053 MI.eraseFromParent(); // The pseudo instruction is gone now.
35054 return BB;
35057 static unsigned getOpcodeForIndirectThunk(unsigned RPOpc) {
35058 switch (RPOpc) {
35059 case X86::INDIRECT_THUNK_CALL32:
35060 return X86::CALLpcrel32;
35061 case X86::INDIRECT_THUNK_CALL64:
35062 return X86::CALL64pcrel32;
35063 case X86::INDIRECT_THUNK_TCRETURN32:
35064 return X86::TCRETURNdi;
35065 case X86::INDIRECT_THUNK_TCRETURN64:
35066 return X86::TCRETURNdi64;
35068 llvm_unreachable("not indirect thunk opcode");
35071 static const char *getIndirectThunkSymbol(const X86Subtarget &Subtarget,
35072 unsigned Reg) {
35073 if (Subtarget.useRetpolineExternalThunk()) {
35074 // When using an external thunk for retpolines, we pick names that match the
35075 // names GCC happens to use as well. This helps simplify the implementation
35076 // of the thunks for kernels where they have no easy ability to create
35077 // aliases and are doing non-trivial configuration of the thunk's body. For
35078 // example, the Linux kernel will do boot-time hot patching of the thunk
35079 // bodies and cannot easily export aliases of these to loaded modules.
35081 // Note that at any point in the future, we may need to change the semantics
35082 // of how we implement retpolines and at that time will likely change the
35083 // name of the called thunk. Essentially, there is no hard guarantee that
35085 // LLVM will generate calls to specific thunks; we merely make a best-effort
35085 // attempt to help out kernels and other systems where duplicating the
35086 // thunks is costly.
35087 switch (Reg) {
35088 case X86::EAX:
35089 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35090 return "__x86_indirect_thunk_eax";
35091 case X86::ECX:
35092 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35093 return "__x86_indirect_thunk_ecx";
35094 case X86::EDX:
35095 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35096 return "__x86_indirect_thunk_edx";
35097 case X86::EDI:
35098 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35099 return "__x86_indirect_thunk_edi";
35100 case X86::R11:
35101 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
35102 return "__x86_indirect_thunk_r11";
35104 llvm_unreachable("unexpected reg for external indirect thunk");
35107 if (Subtarget.useRetpolineIndirectCalls() ||
35108 Subtarget.useRetpolineIndirectBranches()) {
35109 // When targeting an internal COMDAT thunk use an LLVM-specific name.
35110 switch (Reg) {
35111 case X86::EAX:
35112 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35113 return "__llvm_retpoline_eax";
35114 case X86::ECX:
35115 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35116 return "__llvm_retpoline_ecx";
35117 case X86::EDX:
35118 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35119 return "__llvm_retpoline_edx";
35120 case X86::EDI:
35121 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35122 return "__llvm_retpoline_edi";
35123 case X86::R11:
35124 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
35125 return "__llvm_retpoline_r11";
35127 llvm_unreachable("unexpected reg for retpoline");
35130 if (Subtarget.useLVIControlFlowIntegrity()) {
35131 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
35132 return "__llvm_lvi_thunk_r11";
35134 llvm_unreachable("getIndirectThunkSymbol() invoked without thunk feature");
35137 MachineBasicBlock *
35138 X86TargetLowering::EmitLoweredIndirectThunk(MachineInstr &MI,
35139 MachineBasicBlock *BB) const {
35140 // Copy the virtual register into the R11 physical register and
35141 // call the retpoline thunk.
35142 const MIMetadata MIMD(MI);
35143 const X86InstrInfo *TII = Subtarget.getInstrInfo();
35144 Register CalleeVReg = MI.getOperand(0).getReg();
35145 unsigned Opc = getOpcodeForIndirectThunk(MI.getOpcode());
35147 // Find an available scratch register to hold the callee. On 64-bit, we can
35148 // just use R11, but we scan for uses anyway to ensure we don't generate
35149 // incorrect code. On 32-bit, we use one of EAX, ECX, or EDX that isn't
35150 // already a register use operand to the call to hold the callee. If none
35151 // are available, use EDI instead. EDI is chosen because EBX is the PIC base
35152 // register and ESI is the base pointer to realigned stack frames with VLAs.
35153 SmallVector<unsigned, 3> AvailableRegs;
35154 if (Subtarget.is64Bit())
35155 AvailableRegs.push_back(X86::R11);
35156 else
35157 AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});
35159 // Zero out any registers that are already used.
35160 for (const auto &MO : MI.operands()) {
35161 if (MO.isReg() && MO.isUse())
35162 for (unsigned &Reg : AvailableRegs)
35163 if (Reg == MO.getReg())
35164 Reg = 0;
35167 // Choose the first remaining non-zero available register.
35168 unsigned AvailableReg = 0;
35169 for (unsigned MaybeReg : AvailableRegs) {
35170 if (MaybeReg) {
35171 AvailableReg = MaybeReg;
35172 break;
35175 if (!AvailableReg)
35176 report_fatal_error("calling convention incompatible with retpoline, no "
35177 "available registers");
35179 const char *Symbol = getIndirectThunkSymbol(Subtarget, AvailableReg);
35181 BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), AvailableReg)
35182 .addReg(CalleeVReg);
35183 MI.getOperand(0).ChangeToES(Symbol);
35184 MI.setDesc(TII->get(Opc));
35185 MachineInstrBuilder(*BB->getParent(), &MI)
35186 .addReg(AvailableReg, RegState::Implicit | RegState::Kill);
35187 return BB;
35190 /// SetJmp implies a future control-flow change upon calling the corresponding
35191 /// LongJmp.
35192 /// Instead of using the 'return' instruction, the long jump fixes the stack and
35193 /// performs an indirect branch. To do so it uses the registers that were stored
35194 /// in the jump buffer (when calling SetJmp).
35195 /// If the shadow stack is enabled, we need to fix it as well, because some
35196 /// return addresses will be skipped.
35197 /// The function will save the SSP for future fixing in the function
35198 /// emitLongJmpShadowStackFix.
35199 /// \sa emitLongJmpShadowStackFix
35200 /// \param [in] MI The temporary Machine Instruction for the builtin.
35201 /// \param [in] MBB The Machine Basic Block that will be modified.
35202 void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
35203 MachineBasicBlock *MBB) const {
35204 const MIMetadata MIMD(MI);
35205 MachineFunction *MF = MBB->getParent();
35206 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35207 MachineRegisterInfo &MRI = MF->getRegInfo();
35208 MachineInstrBuilder MIB;
35210 // Memory Reference.
35211 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
35212 MI.memoperands_end());
35214 // Initialize a register with zero.
35215 MVT PVT = getPointerTy(MF->getDataLayout());
35216 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
35217 Register ZReg = MRI.createVirtualRegister(PtrRC);
35218 unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
35219 BuildMI(*MBB, MI, MIMD, TII->get(XorRROpc))
35220 .addDef(ZReg)
35221 .addReg(ZReg, RegState::Undef)
35222 .addReg(ZReg, RegState::Undef);
35224 // Read the current SSP Register value to the zeroed register.
35225 Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
35226 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
35227 BuildMI(*MBB, MI, MIMD, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
35229 // Write the SSP register value to slot 3 of the input memory buffer.
35230 unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
35231 MIB = BuildMI(*MBB, MI, MIMD, TII->get(PtrStoreOpc));
35232 const int64_t SSPOffset = 3 * PVT.getStoreSize();
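// Buffer layout used by this SjLj lowering, in pointer-sized slots:
// slot 0 = frame pointer, slot 1 = resume address, slot 2 = stack pointer,
// slot 3 = shadow stack pointer (written here).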
35233 const unsigned MemOpndSlot = 1;
35234 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35235 if (i == X86::AddrDisp)
35236 MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
35237 else
35238 MIB.add(MI.getOperand(MemOpndSlot + i));
35240 MIB.addReg(SSPCopyReg);
35241 MIB.setMemRefs(MMOs);
35244 MachineBasicBlock *
35245 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
35246 MachineBasicBlock *MBB) const {
35247 const MIMetadata MIMD(MI);
35248 MachineFunction *MF = MBB->getParent();
35249 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35250 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
35251 MachineRegisterInfo &MRI = MF->getRegInfo();
35253 const BasicBlock *BB = MBB->getBasicBlock();
35254 MachineFunction::iterator I = ++MBB->getIterator();
35256 // Memory Reference
35257 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
35258 MI.memoperands_end());
35260 unsigned DstReg;
35261 unsigned MemOpndSlot = 0;
35263 unsigned CurOp = 0;
35265 DstReg = MI.getOperand(CurOp++).getReg();
35266 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
35267 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
35268 (void)TRI;
35269 Register mainDstReg = MRI.createVirtualRegister(RC);
35270 Register restoreDstReg = MRI.createVirtualRegister(RC);
35272 MemOpndSlot = CurOp;
35274 MVT PVT = getPointerTy(MF->getDataLayout());
35275 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
35276 "Invalid Pointer Size!");
35278 // For v = setjmp(buf), we generate
35280 // thisMBB:
35281 // buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
35282 // SjLjSetup restoreMBB
35284 // mainMBB:
35285 // v_main = 0
35287 // sinkMBB:
35288 // v = phi(main, restore)
35290 // restoreMBB:
35291 // if base pointer being used, load it from frame
35292 // v_restore = 1
35294 MachineBasicBlock *thisMBB = MBB;
35295 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
35296 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
35297 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
35298 MF->insert(I, mainMBB);
35299 MF->insert(I, sinkMBB);
35300 MF->push_back(restoreMBB);
35301 restoreMBB->setMachineBlockAddressTaken();
35303 MachineInstrBuilder MIB;
35305 // Transfer the remainder of BB and its successor edges to sinkMBB.
35306 sinkMBB->splice(sinkMBB->begin(), MBB,
35307 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
35308 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
35310 // thisMBB:
35311 unsigned PtrStoreOpc = 0;
35312 unsigned LabelReg = 0;
35313 const int64_t LabelOffset = 1 * PVT.getStoreSize();
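// Slot 1 of the buffer receives the address of restoreMBB; that is where the
// matching longjmp resumes execution.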
35314 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
35315 !isPositionIndependent();
35317 // Prepare IP either in reg or imm.
35318 if (!UseImmLabel) {
35319 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
35320 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
35321 LabelReg = MRI.createVirtualRegister(PtrRC);
35322 if (Subtarget.is64Bit()) {
35323 MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(X86::LEA64r), LabelReg)
35324 .addReg(X86::RIP)
35325 .addImm(0)
35326 .addReg(0)
35327 .addMBB(restoreMBB)
35328 .addReg(0);
35329 } else {
35330 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
35331 MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(X86::LEA32r), LabelReg)
35332 .addReg(XII->getGlobalBaseReg(MF))
35333 .addImm(0)
35334 .addReg(0)
35335 .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
35336 .addReg(0);
35338 } else
35339 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
35340 // Store IP
35341 MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(PtrStoreOpc));
35342 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35343 if (i == X86::AddrDisp)
35344 MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
35345 else
35346 MIB.add(MI.getOperand(MemOpndSlot + i));
35348 if (!UseImmLabel)
35349 MIB.addReg(LabelReg);
35350 else
35351 MIB.addMBB(restoreMBB);
35352 MIB.setMemRefs(MMOs);
35354 if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
35355 emitSetJmpShadowStackFix(MI, thisMBB);
35358 // Setup
35359 MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(X86::EH_SjLj_Setup))
35360 .addMBB(restoreMBB);
35362 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
35363 MIB.addRegMask(RegInfo->getNoPreservedMask());
35364 thisMBB->addSuccessor(mainMBB);
35365 thisMBB->addSuccessor(restoreMBB);
35367 // mainMBB:
35368 // EAX = 0
35369 BuildMI(mainMBB, MIMD, TII->get(X86::MOV32r0), mainDstReg);
35370 mainMBB->addSuccessor(sinkMBB);
35372 // sinkMBB:
35373 BuildMI(*sinkMBB, sinkMBB->begin(), MIMD, TII->get(X86::PHI), DstReg)
35374 .addReg(mainDstReg)
35375 .addMBB(mainMBB)
35376 .addReg(restoreDstReg)
35377 .addMBB(restoreMBB);
35379 // restoreMBB:
35380 if (RegInfo->hasBasePointer(*MF)) {
35381 const bool Uses64BitFramePtr =
35382 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
35383 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
35384 X86FI->setRestoreBasePointer(MF);
35385 Register FramePtr = RegInfo->getFrameRegister(*MF);
35386 Register BasePtr = RegInfo->getBaseRegister();
35387 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
35388 addRegOffset(BuildMI(restoreMBB, MIMD, TII->get(Opm), BasePtr),
35389 FramePtr, true, X86FI->getRestoreBasePointerOffset())
35390 .setMIFlag(MachineInstr::FrameSetup);
35392 BuildMI(restoreMBB, MIMD, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
35393 BuildMI(restoreMBB, MIMD, TII->get(X86::JMP_1)).addMBB(sinkMBB);
35394 restoreMBB->addSuccessor(sinkMBB);
35396 MI.eraseFromParent();
35397 return sinkMBB;
35400 /// Fix the shadow stack using the previously saved SSP pointer.
35401 /// \sa emitSetJmpShadowStackFix
35402 /// \param [in] MI The temporary Machine Instruction for the builtin.
35403 /// \param [in] MBB The Machine Basic Block that will be modified.
35404 /// \return The sink MBB that will perform the future indirect branch.
35405 MachineBasicBlock *
35406 X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
35407 MachineBasicBlock *MBB) const {
35408 const MIMetadata MIMD(MI);
35409 MachineFunction *MF = MBB->getParent();
35410 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35411 MachineRegisterInfo &MRI = MF->getRegInfo();
35413 // Memory Reference
35414 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
35415 MI.memoperands_end());
35417 MVT PVT = getPointerTy(MF->getDataLayout());
35418 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
35420 // checkSspMBB:
35421 // xor vreg1, vreg1
35422 // rdssp vreg1
35423 // test vreg1, vreg1
35424 // je sinkMBB # Jump if Shadow Stack is not supported
35425 // fallMBB:
35426 // mov buf+24/12(%rip), vreg2
35427 // sub vreg1, vreg2
35428 // jbe sinkMBB # No need to fix the Shadow Stack
35429 // fixShadowMBB:
35430 // shr 3/2, vreg2
35431 // incssp vreg2 # fix the SSP according to the lower 8 bits
35432 // shr 8, vreg2
35433 // je sinkMBB
35434 // fixShadowLoopPrepareMBB:
35435 // shl vreg2
35436 // mov 128, vreg3
35437 // fixShadowLoopMBB:
35438 // incssp vreg3
35439 // dec vreg2
35440 // jne fixShadowLoopMBB # Iterate until you finish fixing
35441 // # the Shadow Stack
35442 // sinkMBB:
35444 MachineFunction::iterator I = ++MBB->getIterator();
35445 const BasicBlock *BB = MBB->getBasicBlock();
35447 MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
35448 MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
35449 MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
35450 MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
35451 MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
35452 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
35453 MF->insert(I, checkSspMBB);
35454 MF->insert(I, fallMBB);
35455 MF->insert(I, fixShadowMBB);
35456 MF->insert(I, fixShadowLoopPrepareMBB);
35457 MF->insert(I, fixShadowLoopMBB);
35458 MF->insert(I, sinkMBB);
35460 // Transfer the remainder of BB and its successor edges to sinkMBB.
35461 sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
35462 MBB->end());
35463 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
35465 MBB->addSuccessor(checkSspMBB);
35467 // Initialize a register with zero.
35468 Register ZReg = MRI.createVirtualRegister(&X86::GR32RegClass);
35469 BuildMI(checkSspMBB, MIMD, TII->get(X86::MOV32r0), ZReg);
35471 if (PVT == MVT::i64) {
35472 Register TmpZReg = MRI.createVirtualRegister(PtrRC);
35473 BuildMI(checkSspMBB, MIMD, TII->get(X86::SUBREG_TO_REG), TmpZReg)
35474 .addImm(0)
35475 .addReg(ZReg)
35476 .addImm(X86::sub_32bit);
35477 ZReg = TmpZReg;
35480 // Read the current SSP Register value to the zeroed register.
35481 Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
35482 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
35483 BuildMI(checkSspMBB, MIMD, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
35485 // Check whether the result of the SSP register is zero and jump directly
35486 // to the sink.
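// (RDSSP is a no-op when shadow stacks are not enabled, so the register still
// holds the zero we put in it and there is nothing to fix.)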
35487 unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
35488 BuildMI(checkSspMBB, MIMD, TII->get(TestRROpc))
35489 .addReg(SSPCopyReg)
35490 .addReg(SSPCopyReg);
35491 BuildMI(checkSspMBB, MIMD, TII->get(X86::JCC_1))
35492 .addMBB(sinkMBB)
35493 .addImm(X86::COND_E);
35494 checkSspMBB->addSuccessor(sinkMBB);
35495 checkSspMBB->addSuccessor(fallMBB);
35497 // Reload the previously saved SSP register value.
35498 Register PrevSSPReg = MRI.createVirtualRegister(PtrRC);
35499 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
35500 const int64_t SPPOffset = 3 * PVT.getStoreSize();
35501 MachineInstrBuilder MIB =
35502 BuildMI(fallMBB, MIMD, TII->get(PtrLoadOpc), PrevSSPReg);
35503 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35504 const MachineOperand &MO = MI.getOperand(i);
35505 if (i == X86::AddrDisp)
35506 MIB.addDisp(MO, SPPOffset);
35507 else if (MO.isReg()) // Don't add the whole operand, we don't want to
35508 // preserve kill flags.
35509 MIB.addReg(MO.getReg());
35510 else
35511 MIB.add(MO);
35513 MIB.setMemRefs(MMOs);
35515 // Subtract the current SSP from the previous SSP.
35516 Register SspSubReg = MRI.createVirtualRegister(PtrRC);
35517 unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
35518 BuildMI(fallMBB, MIMD, TII->get(SubRROpc), SspSubReg)
35519 .addReg(PrevSSPReg)
35520 .addReg(SSPCopyReg);
35522 // Jump to sink in case PrevSSPReg <= SSPCopyReg.
35523 BuildMI(fallMBB, MIMD, TII->get(X86::JCC_1))
35524 .addMBB(sinkMBB)
35525 .addImm(X86::COND_BE);
35526 fallMBB->addSuccessor(sinkMBB);
35527 fallMBB->addSuccessor(fixShadowMBB);
35529 // Shift right by 2/3 for 32/64 because incssp multiplies the argument by 4/8.
35530 unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
35531 unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
35532 Register SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
35533 BuildMI(fixShadowMBB, MIMD, TII->get(ShrRIOpc), SspFirstShrReg)
35534 .addReg(SspSubReg)
35535 .addImm(Offset);
35537 // Advance the SSP; incssp only looks at the lower 8 bits of the delta.
35538 unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
35539 BuildMI(fixShadowMBB, MIMD, TII->get(IncsspOpc)).addReg(SspFirstShrReg);
35541 // Reset the lower 8 bits.
35542 Register SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
35543 BuildMI(fixShadowMBB, MIMD, TII->get(ShrRIOpc), SspSecondShrReg)
35544 .addReg(SspFirstShrReg)
35545 .addImm(8);
35547 // Jump if the result of the shift is zero.
35548 BuildMI(fixShadowMBB, MIMD, TII->get(X86::JCC_1))
35549 .addMBB(sinkMBB)
35550 .addImm(X86::COND_E);
35551 fixShadowMBB->addSuccessor(sinkMBB);
35552 fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);
35554 // Do a single shift left.
35555 unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64ri : X86::SHL32ri;
35556 Register SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
35557 BuildMI(fixShadowLoopPrepareMBB, MIMD, TII->get(ShlR1Opc), SspAfterShlReg)
35558 .addReg(SspSecondShrReg)
35559 .addImm(1);
35561 // Save the value 128 to a register (will be used next with incssp).
35562 Register Value128InReg = MRI.createVirtualRegister(PtrRC);
35563 unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
35564 BuildMI(fixShadowLoopPrepareMBB, MIMD, TII->get(MovRIOpc), Value128InReg)
35565 .addImm(128);
35566 fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);
35568 // Since incssp only looks at the lower 8 bits, we might need to do several
35569 // iterations of incssp until we finish fixing the shadow stack.
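// For example (illustrative numbers): with a delta of 0x1234 entries, the
// incssp above already advanced 0x34 of them; the loop below runs
// (0x1234 >> 8) << 1 = 0x24 times, advancing 128 entries per iteration, which
// covers the remaining 0x1200.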
35570 Register DecReg = MRI.createVirtualRegister(PtrRC);
35571 Register CounterReg = MRI.createVirtualRegister(PtrRC);
35572 BuildMI(fixShadowLoopMBB, MIMD, TII->get(X86::PHI), CounterReg)
35573 .addReg(SspAfterShlReg)
35574 .addMBB(fixShadowLoopPrepareMBB)
35575 .addReg(DecReg)
35576 .addMBB(fixShadowLoopMBB);
35578 // Every iteration we increase the SSP by 128.
35579 BuildMI(fixShadowLoopMBB, MIMD, TII->get(IncsspOpc)).addReg(Value128InReg);
35581 // Every iteration we decrement the counter by 1.
35582 unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
35583 BuildMI(fixShadowLoopMBB, MIMD, TII->get(DecROpc), DecReg).addReg(CounterReg);
35585 // Jump if the counter is not zero yet.
35586 BuildMI(fixShadowLoopMBB, MIMD, TII->get(X86::JCC_1))
35587 .addMBB(fixShadowLoopMBB)
35588 .addImm(X86::COND_NE);
35589 fixShadowLoopMBB->addSuccessor(sinkMBB);
35590 fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);
35592 return sinkMBB;
35595 MachineBasicBlock *
35596 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
35597 MachineBasicBlock *MBB) const {
35598 const MIMetadata MIMD(MI);
35599 MachineFunction *MF = MBB->getParent();
35600 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35601 MachineRegisterInfo &MRI = MF->getRegInfo();
35603 // Memory Reference
35604 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
35605 MI.memoperands_end());
35607 MVT PVT = getPointerTy(MF->getDataLayout());
35608 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
35609 "Invalid Pointer Size!");
35611 const TargetRegisterClass *RC =
35612 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
35613 Register Tmp = MRI.createVirtualRegister(RC);
35614 // Since FP is only updated here but NOT referenced, it's treated as a GPR.
35615 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
35616 Register FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
35617 Register SP = RegInfo->getStackRegister();
35619 MachineInstrBuilder MIB;
35621 const int64_t LabelOffset = 1 * PVT.getStoreSize();
35622 const int64_t SPOffset = 2 * PVT.getStoreSize();
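// Slot 0 of the buffer holds the saved frame pointer, slot 1 the resume
// address and slot 2 the saved stack pointer; they are reloaded in that order
// below.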
35624 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
35625 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
35627 MachineBasicBlock *thisMBB = MBB;
35629 // When CET shadow stacks are enabled, we need to fix the shadow stack.
35630 if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
35631 thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
35634 // Reload FP
35635 MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(PtrLoadOpc), FP);
35636 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35637 const MachineOperand &MO = MI.getOperand(i);
35638 if (MO.isReg()) // Don't add the whole operand, we don't want to
35639 // preserve kill flags.
35640 MIB.addReg(MO.getReg());
35641 else
35642 MIB.add(MO);
35644 MIB.setMemRefs(MMOs);
35646 // Reload IP
35647 MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(PtrLoadOpc), Tmp);
35648 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35649 const MachineOperand &MO = MI.getOperand(i);
35650 if (i == X86::AddrDisp)
35651 MIB.addDisp(MO, LabelOffset);
35652 else if (MO.isReg()) // Don't add the whole operand, we don't want to
35653 // preserve kill flags.
35654 MIB.addReg(MO.getReg());
35655 else
35656 MIB.add(MO);
35658 MIB.setMemRefs(MMOs);
35660 // Reload SP
35661 MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(PtrLoadOpc), SP);
35662 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35663 if (i == X86::AddrDisp)
35664 MIB.addDisp(MI.getOperand(i), SPOffset);
35665 else
35666 MIB.add(MI.getOperand(i)); // We can preserve the kill flags here, it's
35667 // the last instruction of the expansion.
35669 MIB.setMemRefs(MMOs);
35671 // Jump
35672 BuildMI(*thisMBB, MI, MIMD, TII->get(IJmpOpc)).addReg(Tmp);
35674 MI.eraseFromParent();
35675 return thisMBB;
35678 void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
35679 MachineBasicBlock *MBB,
35680 MachineBasicBlock *DispatchBB,
35681 int FI) const {
35682 const MIMetadata MIMD(MI);
35683 MachineFunction *MF = MBB->getParent();
35684 MachineRegisterInfo *MRI = &MF->getRegInfo();
35685 const X86InstrInfo *TII = Subtarget.getInstrInfo();
35687 MVT PVT = getPointerTy(MF->getDataLayout());
35688 assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
35690 unsigned Op = 0;
35691 unsigned VR = 0;
35693 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
35694 !isPositionIndependent();
35696 if (UseImmLabel) {
35697 Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
35698 } else {
35699 const TargetRegisterClass *TRC =
35700 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
35701 VR = MRI->createVirtualRegister(TRC);
35702 Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
35704 if (Subtarget.is64Bit())
35705 BuildMI(*MBB, MI, MIMD, TII->get(X86::LEA64r), VR)
35706 .addReg(X86::RIP)
35707 .addImm(1)
35708 .addReg(0)
35709 .addMBB(DispatchBB)
35710 .addReg(0);
35711 else
35712 BuildMI(*MBB, MI, MIMD, TII->get(X86::LEA32r), VR)
35713 .addReg(0) /* TII->getGlobalBaseReg(MF) */
35714 .addImm(1)
35715 .addReg(0)
35716 .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
35717 .addReg(0);
35720 MachineInstrBuilder MIB = BuildMI(*MBB, MI, MIMD, TII->get(Op));
35721 addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
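// The fixed offset (56 on 64-bit, 36 on 32-bit) is the slot of the SjLj
// function context where the dispatch address lives (presumably the
// resume-address entry of its jump buffer); storing DispatchBB there makes
// every unwind land in the dispatch block.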
35722 if (UseImmLabel)
35723 MIB.addMBB(DispatchBB);
35724 else
35725 MIB.addReg(VR);
35728 MachineBasicBlock *
35729 X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
35730 MachineBasicBlock *BB) const {
35731 const MIMetadata MIMD(MI);
35732 MachineFunction *MF = BB->getParent();
35733 MachineRegisterInfo *MRI = &MF->getRegInfo();
35734 const X86InstrInfo *TII = Subtarget.getInstrInfo();
35735 int FI = MF->getFrameInfo().getFunctionContextIndex();
35737 // Get a mapping of the call site numbers to all of the landing pads they're
35738 // associated with.
35739 DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
35740 unsigned MaxCSNum = 0;
35741 for (auto &MBB : *MF) {
35742 if (!MBB.isEHPad())
35743 continue;
35745 MCSymbol *Sym = nullptr;
35746 for (const auto &MI : MBB) {
35747 if (MI.isDebugInstr())
35748 continue;
35750 assert(MI.isEHLabel() && "expected EH_LABEL");
35751 Sym = MI.getOperand(0).getMCSymbol();
35752 break;
35755 if (!MF->hasCallSiteLandingPad(Sym))
35756 continue;
35758 for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
35759 CallSiteNumToLPad[CSI].push_back(&MBB);
35760 MaxCSNum = std::max(MaxCSNum, CSI);
35764 // Get an ordered list of the machine basic blocks for the jump table.
35765 std::vector<MachineBasicBlock *> LPadList;
35766 SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
35767 LPadList.reserve(CallSiteNumToLPad.size());
35769 for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
35770 for (auto &LP : CallSiteNumToLPad[CSI]) {
35771 LPadList.push_back(LP);
35772 InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
35776 assert(!LPadList.empty() &&
35777 "No landing pad destinations for the dispatch jump table!");
35779 // Create the MBBs for the dispatch code.
35781 // Shove the dispatch's address into the return slot in the function context.
35782 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
35783 DispatchBB->setIsEHPad(true);
35785 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
35786 BuildMI(TrapBB, MIMD, TII->get(X86::TRAP));
35787 DispatchBB->addSuccessor(TrapBB);
35789 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
35790 DispatchBB->addSuccessor(DispContBB);
35792 // Insert MBBs.
35793 MF->push_back(DispatchBB);
35794 MF->push_back(DispContBB);
35795 MF->push_back(TrapBB);
35797 // Insert code into the entry block that creates and registers the function
35798 // context.
35799 SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
35801 // Create the jump table and associated information
35802 unsigned JTE = getJumpTableEncoding();
35803 MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
35804 unsigned MJTI = JTI->createJumpTableIndex(LPadList);
35806 const X86RegisterInfo &RI = TII->getRegisterInfo();
35807 // Add a register mask with no preserved registers. This results in all
35808 // registers being marked as clobbered.
35809 if (RI.hasBasePointer(*MF)) {
35810 const bool FPIs64Bit =
35811 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
35812 X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
35813 MFI->setRestoreBasePointer(MF);
35815 Register FP = RI.getFrameRegister(*MF);
35816 Register BP = RI.getBaseRegister();
35817 unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
35818 addRegOffset(BuildMI(DispatchBB, MIMD, TII->get(Op), BP), FP, true,
35819 MFI->getRestoreBasePointerOffset())
35820 .addRegMask(RI.getNoPreservedMask());
35821 } else {
35822 BuildMI(DispatchBB, MIMD, TII->get(X86::NOOP))
35823 .addRegMask(RI.getNoPreservedMask());
35826 // IReg is used as an index in a memory operand and therefore can't be SP
35827 Register IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
35828 addFrameReference(BuildMI(DispatchBB, MIMD, TII->get(X86::MOV32rm), IReg), FI,
35829 Subtarget.is64Bit() ? 8 : 4);
35830 BuildMI(DispatchBB, MIMD, TII->get(X86::CMP32ri))
35831 .addReg(IReg)
35832 .addImm(LPadList.size());
35833 BuildMI(DispatchBB, MIMD, TII->get(X86::JCC_1))
35834 .addMBB(TrapBB)
35835 .addImm(X86::COND_AE);
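// Illustrative summary of the bounds check emitted above (IReg holds the
// call-site index loaded from the function context):
//   cmpl $<num-landing-pads>, %IReg
//   jae  TrapBB                  ; out-of-range index traps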
35837 if (Subtarget.is64Bit()) {
35838 Register BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
35839 Register IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
35841 // leaq .LJTI0_0(%rip), BReg
35842 BuildMI(DispContBB, MIMD, TII->get(X86::LEA64r), BReg)
35843 .addReg(X86::RIP)
35844 .addImm(1)
35845 .addReg(0)
35846 .addJumpTableIndex(MJTI)
35847 .addReg(0);
35848 // movzx IReg64, IReg
35849 BuildMI(DispContBB, MIMD, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
35850 .addImm(0)
35851 .addReg(IReg)
35852 .addImm(X86::sub_32bit);
35854 switch (JTE) {
35855 case MachineJumpTableInfo::EK_BlockAddress:
35856 // jmpq *(BReg,IReg64,8)
35857 BuildMI(DispContBB, MIMD, TII->get(X86::JMP64m))
35858 .addReg(BReg)
35859 .addImm(8)
35860 .addReg(IReg64)
35861 .addImm(0)
35862 .addReg(0);
35863 break;
35864 case MachineJumpTableInfo::EK_LabelDifference32: {
35865 Register OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
35866 Register OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
35867 Register TReg = MRI->createVirtualRegister(&X86::GR64RegClass);
35869 // movl (BReg,IReg64,4), OReg
35870 BuildMI(DispContBB, MIMD, TII->get(X86::MOV32rm), OReg)
35871 .addReg(BReg)
35872 .addImm(4)
35873 .addReg(IReg64)
35874 .addImm(0)
35875 .addReg(0);
35876 // movsx OReg64, OReg
35877 BuildMI(DispContBB, MIMD, TII->get(X86::MOVSX64rr32), OReg64)
35878 .addReg(OReg);
35879 // addq BReg, OReg64, TReg
35880 BuildMI(DispContBB, MIMD, TII->get(X86::ADD64rr), TReg)
35881 .addReg(OReg64)
35882 .addReg(BReg);
35883 // jmpq *TReg
35884 BuildMI(DispContBB, MIMD, TII->get(X86::JMP64r)).addReg(TReg);
35885 break;
35887 default:
35888 llvm_unreachable("Unexpected jump table encoding");
35890 } else {
35891 // jmpl *.LJTI0_0(,IReg,4)
35892 BuildMI(DispContBB, MIMD, TII->get(X86::JMP32m))
35893 .addReg(0)
35894 .addImm(4)
35895 .addReg(IReg)
35896 .addJumpTableIndex(MJTI)
35897 .addReg(0);
35900 // Add the jump table entries as successors to the MBB.
35901 SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
35902 for (auto &LP : LPadList)
35903 if (SeenMBBs.insert(LP).second)
35904 DispContBB->addSuccessor(LP);
35906 // N.B. the order the invoke BBs are processed in doesn't matter here.
35907 SmallVector<MachineBasicBlock *, 64> MBBLPads;
35908 const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
35909 for (MachineBasicBlock *MBB : InvokeBBs) {
35910 // Remove the landing pad successor from the invoke block and replace it
35911 // with the new dispatch block.
35912 // Keep a copy of Successors since it's modified inside the loop.
35913 SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
35914 MBB->succ_rend());
35915 // FIXME: Avoid quadratic complexity.
35916 for (auto *MBBS : Successors) {
35917 if (MBBS->isEHPad()) {
35918 MBB->removeSuccessor(MBBS);
35919 MBBLPads.push_back(MBBS);
35923 MBB->addSuccessor(DispatchBB);
35925 // Find the invoke call and mark all of the callee-saved registers as
35926 // 'implicitly defined' so that they're spilled. This prevents code from
35927 // moving instructions to before the EH block, where they will never be
35928 // executed.
35929 for (auto &II : reverse(*MBB)) {
35930 if (!II.isCall())
35931 continue;
35933 DenseMap<unsigned, bool> DefRegs;
35934 for (auto &MOp : II.operands())
35935 if (MOp.isReg())
35936 DefRegs[MOp.getReg()] = true;
35938 MachineInstrBuilder MIB(*MF, &II);
35939 for (unsigned RegIdx = 0; SavedRegs[RegIdx]; ++RegIdx) {
35940 unsigned Reg = SavedRegs[RegIdx];
35941 if (!DefRegs[Reg])
35942 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
35945 break;
35949 // Mark all former landing pads as non-landing pads. The dispatch is the only
35950 // landing pad now.
35951 for (auto &LP : MBBLPads)
35952 LP->setIsEHPad(false);
35954 // The instruction is gone now.
35955 MI.eraseFromParent();
35956 return BB;
35959 MachineBasicBlock *
35960 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
35961 MachineBasicBlock *BB) const {
35962 MachineFunction *MF = BB->getParent();
35963 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35964 const MIMetadata MIMD(MI);
35966 auto TMMImmToTMMReg = [](unsigned Imm) {
35967 assert (Imm < 8 && "Illegal tmm index");
35968 return X86::TMM0 + Imm;
35970 switch (MI.getOpcode()) {
35971 default: llvm_unreachable("Unexpected instr type to insert");
35972 case X86::TLS_addr32:
35973 case X86::TLS_addr64:
35974 case X86::TLS_addrX32:
35975 case X86::TLS_base_addr32:
35976 case X86::TLS_base_addr64:
35977 case X86::TLS_base_addrX32:
35978 return EmitLoweredTLSAddr(MI, BB);
35979 case X86::INDIRECT_THUNK_CALL32:
35980 case X86::INDIRECT_THUNK_CALL64:
35981 case X86::INDIRECT_THUNK_TCRETURN32:
35982 case X86::INDIRECT_THUNK_TCRETURN64:
35983 return EmitLoweredIndirectThunk(MI, BB);
35984 case X86::CATCHRET:
35985 return EmitLoweredCatchRet(MI, BB);
35986 case X86::SEG_ALLOCA_32:
35987 case X86::SEG_ALLOCA_64:
35988 return EmitLoweredSegAlloca(MI, BB);
35989 case X86::PROBED_ALLOCA_32:
35990 case X86::PROBED_ALLOCA_64:
35991 return EmitLoweredProbedAlloca(MI, BB);
35992 case X86::TLSCall_32:
35993 case X86::TLSCall_64:
35994 return EmitLoweredTLSCall(MI, BB);
35995 case X86::CMOV_FR16:
35996 case X86::CMOV_FR16X:
35997 case X86::CMOV_FR32:
35998 case X86::CMOV_FR32X:
35999 case X86::CMOV_FR64:
36000 case X86::CMOV_FR64X:
36001 case X86::CMOV_GR8:
36002 case X86::CMOV_GR16:
36003 case X86::CMOV_GR32:
36004 case X86::CMOV_RFP32:
36005 case X86::CMOV_RFP64:
36006 case X86::CMOV_RFP80:
36007 case X86::CMOV_VR64:
36008 case X86::CMOV_VR128:
36009 case X86::CMOV_VR128X:
36010 case X86::CMOV_VR256:
36011 case X86::CMOV_VR256X:
36012 case X86::CMOV_VR512:
36013 case X86::CMOV_VK1:
36014 case X86::CMOV_VK2:
36015 case X86::CMOV_VK4:
36016 case X86::CMOV_VK8:
36017 case X86::CMOV_VK16:
36018 case X86::CMOV_VK32:
36019 case X86::CMOV_VK64:
36020 return EmitLoweredSelect(MI, BB);
36022 case X86::FP80_ADDr:
36023 case X86::FP80_ADDm32: {
36024 // Change the floating point control register to use double extended
36025 // precision when performing the addition.
36026 int OrigCWFrameIdx =
36027 MF->getFrameInfo().CreateStackObject(2, Align(2), false);
36028 addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FNSTCW16m)),
36029 OrigCWFrameIdx);
36031 // Load the old value of the control word...
36032 Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
36033 addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOVZX32rm16), OldCW),
36034 OrigCWFrameIdx);
36036 // OR 0b11 into bits 8 and 9. 0b11 is the encoding for double extended
36037 // precision.
36038 Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
36039 BuildMI(*BB, MI, MIMD, TII->get(X86::OR32ri), NewCW)
36040 .addReg(OldCW, RegState::Kill)
36041 .addImm(0x300);
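// For illustration: a control word of 0x027F (PC = 10b, double precision)
// becomes 0x027F | 0x0300 = 0x037F (PC = 11b, double extended precision).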
36043 // Extract to 16 bits.
36044 Register NewCW16 =
36045 MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
36046 BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), NewCW16)
36047 .addReg(NewCW, RegState::Kill, X86::sub_16bit);
36049 // Prepare memory for FLDCW.
36050 int NewCWFrameIdx =
36051 MF->getFrameInfo().CreateStackObject(2, Align(2), false);
36052 addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOV16mr)),
36053 NewCWFrameIdx)
36054 .addReg(NewCW16, RegState::Kill);
36056 // Reload the modified control word now...
36057 addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FLDCW16m)),
36058 NewCWFrameIdx);
36060 // Do the addition.
36061 if (MI.getOpcode() == X86::FP80_ADDr) {
36062 BuildMI(*BB, MI, MIMD, TII->get(X86::ADD_Fp80))
36063 .add(MI.getOperand(0))
36064 .add(MI.getOperand(1))
36065 .add(MI.getOperand(2));
36066 } else {
36067 BuildMI(*BB, MI, MIMD, TII->get(X86::ADD_Fp80m32))
36068 .add(MI.getOperand(0))
36069 .add(MI.getOperand(1))
36070 .add(MI.getOperand(2))
36071 .add(MI.getOperand(3))
36072 .add(MI.getOperand(4))
36073 .add(MI.getOperand(5))
36074 .add(MI.getOperand(6));
36077 // Reload the original control word now.
36078 addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FLDCW16m)),
36079 OrigCWFrameIdx);
36081 MI.eraseFromParent(); // The pseudo instruction is gone now.
36082 return BB;
36085 case X86::FP32_TO_INT16_IN_MEM:
36086 case X86::FP32_TO_INT32_IN_MEM:
36087 case X86::FP32_TO_INT64_IN_MEM:
36088 case X86::FP64_TO_INT16_IN_MEM:
36089 case X86::FP64_TO_INT32_IN_MEM:
36090 case X86::FP64_TO_INT64_IN_MEM:
36091 case X86::FP80_TO_INT16_IN_MEM:
36092 case X86::FP80_TO_INT32_IN_MEM:
36093 case X86::FP80_TO_INT64_IN_MEM: {
36094 // Change the floating point control register to use "round towards zero"
36095 // mode when truncating to an integer value.
36096 int OrigCWFrameIdx =
36097 MF->getFrameInfo().CreateStackObject(2, Align(2), false);
36098 addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FNSTCW16m)),
36099 OrigCWFrameIdx);
36101 // Load the old value of the control word...
36102 Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
36103 addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOVZX32rm16), OldCW),
36104 OrigCWFrameIdx);
36106 // OR 0b11 into bits 10 and 11. 0b11 is the encoding for round toward zero.
36107 Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
36108 BuildMI(*BB, MI, MIMD, TII->get(X86::OR32ri), NewCW)
36109 .addReg(OldCW, RegState::Kill).addImm(0xC00);
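// For illustration: a control word of 0x037F (RC = 00b, round to nearest)
// becomes 0x037F | 0x0C00 = 0x0F7F (RC = 11b, round toward zero).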
36111 // Extract to 16 bits.
36112 Register NewCW16 =
36113 MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
36114 BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), NewCW16)
36115 .addReg(NewCW, RegState::Kill, X86::sub_16bit);
36117 // Prepare memory for FLDCW.
36118 int NewCWFrameIdx =
36119 MF->getFrameInfo().CreateStackObject(2, Align(2), false);
36120 addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOV16mr)),
36121 NewCWFrameIdx)
36122 .addReg(NewCW16, RegState::Kill);
36124 // Reload the modified control word now...
36125 addFrameReference(BuildMI(*BB, MI, MIMD,
36126 TII->get(X86::FLDCW16m)), NewCWFrameIdx);
36128 // Get the X86 opcode to use.
36129 unsigned Opc;
36130 switch (MI.getOpcode()) {
36131 default: llvm_unreachable("illegal opcode!");
36132 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
36133 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
36134 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
36135 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
36136 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
36137 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
36138 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
36139 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
36140 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
36143 X86AddressMode AM = getAddressFromInstr(&MI, 0);
36144 addFullAddress(BuildMI(*BB, MI, MIMD, TII->get(Opc)), AM)
36145 .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
36147 // Reload the original control word now.
36148 addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FLDCW16m)),
36149 OrigCWFrameIdx);
36151 MI.eraseFromParent(); // The pseudo instruction is gone now.
36152 return BB;
36155 // xbegin
36156 case X86::XBEGIN:
36157 return emitXBegin(MI, BB, Subtarget.getInstrInfo());
36159 case X86::VAARG_64:
36160 case X86::VAARG_X32:
36161 return EmitVAARGWithCustomInserter(MI, BB);
36163 case X86::EH_SjLj_SetJmp32:
36164 case X86::EH_SjLj_SetJmp64:
36165 return emitEHSjLjSetJmp(MI, BB);
36167 case X86::EH_SjLj_LongJmp32:
36168 case X86::EH_SjLj_LongJmp64:
36169 return emitEHSjLjLongJmp(MI, BB);
36171 case X86::Int_eh_sjlj_setup_dispatch:
36172 return EmitSjLjDispatchBlock(MI, BB);
36174 case TargetOpcode::STATEPOINT:
36175 // As an implementation detail, STATEPOINT shares the STACKMAP format at
36176 // this point in the process. We diverge later.
36177 return emitPatchPoint(MI, BB);
36179 case TargetOpcode::STACKMAP:
36180 case TargetOpcode::PATCHPOINT:
36181 return emitPatchPoint(MI, BB);
36183 case TargetOpcode::PATCHABLE_EVENT_CALL:
36184 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
36185 return BB;
36187 case X86::LCMPXCHG8B: {
36188 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
36189 // In addition to the four E[ABCD] registers implied by its encoding,
36190 // CMPXCHG8B requires a memory operand. If the current target is i686 and
36191 // the current function needs a base pointer - which is ESI on i686 - the
36192 // register allocator would not be able to allocate registers for an address
36193 // of the form X(%reg, %reg, Y): there would never be enough unreserved
36194 // registers during regalloc (without the base pointer the only option would
36195 // be X(%edi, %esi, Y)).
36196 // We help the register allocator by precomputing the address in a new vreg
36197 // using LEA.
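// Sketch of the rewrite (operand values are illustrative): a CMPXCHG8B whose
// memory operand is 8(%esi,%ecx,4) becomes
//   leal 8(%esi,%ecx,4), %vreg
//   cmpxchg8b (%vreg)
// so CMPXCHG8B itself needs only one address register besides the four fixed
// E[ABCD] registers.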
36199 // If it is not i686 or there is no base pointer - nothing to do here.
36200 if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
36201 return BB;
36203 // Even though this code does not necessarily need the base pointer to
36204 // be ESI, we check for that. The reason: if this assert fails, some
36205 // changes have happened in the compiler's base pointer handling, which
36206 // most probably have to be addressed somehow here.
36207 assert(TRI->getBaseRegister() == X86::ESI &&
36208 "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
36209 "base pointer in mind");
36211 MachineRegisterInfo &MRI = MF->getRegInfo();
36212 MVT SPTy = getPointerTy(MF->getDataLayout());
36213 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
36214 Register computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);
36216 X86AddressMode AM = getAddressFromInstr(&MI, 0);
36217 // Regalloc does not need any help when the memory operand of CMPXCHG8B
36218 // does not use index register.
36219 if (AM.IndexReg == X86::NoRegister)
36220 return BB;
36222 // After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
36223 // four operand definitions that are E[ABCD] registers. We skip them and
36224 // then insert the LEA.
36225 MachineBasicBlock::reverse_iterator RMBBI(MI.getReverseIterator());
36226 while (RMBBI != BB->rend() && (RMBBI->definesRegister(X86::EAX) ||
36227 RMBBI->definesRegister(X86::EBX) ||
36228 RMBBI->definesRegister(X86::ECX) ||
36229 RMBBI->definesRegister(X86::EDX))) {
36230 ++RMBBI;
36232 MachineBasicBlock::iterator MBBI(RMBBI);
36233 addFullAddress(
36234 BuildMI(*BB, *MBBI, MIMD, TII->get(X86::LEA32r), computedAddrVReg), AM);
36236 setDirectAddressInInstr(&MI, 0, computedAddrVReg);
36238 return BB;
36240 case X86::LCMPXCHG16B_NO_RBX: {
36241 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
36242 Register BasePtr = TRI->getBaseRegister();
36243 if (TRI->hasBasePointer(*MF) &&
36244 (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
36245 if (!BB->isLiveIn(BasePtr))
36246 BB->addLiveIn(BasePtr);
36247 // Save RBX into a virtual register.
36248 Register SaveRBX =
36249 MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
36250 BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), SaveRBX)
36251 .addReg(X86::RBX);
36252 Register Dst = MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
36253 MachineInstrBuilder MIB =
36254 BuildMI(*BB, MI, MIMD, TII->get(X86::LCMPXCHG16B_SAVE_RBX), Dst);
36255 for (unsigned Idx = 0; Idx < X86::AddrNumOperands; ++Idx)
36256 MIB.add(MI.getOperand(Idx));
36257 MIB.add(MI.getOperand(X86::AddrNumOperands));
36258 MIB.addReg(SaveRBX);
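// The SAVE_RBX pseudo is expanded later, roughly: move the desired value
// into RBX, issue CMPXCHG16B, then restore RBX from SaveRBX, so the base
// pointer value survives the operation.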
36259 } else {
36260 // Simple case, just copy the virtual register to RBX.
36261 BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::RBX)
36262 .add(MI.getOperand(X86::AddrNumOperands));
36263 MachineInstrBuilder MIB =
36264 BuildMI(*BB, MI, MIMD, TII->get(X86::LCMPXCHG16B));
36265 for (unsigned Idx = 0; Idx < X86::AddrNumOperands; ++Idx)
36266 MIB.add(MI.getOperand(Idx));
36268 MI.eraseFromParent();
36269 return BB;
36271 case X86::MWAITX: {
36272 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
36273 Register BasePtr = TRI->getBaseRegister();
36274 bool IsRBX = (BasePtr == X86::RBX || BasePtr == X86::EBX);
36275 // If there is no need to save the base pointer, we generate MWAITXrrr;
36276 // otherwise we generate the pseudo MWAITX_SAVE_RBX.
36277 if (!IsRBX || !TRI->hasBasePointer(*MF)) {
36278 BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::ECX)
36279 .addReg(MI.getOperand(0).getReg());
36280 BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::EAX)
36281 .addReg(MI.getOperand(1).getReg());
36282 BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::EBX)
36283 .addReg(MI.getOperand(2).getReg());
36284 BuildMI(*BB, MI, MIMD, TII->get(X86::MWAITXrrr));
36285 MI.eraseFromParent();
36286 } else {
36287 if (!BB->isLiveIn(BasePtr)) {
36288 BB->addLiveIn(BasePtr);
36290 // Parameters can be copied into ECX and EAX but not EBX yet.
36291 BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::ECX)
36292 .addReg(MI.getOperand(0).getReg());
36293 BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::EAX)
36294 .addReg(MI.getOperand(1).getReg());
36295 assert(Subtarget.is64Bit() && "Expected 64-bit mode!");
36296 // Save RBX into a virtual register.
36297 Register SaveRBX =
36298 MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
36299 BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), SaveRBX)
36300 .addReg(X86::RBX);
36301 // Generate mwaitx pseudo.
36302 Register Dst = MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
36303 BuildMI(*BB, MI, MIMD, TII->get(X86::MWAITX_SAVE_RBX))
36304 .addDef(Dst) // Destination tied in with SaveRBX.
36305 .addReg(MI.getOperand(2).getReg()) // input value of EBX.
36306 .addUse(SaveRBX); // Save of base pointer.
36307 MI.eraseFromParent();
36309 return BB;
36311 case TargetOpcode::PREALLOCATED_SETUP: {
36312 assert(Subtarget.is32Bit() && "preallocated only used in 32-bit");
36313 auto MFI = MF->getInfo<X86MachineFunctionInfo>();
36314 MFI->setHasPreallocatedCall(true);
36315 int64_t PreallocatedId = MI.getOperand(0).getImm();
36316 size_t StackAdjustment = MFI->getPreallocatedStackSize(PreallocatedId);
36317 assert(StackAdjustment != 0 && "0 stack adjustment");
36318 LLVM_DEBUG(dbgs() << "PREALLOCATED_SETUP stack adjustment "
36319 << StackAdjustment << "\n");
36320 BuildMI(*BB, MI, MIMD, TII->get(X86::SUB32ri), X86::ESP)
36321 .addReg(X86::ESP)
36322 .addImm(StackAdjustment);
36323 MI.eraseFromParent();
36324 return BB;
36326 case TargetOpcode::PREALLOCATED_ARG: {
36327 assert(Subtarget.is32Bit() && "preallocated calls only used in 32-bit");
36328 int64_t PreallocatedId = MI.getOperand(1).getImm();
36329 int64_t ArgIdx = MI.getOperand(2).getImm();
36330 auto MFI = MF->getInfo<X86MachineFunctionInfo>();
36331 size_t ArgOffset = MFI->getPreallocatedArgOffsets(PreallocatedId)[ArgIdx];
36332 LLVM_DEBUG(dbgs() << "PREALLOCATED_ARG arg index " << ArgIdx
36333 << ", arg offset " << ArgOffset << "\n");
36334 // stack pointer + offset
36335 addRegOffset(BuildMI(*BB, MI, MIMD, TII->get(X86::LEA32r),
36336 MI.getOperand(0).getReg()),
36337 X86::ESP, false, ArgOffset);
36338 MI.eraseFromParent();
36339 return BB;
36341 case X86::PTDPBSSD:
36342 case X86::PTDPBSUD:
36343 case X86::PTDPBUSD:
36344 case X86::PTDPBUUD:
36345 case X86::PTDPBF16PS:
36346 case X86::PTDPFP16PS: {
36347 unsigned Opc;
36348 switch (MI.getOpcode()) {
36349 default: llvm_unreachable("illegal opcode!");
36350 case X86::PTDPBSSD: Opc = X86::TDPBSSD; break;
36351 case X86::PTDPBSUD: Opc = X86::TDPBSUD; break;
36352 case X86::PTDPBUSD: Opc = X86::TDPBUSD; break;
36353 case X86::PTDPBUUD: Opc = X86::TDPBUUD; break;
36354 case X86::PTDPBF16PS: Opc = X86::TDPBF16PS; break;
36355 case X86::PTDPFP16PS: Opc = X86::TDPFP16PS; break;
36358 MachineInstrBuilder MIB = BuildMI(*BB, MI, MIMD, TII->get(Opc));
36359 MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Define);
36360 MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Undef);
36361 MIB.addReg(TMMImmToTMMReg(MI.getOperand(1).getImm()), RegState::Undef);
36362 MIB.addReg(TMMImmToTMMReg(MI.getOperand(2).getImm()), RegState::Undef);
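// For illustration: PTDPBSSD with tile immediates 1, 2, 3 becomes a TDPBSSD
// that defines %tmm1 and reads %tmm1 (the accumulator), %tmm2 and %tmm3.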
36364 MI.eraseFromParent(); // The pseudo is gone now.
36365 return BB;
36367 case X86::PTILEZERO: {
36368 unsigned Imm = MI.getOperand(0).getImm();
36369 BuildMI(*BB, MI, MIMD, TII->get(X86::TILEZERO), TMMImmToTMMReg(Imm));
36370 MI.eraseFromParent(); // The pseudo is gone now.
36371 return BB;
36373 case X86::PTILELOADD:
36374 case X86::PTILELOADDT1:
36375 case X86::PTILESTORED: {
36376 unsigned Opc;
36377 switch (MI.getOpcode()) {
36378 default: llvm_unreachable("illegal opcode!");
36379 #define GET_EGPR_IF_ENABLED(OPC) (Subtarget.hasEGPR() ? OPC##_EVEX : OPC)
36380 case X86::PTILELOADD:
36381 Opc = GET_EGPR_IF_ENABLED(X86::TILELOADD);
36382 break;
36383 case X86::PTILELOADDT1:
36384 Opc = GET_EGPR_IF_ENABLED(X86::TILELOADDT1);
36385 break;
36386 case X86::PTILESTORED:
36387 Opc = GET_EGPR_IF_ENABLED(X86::TILESTORED);
36388 break;
36389 #undef GET_EGPR_IF_ENABLED
36392 MachineInstrBuilder MIB = BuildMI(*BB, MI, MIMD, TII->get(Opc));
36393 unsigned CurOp = 0;
36394 if (Opc != X86::TILESTORED && Opc != X86::TILESTORED_EVEX)
36395 MIB.addReg(TMMImmToTMMReg(MI.getOperand(CurOp++).getImm()),
36396 RegState::Define);
36398 MIB.add(MI.getOperand(CurOp++)); // base
36399 MIB.add(MI.getOperand(CurOp++)); // scale
36400 MIB.add(MI.getOperand(CurOp++)); // index -- stride
36401 MIB.add(MI.getOperand(CurOp++)); // displacement
36402 MIB.add(MI.getOperand(CurOp++)); // segment
36404 if (Opc == X86::TILESTORED || Opc == X86::TILESTORED_EVEX)
36405 MIB.addReg(TMMImmToTMMReg(MI.getOperand(CurOp++).getImm()),
36406 RegState::Undef);
36408 MI.eraseFromParent(); // The pseudo is gone now.
36409 return BB;
36411 case X86::PTCMMIMFP16PS:
36412 case X86::PTCMMRLFP16PS: {
36413 const MIMetadata MIMD(MI);
36414 unsigned Opc;
36415 switch (MI.getOpcode()) {
36416 default: llvm_unreachable("Unexpected instruction!");
36417 case X86::PTCMMIMFP16PS: Opc = X86::TCMMIMFP16PS; break;
36418 case X86::PTCMMRLFP16PS: Opc = X86::TCMMRLFP16PS; break;
36420 MachineInstrBuilder MIB = BuildMI(*BB, MI, MIMD, TII->get(Opc));
36421 MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Define);
36422 MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Undef);
36423 MIB.addReg(TMMImmToTMMReg(MI.getOperand(1).getImm()), RegState::Undef);
36424 MIB.addReg(TMMImmToTMMReg(MI.getOperand(2).getImm()), RegState::Undef);
36425 MI.eraseFromParent(); // The pseudo is gone now.
36426 return BB;
36431 //===----------------------------------------------------------------------===//
36432 // X86 Optimization Hooks
36433 //===----------------------------------------------------------------------===//
36435 bool
36436 X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
36437 const APInt &DemandedBits,
36438 const APInt &DemandedElts,
36439 TargetLoweringOpt &TLO) const {
36440 EVT VT = Op.getValueType();
36441 unsigned Opcode = Op.getOpcode();
36442 unsigned EltSize = VT.getScalarSizeInBits();
36444 if (VT.isVector()) {
36445 // If the constant is all sign bits within the active bits, then we should
36446 // extend it to the entire constant to allow it to act as a boolean constant
36447 // vector.
36448 auto NeedsSignExtension = [&](SDValue V, unsigned ActiveBits) {
36449 if (!ISD::isBuildVectorOfConstantSDNodes(V.getNode()))
36450 return false;
36451 for (unsigned i = 0, e = V.getNumOperands(); i != e; ++i) {
36452 if (!DemandedElts[i] || V.getOperand(i).isUndef())
36453 continue;
36454 const APInt &Val = V.getConstantOperandAPInt(i);
36455 if (Val.getBitWidth() > Val.getNumSignBits() &&
36456 Val.trunc(ActiveBits).getNumSignBits() == ActiveBits)
36457 return true;
36459 return false;
36461 // For vectors - if we have a constant, then try to sign extend.
36462 // TODO: Handle AND cases.
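// For example, with v4i32 (xor X, <0x0000FFFF,...>) where only the low 16
// bits of each element are demanded, the constant is sign-extended in-reg to
// <0xFFFFFFFF,...>, so the XOR acts as a vector NOT on the demanded bits.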
36463 unsigned ActiveBits = DemandedBits.getActiveBits();
36464 if (EltSize > ActiveBits && EltSize > 1 && isTypeLegal(VT) &&
36465 (Opcode == ISD::OR || Opcode == ISD::XOR || Opcode == X86ISD::ANDNP) &&
36466 NeedsSignExtension(Op.getOperand(1), ActiveBits)) {
36467 EVT ExtSVT = EVT::getIntegerVT(*TLO.DAG.getContext(), ActiveBits);
36468 EVT ExtVT = EVT::getVectorVT(*TLO.DAG.getContext(), ExtSVT,
36469 VT.getVectorNumElements());
36470 SDValue NewC =
36471 TLO.DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(Op), VT,
36472 Op.getOperand(1), TLO.DAG.getValueType(ExtVT));
36473 SDValue NewOp =
36474 TLO.DAG.getNode(Opcode, SDLoc(Op), VT, Op.getOperand(0), NewC);
36475 return TLO.CombineTo(Op, NewOp);
36477 return false;
36480 // Only optimize Ands to prevent shrinking a constant that could be
36481 // matched by movzx.
36482 if (Opcode != ISD::AND)
36483 return false;
36485 // Make sure the RHS really is a constant.
36486 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
36487 if (!C)
36488 return false;
36490 const APInt &Mask = C->getAPIntValue();
36492 // Clear all non-demanded bits initially.
36493 APInt ShrunkMask = Mask & DemandedBits;
36495 // Find the width of the shrunk mask.
36496 unsigned Width = ShrunkMask.getActiveBits();
36498 // If the mask is all 0s there's nothing to do here.
36499 if (Width == 0)
36500 return false;
36502 // Find the next power of 2 width, rounding up to a byte.
36503 Width = llvm::bit_ceil(std::max(Width, 8U));
36504 // Truncate the width to size to handle illegal types.
36505 Width = std::min(Width, EltSize);
36507 // Calculate a possible zero extend mask for this constant.
36508 APInt ZeroExtendMask = APInt::getLowBitsSet(EltSize, Width);
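// Worked example: for (and X, 0x7FFF) with only the low 8 bits demanded,
// ShrunkMask is 0xFF, Width rounds up to 8 and ZeroExtendMask is 0xFF, so the
// AND can later be matched as a movzx.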
36510 // If we aren't changing the mask, just return true to keep it and prevent
36511 // the caller from optimizing.
36512 if (ZeroExtendMask == Mask)
36513 return true;
36515 // Make sure the new mask can be represented by a combination of mask bits
36516 // and non-demanded bits.
36517 if (!ZeroExtendMask.isSubsetOf(Mask | ~DemandedBits))
36518 return false;
36520 // Replace the constant with the zero extend mask.
36521 SDLoc DL(Op);
36522 SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT);
36523 SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
36524 return TLO.CombineTo(Op, NewOp);
36527 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
36528 KnownBits &Known,
36529 const APInt &DemandedElts,
36530 const SelectionDAG &DAG,
36531 unsigned Depth) const {
36532 unsigned BitWidth = Known.getBitWidth();
36533 unsigned NumElts = DemandedElts.getBitWidth();
36534 unsigned Opc = Op.getOpcode();
36535 EVT VT = Op.getValueType();
36536 assert((Opc >= ISD::BUILTIN_OP_END ||
36537 Opc == ISD::INTRINSIC_WO_CHAIN ||
36538 Opc == ISD::INTRINSIC_W_CHAIN ||
36539 Opc == ISD::INTRINSIC_VOID) &&
36540 "Should use MaskedValueIsZero if you don't know whether Op"
36541 " is a target node!");
36543 Known.resetAll();
36544 switch (Opc) {
36545 default: break;
36546 case X86ISD::MUL_IMM: {
36547 KnownBits Known2;
36548 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36549 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36550 Known = KnownBits::mul(Known, Known2);
36551 break;
36553 case X86ISD::SETCC:
36554 Known.Zero.setBitsFrom(1);
36555 break;
36556 case X86ISD::MOVMSK: {
36557 unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
36558 Known.Zero.setBitsFrom(NumLoBits);
36559 break;
36561 case X86ISD::PEXTRB:
36562 case X86ISD::PEXTRW: {
36563 SDValue Src = Op.getOperand(0);
36564 EVT SrcVT = Src.getValueType();
36565 APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
36566 Op.getConstantOperandVal(1));
36567 Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1);
36568 Known = Known.anyextOrTrunc(BitWidth);
36569 Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
36570 break;
36572 case X86ISD::VSRAI:
36573 case X86ISD::VSHLI:
36574 case X86ISD::VSRLI: {
36575 unsigned ShAmt = Op.getConstantOperandVal(1);
36576 if (ShAmt >= VT.getScalarSizeInBits()) {
36577 // Out of range logical bit shifts are guaranteed to be zero.
36578 // Out of range arithmetic bit shifts splat the sign bit.
36579 if (Opc != X86ISD::VSRAI) {
36580 Known.setAllZero();
36581 break;
36584 ShAmt = VT.getScalarSizeInBits() - 1;
36587 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36588 if (Opc == X86ISD::VSHLI) {
36589 Known.Zero <<= ShAmt;
36590 Known.One <<= ShAmt;
36591 // Low bits are known zero.
36592 Known.Zero.setLowBits(ShAmt);
36593 } else if (Opc == X86ISD::VSRLI) {
36594 Known.Zero.lshrInPlace(ShAmt);
36595 Known.One.lshrInPlace(ShAmt);
36596 // High bits are known zero.
36597 Known.Zero.setHighBits(ShAmt);
36598 } else {
36599 Known.Zero.ashrInPlace(ShAmt);
36600 Known.One.ashrInPlace(ShAmt);
36602 break;
36604 case X86ISD::PACKUS: {
36605 // PACKUS is just a truncation if the upper half is zero.
36606 APInt DemandedLHS, DemandedRHS;
36607 getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
36609 Known.One = APInt::getAllOnes(BitWidth * 2);
36610 Known.Zero = APInt::getAllOnes(BitWidth * 2);
36612 KnownBits Known2;
36613 if (!!DemandedLHS) {
36614 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1);
36615 Known = Known.intersectWith(Known2);
36617 if (!!DemandedRHS) {
36618 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1);
36619 Known = Known.intersectWith(Known2);
36622 if (Known.countMinLeadingZeros() < BitWidth)
36623 Known.resetAll();
36624 Known = Known.trunc(BitWidth);
36625 break;
36627 case X86ISD::VBROADCAST: {
36628 SDValue Src = Op.getOperand(0);
36629 if (!Src.getSimpleValueType().isVector()) {
36630 Known = DAG.computeKnownBits(Src, Depth + 1);
36631 return;
36633 break;
36635 case X86ISD::AND: {
36636 if (Op.getResNo() == 0) {
36637 KnownBits Known2;
36638 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36639 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36640 Known &= Known2;
36642 break;
36644 case X86ISD::ANDNP: {
36645 KnownBits Known2;
36646 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36647 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36649 // ANDNP = (~X & Y);
36650 Known.One &= Known2.Zero;
36651 Known.Zero |= Known2.One;
36652 break;
36654 case X86ISD::FOR: {
36655 KnownBits Known2;
36656 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36657 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36659 Known |= Known2;
36660 break;
36662 case X86ISD::PSADBW: {
36663 assert(VT.getScalarType() == MVT::i64 &&
36664 Op.getOperand(0).getValueType().getScalarType() == MVT::i8 &&
36665 "Unexpected PSADBW types");
36667 // PSADBW - fills low 16 bits and zeros upper 48 bits of each i64 result.
36668 Known.Zero.setBitsFrom(16);
36669 break;
36671 case X86ISD::PCMPGT:
36672 case X86ISD::PCMPEQ: {
36673 KnownBits KnownLhs =
36674 DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36675 KnownBits KnownRhs =
36676 DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36677 std::optional<bool> Res = Opc == X86ISD::PCMPEQ
36678 ? KnownBits::eq(KnownLhs, KnownRhs)
36679 : KnownBits::sgt(KnownLhs, KnownRhs);
36680 if (Res) {
36681 if (*Res)
36682 Known.setAllOnes();
36683 else
36684 Known.setAllZero();
36686 break;
36688 case X86ISD::PMULUDQ: {
36689 KnownBits Known2;
36690 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36691 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36693 Known = Known.trunc(BitWidth / 2).zext(BitWidth);
36694 Known2 = Known2.trunc(BitWidth / 2).zext(BitWidth);
36695 Known = KnownBits::mul(Known, Known2);
36696 break;
36698 case X86ISD::CMOV: {
36699 Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
36700 // If we don't know any bits, early out.
36701 if (Known.isUnknown())
36702 break;
36703 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
36705 // Only known if known in both the LHS and RHS.
36706 Known = Known.intersectWith(Known2);
36707 break;
36709 case X86ISD::BEXTR:
36710 case X86ISD::BEXTRI: {
36711 SDValue Op0 = Op.getOperand(0);
36712 SDValue Op1 = Op.getOperand(1);
36714 if (auto* Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
36715 unsigned Shift = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 0);
36716 unsigned Length = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 8);
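// e.g. a control value of 0x0804 yields Shift = 4 and Length = 8, i.e.
// extract 8 bits starting at bit 4.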
36718 // If the length is 0, the result is 0.
36719 if (Length == 0) {
36720 Known.setAllZero();
36721 break;
36724 if ((Shift + Length) <= BitWidth) {
36725 Known = DAG.computeKnownBits(Op0, Depth + 1);
36726 Known = Known.extractBits(Length, Shift);
36727 Known = Known.zextOrTrunc(BitWidth);
36730 break;
36732 case X86ISD::PDEP: {
36733 KnownBits Known2;
36734 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36735 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36736 // Zeros are retained from the mask operand. But not ones.
36737 Known.One.clearAllBits();
36738 // The result will have at least as many trailing zeros as the non-mask
36739 // operand since bits can only map to the same or higher bit position.
36740 Known.Zero.setLowBits(Known2.countMinTrailingZeros());
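// e.g. if the source (operand 0) has 3 known trailing zeros, bit i of the
// source is scattered to the i-th set bit of the mask, which sits at
// position >= i, so the result also has at least 3 trailing zeros.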
36741 break;
36743 case X86ISD::PEXT: {
36744 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36745 // The result has as many leading zeros as the number of zeroes in the mask.
36746 unsigned Count = Known.Zero.popcount();
36747 Known.Zero = APInt::getHighBitsSet(BitWidth, Count);
36748 Known.One.clearAllBits();
36749 break;
36751 case X86ISD::VTRUNC:
36752 case X86ISD::VTRUNCS:
36753 case X86ISD::VTRUNCUS:
36754 case X86ISD::CVTSI2P:
36755 case X86ISD::CVTUI2P:
36756 case X86ISD::CVTP2SI:
36757 case X86ISD::CVTP2UI:
36758 case X86ISD::MCVTP2SI:
36759 case X86ISD::MCVTP2UI:
36760 case X86ISD::CVTTP2SI:
36761 case X86ISD::CVTTP2UI:
36762 case X86ISD::MCVTTP2SI:
36763 case X86ISD::MCVTTP2UI:
36764 case X86ISD::MCVTSI2P:
36765 case X86ISD::MCVTUI2P:
36766 case X86ISD::VFPROUND:
36767 case X86ISD::VMFPROUND:
36768 case X86ISD::CVTPS2PH:
36769 case X86ISD::MCVTPS2PH: {
36770 // Truncations/Conversions - upper elements are known zero.
36771 EVT SrcVT = Op.getOperand(0).getValueType();
36772 if (SrcVT.isVector()) {
36773 unsigned NumSrcElts = SrcVT.getVectorNumElements();
36774 if (NumElts > NumSrcElts && DemandedElts.countr_zero() >= NumSrcElts)
36775 Known.setAllZero();
36777 break;
36779 case X86ISD::STRICT_CVTTP2SI:
36780 case X86ISD::STRICT_CVTTP2UI:
36781 case X86ISD::STRICT_CVTSI2P:
36782 case X86ISD::STRICT_CVTUI2P:
36783 case X86ISD::STRICT_VFPROUND:
36784 case X86ISD::STRICT_CVTPS2PH: {
36785 // Strict Conversions - upper elements are known zero.
36786 EVT SrcVT = Op.getOperand(1).getValueType();
36787 if (SrcVT.isVector()) {
36788 unsigned NumSrcElts = SrcVT.getVectorNumElements();
36789 if (NumElts > NumSrcElts && DemandedElts.countr_zero() >= NumSrcElts)
36790 Known.setAllZero();
36792 break;
36794 case X86ISD::MOVQ2DQ: {
36795 // Move from MMX to XMM. Upper half of XMM should be 0.
36796 if (DemandedElts.countr_zero() >= (NumElts / 2))
36797 Known.setAllZero();
36798 break;
36800 case X86ISD::VBROADCAST_LOAD: {
36801 APInt UndefElts;
36802 SmallVector<APInt, 16> EltBits;
36803 if (getTargetConstantBitsFromNode(Op, BitWidth, UndefElts, EltBits,
36804 /*AllowWholeUndefs*/ false,
36805 /*AllowPartialUndefs*/ false)) {
36806 Known.Zero.setAllBits();
36807 Known.One.setAllBits();
36808 for (unsigned I = 0; I != NumElts; ++I) {
36809 if (!DemandedElts[I])
36810 continue;
36811 if (UndefElts[I]) {
36812 Known.resetAll();
36813 break;
36815 KnownBits Known2 = KnownBits::makeConstant(EltBits[I]);
36816 Known = Known.intersectWith(Known2);
36818 return;
36820 break;
36824 // Handle target shuffles.
36825 // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
36826 if (isTargetShuffle(Opc)) {
36827 SmallVector<int, 64> Mask;
36828 SmallVector<SDValue, 2> Ops;
36829 if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask)) {
36830 unsigned NumOps = Ops.size();
36831 unsigned NumElts = VT.getVectorNumElements();
36832 if (Mask.size() == NumElts) {
36833 SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
36834 Known.Zero.setAllBits(); Known.One.setAllBits();
36835 for (unsigned i = 0; i != NumElts; ++i) {
36836 if (!DemandedElts[i])
36837 continue;
36838 int M = Mask[i];
36839 if (M == SM_SentinelUndef) {
36840 // For UNDEF elements, we don't know anything about the common state
36841 // of the shuffle result.
36842 Known.resetAll();
36843 break;
36845 if (M == SM_SentinelZero) {
36846 Known.One.clearAllBits();
36847 continue;
36849 assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
36850 "Shuffle index out of range");
36852 unsigned OpIdx = (unsigned)M / NumElts;
36853 unsigned EltIdx = (unsigned)M % NumElts;
36854 if (Ops[OpIdx].getValueType() != VT) {
36855 // TODO - handle target shuffle ops with different value types.
36856 Known.resetAll();
36857 break;
36859 DemandedOps[OpIdx].setBit(EltIdx);
36861 // Known bits are the values that are shared by every demanded element.
36862 for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
36863 if (!DemandedOps[i])
36864 continue;
36865 KnownBits Known2 =
36866 DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1);
36867 Known = Known.intersectWith(Known2);
36874 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
36875 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
36876 unsigned Depth) const {
36877 EVT VT = Op.getValueType();
36878 unsigned VTBits = VT.getScalarSizeInBits();
36879 unsigned Opcode = Op.getOpcode();
36880 switch (Opcode) {
36881 case X86ISD::SETCC_CARRY:
36882 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
36883 return VTBits;
36885 case X86ISD::VTRUNC: {
36886 SDValue Src = Op.getOperand(0);
36887 MVT SrcVT = Src.getSimpleValueType();
36888 unsigned NumSrcBits = SrcVT.getScalarSizeInBits();
36889 assert(VTBits < NumSrcBits && "Illegal truncation input type");
36890 APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
36891 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
36892 if (Tmp > (NumSrcBits - VTBits))
36893 return Tmp - (NumSrcBits - VTBits);
36894 return 1;
36897 case X86ISD::PACKSS: {
36898 // PACKSS is just a truncation if the sign bits extend to the packed size.
36899 APInt DemandedLHS, DemandedRHS;
36900 getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS,
36901 DemandedRHS);
36903 // Helper to detect PACKSSDW(BITCAST(PACKSSDW(X)),BITCAST(PACKSSDW(Y)))
36904 // patterns often used to compact vXi64 allsignbit patterns.
36905 auto NumSignBitsPACKSS = [&](SDValue V, const APInt &Elts) -> unsigned {
36906 SDValue BC = peekThroughBitcasts(V);
36907 if (BC.getOpcode() == X86ISD::PACKSS &&
36908 BC.getScalarValueSizeInBits() == 16 &&
36909 V.getScalarValueSizeInBits() == 32) {
36910 SDValue BC0 = peekThroughBitcasts(BC.getOperand(0));
36911 SDValue BC1 = peekThroughBitcasts(BC.getOperand(1));
36912 if (BC0.getScalarValueSizeInBits() == 64 &&
36913 BC1.getScalarValueSizeInBits() == 64 &&
36914 DAG.ComputeNumSignBits(BC0, Depth + 1) == 64 &&
36915 DAG.ComputeNumSignBits(BC1, Depth + 1) == 64)
36916 return 32;
36918 return DAG.ComputeNumSignBits(V, Elts, Depth + 1);
36921 unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
36922 unsigned Tmp0 = SrcBits, Tmp1 = SrcBits;
36923 if (!!DemandedLHS)
36924 Tmp0 = NumSignBitsPACKSS(Op.getOperand(0), DemandedLHS);
36925 if (!!DemandedRHS)
36926 Tmp1 = NumSignBitsPACKSS(Op.getOperand(1), DemandedRHS);
36927 unsigned Tmp = std::min(Tmp0, Tmp1);
36928 if (Tmp > (SrcBits - VTBits))
36929 return Tmp - (SrcBits - VTBits);
36930 return 1;
36933 case X86ISD::VBROADCAST: {
36934 SDValue Src = Op.getOperand(0);
36935 if (!Src.getSimpleValueType().isVector())
36936 return DAG.ComputeNumSignBits(Src, Depth + 1);
36937 break;
36940 case X86ISD::VSHLI: {
36941 SDValue Src = Op.getOperand(0);
36942 const APInt &ShiftVal = Op.getConstantOperandAPInt(1);
36943 if (ShiftVal.uge(VTBits))
36944 return VTBits; // Shifted all bits out --> zero.
36945 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
36946 if (ShiftVal.uge(Tmp))
36947 return 1; // Shifted all sign bits out --> unknown.
36948 return Tmp - ShiftVal.getZExtValue();
36951 case X86ISD::VSRAI: {
36952 SDValue Src = Op.getOperand(0);
36953 APInt ShiftVal = Op.getConstantOperandAPInt(1);
36954 if (ShiftVal.uge(VTBits - 1))
36955 return VTBits; // Sign splat.
36956 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
36957 ShiftVal += Tmp;
36958 return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
36961 case X86ISD::FSETCC:
36962 // cmpss/cmpsd return zero/all-bits result values in the bottom element.
36963 if (VT == MVT::f32 || VT == MVT::f64 ||
36964 ((VT == MVT::v4f32 || VT == MVT::v2f64) && DemandedElts == 1))
36965 return VTBits;
36966 break;
36968 case X86ISD::PCMPGT:
36969 case X86ISD::PCMPEQ:
36970 case X86ISD::CMPP:
36971 case X86ISD::VPCOM:
36972 case X86ISD::VPCOMU:
36973 // Vector compares return zero/all-bits result values.
36974 return VTBits;
36976 case X86ISD::ANDNP: {
36977 unsigned Tmp0 =
36978 DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
36979 if (Tmp0 == 1) return 1; // Early out.
36980 unsigned Tmp1 =
36981 DAG.ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
36982 return std::min(Tmp0, Tmp1);
36985 case X86ISD::CMOV: {
36986 unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1);
36987 if (Tmp0 == 1) return 1; // Early out.
36988 unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1);
36989 return std::min(Tmp0, Tmp1);
36993 // Handle target shuffles.
36994 // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
36995 if (isTargetShuffle(Opcode)) {
36996 SmallVector<int, 64> Mask;
36997 SmallVector<SDValue, 2> Ops;
36998 if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask)) {
36999 unsigned NumOps = Ops.size();
37000 unsigned NumElts = VT.getVectorNumElements();
37001 if (Mask.size() == NumElts) {
37002 SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
37003 for (unsigned i = 0; i != NumElts; ++i) {
37004 if (!DemandedElts[i])
37005 continue;
37006 int M = Mask[i];
37007 if (M == SM_SentinelUndef) {
37008 // For UNDEF elements, we don't know anything about the common state
37009 // of the shuffle result.
37010 return 1;
37011 } else if (M == SM_SentinelZero) {
37012 // Zero = all sign bits.
37013 continue;
37015 assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
37016 "Shuffle index out of range");
37018 unsigned OpIdx = (unsigned)M / NumElts;
37019 unsigned EltIdx = (unsigned)M % NumElts;
37020 if (Ops[OpIdx].getValueType() != VT) {
37021 // TODO - handle target shuffle ops with different value types.
37022 return 1;
37024 DemandedOps[OpIdx].setBit(EltIdx);
37026 unsigned Tmp0 = VTBits;
37027 for (unsigned i = 0; i != NumOps && Tmp0 > 1; ++i) {
37028 if (!DemandedOps[i])
37029 continue;
37030 unsigned Tmp1 =
37031 DAG.ComputeNumSignBits(Ops[i], DemandedOps[i], Depth + 1);
37032 Tmp0 = std::min(Tmp0, Tmp1);
37034 return Tmp0;
37039 // Fallback case.
37040 return 1;
37043 SDValue X86TargetLowering::unwrapAddress(SDValue N) const {
37044 if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP)
37045 return N->getOperand(0);
37046 return N;
37049 // Helper to look for a normal load that can be narrowed into a vzload with the
37050 // specified VT and memory VT. Returns SDValue() on failure.
37051 static SDValue narrowLoadToVZLoad(LoadSDNode *LN, MVT MemVT, MVT VT,
37052 SelectionDAG &DAG) {
37053 // Can't if the load is volatile or atomic.
37054 if (!LN->isSimple())
37055 return SDValue();
37057 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
37058 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
37059 return DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, SDLoc(LN), Tys, Ops, MemVT,
37060 LN->getPointerInfo(), LN->getOriginalAlign(),
37061 LN->getMemOperand()->getFlags());
37064 // Attempt to match a combined shuffle mask against supported unary shuffle
37065 // instructions.
37066 // TODO: Investigate sharing more of this with shuffle lowering.
37067 static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
37068 bool AllowFloatDomain, bool AllowIntDomain,
37069 SDValue V1, const SelectionDAG &DAG,
37070 const X86Subtarget &Subtarget, unsigned &Shuffle,
37071 MVT &SrcVT, MVT &DstVT) {
37072 unsigned NumMaskElts = Mask.size();
37073 unsigned MaskEltSize = MaskVT.getScalarSizeInBits();
37075 // Match against a VZEXT_MOVL vXi32 and vXi16 zero-extending instruction.
37076 if (Mask[0] == 0 &&
37077 (MaskEltSize == 32 || (MaskEltSize == 16 && Subtarget.hasFP16()))) {
37078 if ((isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) ||
37079 (V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
37080 isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1))) {
37081 Shuffle = X86ISD::VZEXT_MOVL;
37082 if (MaskEltSize == 16)
37083 SrcVT = DstVT = MaskVT.changeVectorElementType(MVT::f16);
37084 else
37085 SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
37086 return true;
37090 // Match against a ANY/SIGN/ZERO_EXTEND_VECTOR_INREG instruction.
37091 // TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
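// For example, a v8i16 mask <0,Z,1,Z,2,Z,3,Z> (Z = zero) is matched below as
// ZERO_EXTEND_VECTOR_INREG from v8i16 to v4i32 (i.e. PMOVZXWD).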
37092 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
37093 (MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
37094 unsigned MaxScale = 64 / MaskEltSize;
37095 bool UseSign = V1.getScalarValueSizeInBits() == MaskEltSize &&
37096 DAG.ComputeNumSignBits(V1) == MaskEltSize;
37097 for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
37098 bool MatchAny = true;
37099 bool MatchZero = true;
37100 bool MatchSign = UseSign;
37101 unsigned NumDstElts = NumMaskElts / Scale;
37102 for (unsigned i = 0;
37103 i != NumDstElts && (MatchAny || MatchSign || MatchZero); ++i) {
37104 if (!isUndefOrEqual(Mask[i * Scale], (int)i)) {
37105 MatchAny = MatchSign = MatchZero = false;
37106 break;
37108 unsigned Pos = (i * Scale) + 1;
37109 unsigned Len = Scale - 1;
37110 MatchAny &= isUndefInRange(Mask, Pos, Len);
37111 MatchZero &= isUndefOrZeroInRange(Mask, Pos, Len);
37112 MatchSign &= isUndefOrEqualInRange(Mask, (int)i, Pos, Len);
37114 if (MatchAny || MatchSign || MatchZero) {
37115 assert((MatchSign || MatchZero) &&
37116 "Failed to match sext/zext but matched aext?");
37117 unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
37118 MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType()
37119 : MVT::getIntegerVT(MaskEltSize);
37120 SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);
37122 Shuffle = unsigned(
37123 MatchAny ? ISD::ANY_EXTEND
37124 : (MatchSign ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND));
37125 if (SrcVT.getVectorNumElements() != NumDstElts)
37126 Shuffle = DAG.getOpcode_EXTEND_VECTOR_INREG(Shuffle);
37128 DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
37129 DstVT = MVT::getVectorVT(DstVT, NumDstElts);
37130 return true;
37135 // Match against a VZEXT_MOVL instruction, SSE1 only supports 32-bits (MOVSS).
37136 if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2()) ||
37137 (MaskEltSize == 16 && Subtarget.hasFP16())) &&
37138 isUndefOrEqual(Mask[0], 0) &&
37139 isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
37140 Shuffle = X86ISD::VZEXT_MOVL;
37141 if (MaskEltSize == 16)
37142 SrcVT = DstVT = MaskVT.changeVectorElementType(MVT::f16);
37143 else
37144 SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
37145 return true;
37148 // Check if we have SSE3, which will let us use MOVDDUP etc. The
37149 // instructions are no slower than UNPCKLPD but have the option to
37150 // fold the input operand into even an unaligned memory load.
37151 if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
37152 if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0}, DAG, V1)) {
37153 Shuffle = X86ISD::MOVDDUP;
37154 SrcVT = DstVT = MVT::v2f64;
37155 return true;
37157 if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2}, DAG, V1)) {
37158 Shuffle = X86ISD::MOVSLDUP;
37159 SrcVT = DstVT = MVT::v4f32;
37160 return true;
37162 if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1, 3, 3}, DAG, V1)) {
37163 Shuffle = X86ISD::MOVSHDUP;
37164 SrcVT = DstVT = MVT::v4f32;
37165 return true;
37169 if (MaskVT.is256BitVector() && AllowFloatDomain) {
37170 assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
37171 if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2}, DAG, V1)) {
37172 Shuffle = X86ISD::MOVDDUP;
37173 SrcVT = DstVT = MVT::v4f64;
37174 return true;
37176 if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2, 4, 4, 6, 6}, DAG,
37177 V1)) {
37178 Shuffle = X86ISD::MOVSLDUP;
37179 SrcVT = DstVT = MVT::v8f32;
37180 return true;
37182 if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1, 3, 3, 5, 5, 7, 7}, DAG,
37183 V1)) {
37184 Shuffle = X86ISD::MOVSHDUP;
37185 SrcVT = DstVT = MVT::v8f32;
37186 return true;
37190 if (MaskVT.is512BitVector() && AllowFloatDomain) {
37191 assert(Subtarget.hasAVX512() &&
37192 "AVX512 required for 512-bit vector shuffles");
37193 if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2, 4, 4, 6, 6}, DAG,
37194 V1)) {
37195 Shuffle = X86ISD::MOVDDUP;
37196 SrcVT = DstVT = MVT::v8f64;
37197 return true;
37199 if (isTargetShuffleEquivalent(
37200 MaskVT, Mask,
37201 {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14}, DAG, V1)) {
37202 Shuffle = X86ISD::MOVSLDUP;
37203 SrcVT = DstVT = MVT::v16f32;
37204 return true;
37206 if (isTargetShuffleEquivalent(
37207 MaskVT, Mask,
37208 {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15}, DAG, V1)) {
37209 Shuffle = X86ISD::MOVSHDUP;
37210 SrcVT = DstVT = MVT::v16f32;
37211 return true;
37215 return false;
37218 // Attempt to match a combined shuffle mask against supported unary immediate
37219 // permute instructions.
37220 // TODO: Investigate sharing more of this with shuffle lowering.
37221 static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
37222 const APInt &Zeroable,
37223 bool AllowFloatDomain, bool AllowIntDomain,
37224 const SelectionDAG &DAG,
37225 const X86Subtarget &Subtarget,
37226 unsigned &Shuffle, MVT &ShuffleVT,
37227 unsigned &PermuteImm) {
37228 unsigned NumMaskElts = Mask.size();
37229 unsigned InputSizeInBits = MaskVT.getSizeInBits();
37230 unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
37231 MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
37232 bool ContainsZeros = isAnyZero(Mask);
37234 // Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
37235 if (!ContainsZeros && MaskScalarSizeInBits == 64) {
37236 // Check for lane crossing permutes.
37237 if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
37238 // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
37239 if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
37240 Shuffle = X86ISD::VPERMI;
37241 ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
37242 PermuteImm = getV4X86ShuffleImm(Mask);
37243 return true;
37245 if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
37246 SmallVector<int, 4> RepeatedMask;
37247 if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
37248 Shuffle = X86ISD::VPERMI;
37249 ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
37250 PermuteImm = getV4X86ShuffleImm(RepeatedMask);
37251 return true;
37254 } else if (AllowFloatDomain && Subtarget.hasAVX()) {
37255 // VPERMILPD can permute with a non-repeating shuffle.
37256 Shuffle = X86ISD::VPERMILPI;
37257 ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
37258 PermuteImm = 0;
37259 for (int i = 0, e = Mask.size(); i != e; ++i) {
37260 int M = Mask[i];
37261 if (M == SM_SentinelUndef)
37262 continue;
37263 assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
37264 PermuteImm |= (M & 1) << i;
37266 return true;
37270 // We are checking for a shuffle match or a shift match. Loop twice so we
37271 // can control which we try to match first, depending on target preference.
37272 for (unsigned Order = 0; Order < 2; ++Order) {
37273 if (Subtarget.preferLowerShuffleAsShift() ? (Order == 1) : (Order == 0)) {
37274 // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
37275 // AVX introduced the VPERMILPD/VPERMILPS float permutes; before then we
37276 // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
37277 if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
37278 !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
37279 SmallVector<int, 4> RepeatedMask;
37280 if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
37281 // Narrow the repeated mask to create 32-bit element permutes.
37282 SmallVector<int, 4> WordMask = RepeatedMask;
37283 if (MaskScalarSizeInBits == 64)
37284 narrowShuffleMaskElts(2, RepeatedMask, WordMask);
37286 Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
37287 ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
37288 ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
37289 PermuteImm = getV4X86ShuffleImm(WordMask);
37290 return true;
37294 // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
37295 if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16 &&
37296 ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
37297 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
37298 (MaskVT.is512BitVector() && Subtarget.hasBWI()))) {
37299 SmallVector<int, 4> RepeatedMask;
37300 if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
37301 ArrayRef<int> LoMask(RepeatedMask.data() + 0, 4);
37302 ArrayRef<int> HiMask(RepeatedMask.data() + 4, 4);
37304 // PSHUFLW: permute lower 4 elements only.
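// e.g. a repeated v8i16 mask <1,0,3,2,4,5,6,7> matches PSHUFLW with
// immediate 0xB1.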
37305 if (isUndefOrInRange(LoMask, 0, 4) &&
37306 isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
37307 Shuffle = X86ISD::PSHUFLW;
37308 ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
37309 PermuteImm = getV4X86ShuffleImm(LoMask);
37310 return true;
37313 // PSHUFHW: permute upper 4 elements only.
37314 if (isUndefOrInRange(HiMask, 4, 8) &&
37315 isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
37316 // Offset the HiMask so that we can create the shuffle immediate.
37317 int OffsetHiMask[4];
37318 for (int i = 0; i != 4; ++i)
37319 OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
37321 Shuffle = X86ISD::PSHUFHW;
37322 ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
37323 PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
37324 return true;
37328 } else {
37329 // Attempt to match against bit rotates.
37330 if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits < 64 &&
37331 ((MaskVT.is128BitVector() && Subtarget.hasXOP()) ||
37332 Subtarget.hasAVX512())) {
37333 int RotateAmt = matchShuffleAsBitRotate(ShuffleVT, MaskScalarSizeInBits,
37334 Subtarget, Mask);
37335 if (0 < RotateAmt) {
37336 Shuffle = X86ISD::VROTLI;
37337 PermuteImm = (unsigned)RotateAmt;
37338 return true;
37342 // Attempt to match against byte/bit shifts.
37343 if (AllowIntDomain &&
37344 ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
37345 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
37346 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
37347 int ShiftAmt =
37348 matchShuffleAsShift(ShuffleVT, Shuffle, MaskScalarSizeInBits, Mask, 0,
37349 Zeroable, Subtarget);
37350 if (0 < ShiftAmt && (!ShuffleVT.is512BitVector() || Subtarget.hasBWI() ||
37351 32 <= ShuffleVT.getScalarSizeInBits())) {
37352 // Byte shifts can be slower so only match them on second attempt.
37353 if (Order == 0 &&
37354 (Shuffle == X86ISD::VSHLDQ || Shuffle == X86ISD::VSRLDQ))
37355 continue;
37357 PermuteImm = (unsigned)ShiftAmt;
37358 return true;
37364 return false;
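// Illustrative sketch (not part of the lowering above): how a 4-element
// repeated mask is packed into the imm8 that PSHUFD/PSHUFLW/PSHUFHW consume,
// 2 bits per lane. The helper name is hypothetical; the real code calls
// getV4X86ShuffleImm. For the PSHUFHW path above, the high half {7,6,5,4}
// is first rebased to {3,2,1,0}, which packs to 0x1B.
static unsigned sketchV4ShuffleImm(const int (&Mask)[4]) {
  unsigned Imm = 0;
  for (int i = 0; i != 4; ++i) {
    int M = Mask[i] < 0 ? 0 : Mask[i]; // This sketch treats undef lanes as 0.
    Imm |= (M & 0x3) << (i * 2);       // Lane i occupies bits [2*i+1 : 2*i].
  }
  return Imm; // {3,2,1,0} -> 0x1B, {0,1,2,3} -> 0xE4 (identity).
}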
37367 // Attempt to match a combined unary shuffle mask against supported binary
37368 // shuffle instructions.
37369 // TODO: Investigate sharing more of this with shuffle lowering.
37370 static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
37371 bool AllowFloatDomain, bool AllowIntDomain,
37372 SDValue &V1, SDValue &V2, const SDLoc &DL,
37373 SelectionDAG &DAG, const X86Subtarget &Subtarget,
37374 unsigned &Shuffle, MVT &SrcVT, MVT &DstVT,
37375 bool IsUnary) {
37376 unsigned NumMaskElts = Mask.size();
37377 unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
37378 unsigned SizeInBits = MaskVT.getSizeInBits();
37380 if (MaskVT.is128BitVector()) {
37381 if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0}, DAG) &&
37382 AllowFloatDomain) {
37383 V2 = V1;
37384 V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
37385 Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
37386 SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
37387 return true;
37389 if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1}, DAG) &&
37390 AllowFloatDomain) {
37391 V2 = V1;
37392 Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
37393 SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
37394 return true;
37396 if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 3}, DAG) &&
37397 Subtarget.hasSSE2() && (AllowFloatDomain || !Subtarget.hasSSE41())) {
37398 std::swap(V1, V2);
37399 Shuffle = X86ISD::MOVSD;
37400 SrcVT = DstVT = MVT::v2f64;
37401 return true;
37403 if (isTargetShuffleEquivalent(MaskVT, Mask, {4, 1, 2, 3}, DAG) &&
37404 (AllowFloatDomain || !Subtarget.hasSSE41())) {
37405 Shuffle = X86ISD::MOVSS;
37406 SrcVT = DstVT = MVT::v4f32;
37407 return true;
37409 if (isTargetShuffleEquivalent(MaskVT, Mask, {8, 1, 2, 3, 4, 5, 6, 7},
37410 DAG) &&
37411 Subtarget.hasFP16()) {
37412 Shuffle = X86ISD::MOVSH;
37413 SrcVT = DstVT = MVT::v8f16;
37414 return true;
37418 // Attempt to match against either an unary or binary PACKSS/PACKUS shuffle.
37419 if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
37420 ((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
37421 ((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
37422 if (matchShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
37423 Subtarget)) {
37424 DstVT = MaskVT;
37425 return true;
37428 // TODO: Can we handle this inside matchShuffleWithPACK?
37429 if (MaskVT == MVT::v4i32 && Subtarget.hasSSE2() &&
37430 isTargetShuffleEquivalent(MaskVT, Mask, {0, 2, 4, 6}, DAG) &&
37431 V1.getScalarValueSizeInBits() == 64 &&
37432 V2.getScalarValueSizeInBits() == 64) {
37433 // Use (SSE41) PACKUSDW if the leading zero bits go to the lowest 16 bits.
37434 unsigned MinLZV1 = DAG.computeKnownBits(V1).countMinLeadingZeros();
37435 unsigned MinLZV2 = DAG.computeKnownBits(V2).countMinLeadingZeros();
37436 if (Subtarget.hasSSE41() && MinLZV1 >= 48 && MinLZV2 >= 48) {
37437 SrcVT = MVT::v4i32;
37438 DstVT = MVT::v8i16;
37439 Shuffle = X86ISD::PACKUS;
37440 return true;
37442 // Use PACKUSWB if the leading zero bits go to the lowest 8 bits.
37443 if (MinLZV1 >= 56 && MinLZV2 >= 56) {
37444 SrcVT = MVT::v8i16;
37445 DstVT = MVT::v16i8;
37446 Shuffle = X86ISD::PACKUS;
37447 return true;
37449 // Use PACKSSDW if the sign bits extend to the lowest 16 bits.
37450 if (DAG.ComputeNumSignBits(V1) > 48 && DAG.ComputeNumSignBits(V2) > 48) {
37451 SrcVT = MVT::v4i32;
37452 DstVT = MVT::v8i16;
37453 Shuffle = X86ISD::PACKSS;
37454 return true;
37458 // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
37459 if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
37460 (MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
37461 (MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
37462 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
37463 (MaskVT.is512BitVector() && Subtarget.hasAVX512() &&
37464 (32 <= EltSizeInBits || Subtarget.hasBWI()))) {
37465 if (matchShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL, DAG,
37466 Subtarget)) {
37467 SrcVT = DstVT = MaskVT;
37468 if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
37469 SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
37470 return true;
37474 // Attempt to match against an OR if we're performing a blend shuffle and the
37475 // non-blended source element is zero in each case.
37476 // TODO: Handle cases where the V1/V2 sizes don't match SizeInBits.
37477 if (SizeInBits == V1.getValueSizeInBits() &&
37478 SizeInBits == V2.getValueSizeInBits() &&
37479 (EltSizeInBits % V1.getScalarValueSizeInBits()) == 0 &&
37480 (EltSizeInBits % V2.getScalarValueSizeInBits()) == 0) {
37481 bool IsBlend = true;
37482 unsigned NumV1Elts = V1.getValueType().getVectorNumElements();
37483 unsigned NumV2Elts = V2.getValueType().getVectorNumElements();
37484 unsigned Scale1 = NumV1Elts / NumMaskElts;
37485 unsigned Scale2 = NumV2Elts / NumMaskElts;
37486 APInt DemandedZeroV1 = APInt::getZero(NumV1Elts);
37487 APInt DemandedZeroV2 = APInt::getZero(NumV2Elts);
37488 for (unsigned i = 0; i != NumMaskElts; ++i) {
37489 int M = Mask[i];
37490 if (M == SM_SentinelUndef)
37491 continue;
37492 if (M == SM_SentinelZero) {
37493 DemandedZeroV1.setBits(i * Scale1, (i + 1) * Scale1);
37494 DemandedZeroV2.setBits(i * Scale2, (i + 1) * Scale2);
37495 continue;
37497 if (M == (int)i) {
37498 DemandedZeroV2.setBits(i * Scale2, (i + 1) * Scale2);
37499 continue;
37501 if (M == (int)(i + NumMaskElts)) {
37502 DemandedZeroV1.setBits(i * Scale1, (i + 1) * Scale1);
37503 continue;
37505 IsBlend = false;
37506 break;
37508 if (IsBlend) {
37509 if (DAG.MaskedVectorIsZero(V1, DemandedZeroV1) &&
37510 DAG.MaskedVectorIsZero(V2, DemandedZeroV2)) {
37511 Shuffle = ISD::OR;
37512 SrcVT = DstVT = MaskVT.changeTypeToInteger();
37513 return true;
37515 if (NumV1Elts == NumV2Elts && NumV1Elts == NumMaskElts) {
37516 // FIXME: handle mismatched sizes?
37517 // TODO: investigate if `ISD::OR` handling in
37518 // `TargetLowering::SimplifyDemandedVectorElts` can be improved instead.
37519 auto computeKnownBitsElementWise = [&DAG](SDValue V) {
37520 unsigned NumElts = V.getValueType().getVectorNumElements();
37521 KnownBits Known(NumElts);
37522 for (unsigned EltIdx = 0; EltIdx != NumElts; ++EltIdx) {
37523 APInt Mask = APInt::getOneBitSet(NumElts, EltIdx);
37524 KnownBits PeepholeKnown = DAG.computeKnownBits(V, Mask);
37525 if (PeepholeKnown.isZero())
37526 Known.Zero.setBit(EltIdx);
37527 if (PeepholeKnown.isAllOnes())
37528 Known.One.setBit(EltIdx);
37530 return Known;
37533 KnownBits V1Known = computeKnownBitsElementWise(V1);
37534 KnownBits V2Known = computeKnownBitsElementWise(V2);
37536 for (unsigned i = 0; i != NumMaskElts && IsBlend; ++i) {
37537 int M = Mask[i];
37538 if (M == SM_SentinelUndef)
37539 continue;
37540 if (M == SM_SentinelZero) {
37541 IsBlend &= V1Known.Zero[i] && V2Known.Zero[i];
37542 continue;
37544 if (M == (int)i) {
37545 IsBlend &= V2Known.Zero[i] || V1Known.One[i];
37546 continue;
37548 if (M == (int)(i + NumMaskElts)) {
37549 IsBlend &= V1Known.Zero[i] || V2Known.One[i];
37550 continue;
37552 llvm_unreachable("will not get here.");
37554 if (IsBlend) {
37555 Shuffle = ISD::OR;
37556 SrcVT = DstVT = MaskVT.changeTypeToInteger();
37557 return true;
37563 return false;
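// Minimal sketch of the known-bits reasoning behind the PACK matching above
// (illustrative only): with at least 48 known leading zero bits per 64-bit
// element, every value already fits in 16 bits, so an unsigned-saturating
// pack down to i16 (and likewise >= 56 leading zeros for i8) cannot clamp
// anything and the truncation is lossless.
static bool sketchFitsInLow16(uint64_t KnownValue) {
  unsigned LeadZeros = llvm::countl_zero(KnownValue);
  return LeadZeros >= 48; // Equivalent to KnownValue <= 0xFFFF.
}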
37566 static bool matchBinaryPermuteShuffle(
37567 MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable,
37568 bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2,
37569 const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget,
37570 unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {
37571 unsigned NumMaskElts = Mask.size();
37572 unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
37574 // Attempt to match against VALIGND/VALIGNQ rotate.
37575 if (AllowIntDomain && (EltSizeInBits == 64 || EltSizeInBits == 32) &&
37576 ((MaskVT.is128BitVector() && Subtarget.hasVLX()) ||
37577 (MaskVT.is256BitVector() && Subtarget.hasVLX()) ||
37578 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
37579 if (!isAnyZero(Mask)) {
37580 int Rotation = matchShuffleAsElementRotate(V1, V2, Mask);
37581 if (0 < Rotation) {
37582 Shuffle = X86ISD::VALIGN;
37583 if (EltSizeInBits == 64)
37584 ShuffleVT = MVT::getVectorVT(MVT::i64, MaskVT.getSizeInBits() / 64);
37585 else
37586 ShuffleVT = MVT::getVectorVT(MVT::i32, MaskVT.getSizeInBits() / 32);
37587 PermuteImm = Rotation;
37588 return true;
37593 // Attempt to match against PALIGNR byte rotate.
37594 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
37595 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
37596 (MaskVT.is512BitVector() && Subtarget.hasBWI()))) {
37597 int ByteRotation = matchShuffleAsByteRotate(MaskVT, V1, V2, Mask);
37598 if (0 < ByteRotation) {
37599 Shuffle = X86ISD::PALIGNR;
37600 ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
37601 PermuteImm = ByteRotation;
37602 return true;
37606 // Attempt to combine to X86ISD::BLENDI.
37607 if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
37608 (Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
37609 (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
37610 uint64_t BlendMask = 0;
37611 bool ForceV1Zero = false, ForceV2Zero = false;
37612 SmallVector<int, 8> TargetMask(Mask);
37613 if (matchShuffleAsBlend(MaskVT, V1, V2, TargetMask, Zeroable, ForceV1Zero,
37614 ForceV2Zero, BlendMask)) {
37615 if (MaskVT == MVT::v16i16) {
37616 // We can only use v16i16 PBLENDW if the lanes are repeated.
37617 SmallVector<int, 8> RepeatedMask;
37618 if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
37619 RepeatedMask)) {
37620 assert(RepeatedMask.size() == 8 &&
37621 "Repeated mask size doesn't match!");
37622 PermuteImm = 0;
37623 for (int i = 0; i < 8; ++i)
37624 if (RepeatedMask[i] >= 8)
37625 PermuteImm |= 1 << i;
37626 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
37627 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
37628 Shuffle = X86ISD::BLENDI;
37629 ShuffleVT = MaskVT;
37630 return true;
37632 } else {
37633 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
37634 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
37635 PermuteImm = (unsigned)BlendMask;
37636 Shuffle = X86ISD::BLENDI;
37637 ShuffleVT = MaskVT;
37638 return true;
37643 // Attempt to combine to INSERTPS, but only if it has elements that need to
37644 // be set to zero.
37645 if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
37646 MaskVT.is128BitVector() && isAnyZero(Mask) &&
37647 matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
37648 Shuffle = X86ISD::INSERTPS;
37649 ShuffleVT = MVT::v4f32;
37650 return true;
37653 // Attempt to combine to SHUFPD.
37654 if (AllowFloatDomain && EltSizeInBits == 64 &&
37655 ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
37656 (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
37657 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
37658 bool ForceV1Zero = false, ForceV2Zero = false;
37659 if (matchShuffleWithSHUFPD(MaskVT, V1, V2, ForceV1Zero, ForceV2Zero,
37660 PermuteImm, Mask, Zeroable)) {
37661 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
37662 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
37663 Shuffle = X86ISD::SHUFP;
37664 ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
37665 return true;
37669 // Attempt to combine to SHUFPS.
37670 if (AllowFloatDomain && EltSizeInBits == 32 &&
37671 ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
37672 (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
37673 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
37674 SmallVector<int, 4> RepeatedMask;
37675 if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
37676 // Match each half of the repeated mask, to determine if it's just
37677 // referencing one of the vectors, is zeroable, or is entirely undef.
37678 auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
37679 int M0 = RepeatedMask[Offset];
37680 int M1 = RepeatedMask[Offset + 1];
37682 if (isUndefInRange(RepeatedMask, Offset, 2)) {
37683 return DAG.getUNDEF(MaskVT);
37684 } else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
37685 S0 = (SM_SentinelUndef == M0 ? -1 : 0);
37686 S1 = (SM_SentinelUndef == M1 ? -1 : 1);
37687 return getZeroVector(MaskVT, Subtarget, DAG, DL);
37688 } else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
37689 S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
37690 S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
37691 return V1;
37692 } else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
37693 S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
37694 S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
37695 return V2;
37698 return SDValue();
37701 int ShufMask[4] = {-1, -1, -1, -1};
37702 SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
37703 SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);
37705 if (Lo && Hi) {
37706 V1 = Lo;
37707 V2 = Hi;
37708 Shuffle = X86ISD::SHUFP;
37709 ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
37710 PermuteImm = getV4X86ShuffleImm(ShufMask);
37711 return true;
37716 // Attempt to combine to INSERTPS more generally if X86ISD::SHUFP failed.
37717 if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
37718 MaskVT.is128BitVector() &&
37719 matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
37720 Shuffle = X86ISD::INSERTPS;
37721 ShuffleVT = MVT::v4f32;
37722 return true;
37725 return false;
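// Hedged sketch of the v16i16 PBLENDW immediate construction above
// (illustrative only; the lowering builds it inline). Bit i of the immediate
// selects the second source for lane i of the 128-bit repeated mask.
static unsigned sketchBlendImm(const int (&RepeatedMask)[8]) {
  unsigned Imm = 0;
  for (int i = 0; i != 8; ++i)
    if (RepeatedMask[i] >= 8) // Indices 8..15 reference the second operand.
      Imm |= 1u << i;
  return Imm; // e.g. {0,9,2,11,4,13,6,15} -> 0xAA.
}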
37728 static SDValue combineX86ShuffleChainWithExtract(
37729 ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
37730 bool HasVariableMask, bool AllowVariableCrossLaneMask,
37731 bool AllowVariablePerLaneMask, SelectionDAG &DAG,
37732 const X86Subtarget &Subtarget);
37734 /// Combine an arbitrary chain of shuffles into a single instruction if
37735 /// possible.
37737 /// This is the leaf of the recursive combine below. When we have found some
37738 /// chain of single-use x86 shuffle instructions and accumulated the combined
37739 /// shuffle mask represented by them, this will try to pattern match that mask
37740 /// into either a single instruction if there is a special purpose instruction
37741 /// for this operation, or into a PSHUFB instruction which is a fully general
37742 /// instruction but should only be used to replace chains over a certain depth.
37743 static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
37744 ArrayRef<int> BaseMask, int Depth,
37745 bool HasVariableMask,
37746 bool AllowVariableCrossLaneMask,
37747 bool AllowVariablePerLaneMask,
37748 SelectionDAG &DAG,
37749 const X86Subtarget &Subtarget) {
37750 assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
37751 assert((Inputs.size() == 1 || Inputs.size() == 2) &&
37752 "Unexpected number of shuffle inputs!");
37754 SDLoc DL(Root);
37755 MVT RootVT = Root.getSimpleValueType();
37756 unsigned RootSizeInBits = RootVT.getSizeInBits();
37757 unsigned NumRootElts = RootVT.getVectorNumElements();
37759 // Canonicalize shuffle input op to the requested type.
37760 auto CanonicalizeShuffleInput = [&](MVT VT, SDValue Op) {
37761 if (VT.getSizeInBits() > Op.getValueSizeInBits())
37762 Op = widenSubVector(Op, false, Subtarget, DAG, DL, VT.getSizeInBits());
37763 else if (VT.getSizeInBits() < Op.getValueSizeInBits())
37764 Op = extractSubVector(Op, 0, DAG, DL, VT.getSizeInBits());
37765 return DAG.getBitcast(VT, Op);
37768 // Find the inputs that enter the chain. Note that multiple uses are OK
37769 // here, we're not going to remove the operands we find.
37770 bool UnaryShuffle = (Inputs.size() == 1);
37771 SDValue V1 = peekThroughBitcasts(Inputs[0]);
37772 SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
37773 : peekThroughBitcasts(Inputs[1]));
37775 MVT VT1 = V1.getSimpleValueType();
37776 MVT VT2 = V2.getSimpleValueType();
37777 assert((RootSizeInBits % VT1.getSizeInBits()) == 0 &&
37778 (RootSizeInBits % VT2.getSizeInBits()) == 0 && "Vector size mismatch");
37780 SDValue Res;
37782 unsigned NumBaseMaskElts = BaseMask.size();
37783 if (NumBaseMaskElts == 1) {
37784 assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
37785 return CanonicalizeShuffleInput(RootVT, V1);
37788 bool OptForSize = DAG.shouldOptForSize();
37789 unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
37790 bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
37791 (RootVT.isFloatingPoint() && Depth >= 1) ||
37792 (RootVT.is256BitVector() && !Subtarget.hasAVX2());
37794 // Don't combine if we are an AVX512/EVEX target and the mask element size
37795 // is different from the root element size - this would prevent writemasks
37796 // from being reused.
37797 bool IsMaskedShuffle = false;
37798 if (RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128)) {
37799 if (Root.hasOneUse() && Root->use_begin()->getOpcode() == ISD::VSELECT &&
37800 Root->use_begin()->getOperand(0).getScalarValueSizeInBits() == 1) {
37801 IsMaskedShuffle = true;
37805 // If we are shuffling a splat (and not introducing zeros) then we can just
37806 // use it directly. This works for smaller elements as well as they already
37807 // repeat across each mask element.
37808 if (UnaryShuffle && !isAnyZero(BaseMask) &&
37809 V1.getValueSizeInBits() >= RootSizeInBits &&
37810 (BaseMaskEltSizeInBits % V1.getScalarValueSizeInBits()) == 0 &&
37811 DAG.isSplatValue(V1, /*AllowUndefs*/ false)) {
37812 return CanonicalizeShuffleInput(RootVT, V1);
37815 SmallVector<int, 64> Mask(BaseMask);
37817 // See if the shuffle is a hidden identity shuffle - repeated args in HOPs
37818 // etc. can be simplified.
37819 if (VT1 == VT2 && VT1.getSizeInBits() == RootSizeInBits && VT1.isVector()) {
37820 SmallVector<int> ScaledMask, IdentityMask;
37821 unsigned NumElts = VT1.getVectorNumElements();
37822 if (Mask.size() <= NumElts &&
37823 scaleShuffleElements(Mask, NumElts, ScaledMask)) {
37824 for (unsigned i = 0; i != NumElts; ++i)
37825 IdentityMask.push_back(i);
37826 if (isTargetShuffleEquivalent(RootVT, ScaledMask, IdentityMask, DAG, V1,
37827 V2))
37828 return CanonicalizeShuffleInput(RootVT, V1);
37832 // Handle 128/256-bit lane shuffles of 512-bit vectors.
37833 if (RootVT.is512BitVector() &&
37834 (NumBaseMaskElts == 2 || NumBaseMaskElts == 4)) {
37835 // If the upper subvectors are zeroable, then an extract+insert is cheaper
37836 // than using X86ISD::SHUF128. The insertion is free, even if it has
37837 // to zero the upper subvectors.
37838 if (isUndefOrZeroInRange(Mask, 1, NumBaseMaskElts - 1)) {
37839 if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
37840 return SDValue(); // Nothing to do!
37841 assert(isInRange(Mask[0], 0, NumBaseMaskElts) &&
37842 "Unexpected lane shuffle");
37843 Res = CanonicalizeShuffleInput(RootVT, V1);
37844 unsigned SubIdx = Mask[0] * (NumRootElts / NumBaseMaskElts);
37845 bool UseZero = isAnyZero(Mask);
37846 Res = extractSubVector(Res, SubIdx, DAG, DL, BaseMaskEltSizeInBits);
37847 return widenSubVector(Res, UseZero, Subtarget, DAG, DL, RootSizeInBits);
37850 // Narrow shuffle mask to v4x128.
37851 SmallVector<int, 4> ScaledMask;
37852 assert((BaseMaskEltSizeInBits % 128) == 0 && "Illegal mask size");
37853 narrowShuffleMaskElts(BaseMaskEltSizeInBits / 128, Mask, ScaledMask);
37855 // Try to lower to vshuf64x2/vshuf32x4.
37856 auto MatchSHUF128 = [&](MVT ShuffleVT, const SDLoc &DL,
37857 ArrayRef<int> ScaledMask, SDValue V1, SDValue V2,
37858 SelectionDAG &DAG) {
37859 int PermMask[4] = {-1, -1, -1, -1};
37860 // Ensure elements came from the same Op.
37861 SDValue Ops[2] = {DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT)};
37862 for (int i = 0; i < 4; ++i) {
37863 assert(ScaledMask[i] >= -1 && "Illegal shuffle sentinel value");
37864 if (ScaledMask[i] < 0)
37865 continue;
37867 SDValue Op = ScaledMask[i] >= 4 ? V2 : V1;
37868 unsigned OpIndex = i / 2;
37869 if (Ops[OpIndex].isUndef())
37870 Ops[OpIndex] = Op;
37871 else if (Ops[OpIndex] != Op)
37872 return SDValue();
37874 PermMask[i] = ScaledMask[i] % 4;
37877 return DAG.getNode(X86ISD::SHUF128, DL, ShuffleVT,
37878 CanonicalizeShuffleInput(ShuffleVT, Ops[0]),
37879 CanonicalizeShuffleInput(ShuffleVT, Ops[1]),
37880 getV4X86ShuffleImm8ForMask(PermMask, DL, DAG));
37883 // FIXME: Is there a better way to do this? is256BitLaneRepeatedShuffleMask
37884 // doesn't work because our mask is for 128 bits and we don't have an MVT
37885 // to match that.
37886 bool PreferPERMQ = UnaryShuffle && isUndefOrInRange(ScaledMask[0], 0, 2) &&
37887 isUndefOrInRange(ScaledMask[1], 0, 2) &&
37888 isUndefOrInRange(ScaledMask[2], 2, 4) &&
37889 isUndefOrInRange(ScaledMask[3], 2, 4) &&
37890 (ScaledMask[0] < 0 || ScaledMask[2] < 0 ||
37891 ScaledMask[0] == (ScaledMask[2] % 2)) &&
37892 (ScaledMask[1] < 0 || ScaledMask[3] < 0 ||
37893 ScaledMask[1] == (ScaledMask[3] % 2));
37895 if (!isAnyZero(ScaledMask) && !PreferPERMQ) {
37896 if (Depth == 0 && Root.getOpcode() == X86ISD::SHUF128)
37897 return SDValue(); // Nothing to do!
37898 MVT ShuffleVT = (FloatDomain ? MVT::v8f64 : MVT::v8i64);
37899 if (SDValue V = MatchSHUF128(ShuffleVT, DL, ScaledMask, V1, V2, DAG))
37900 return DAG.getBitcast(RootVT, V);
37904 // Handle 128-bit lane shuffles of 256-bit vectors.
37905 if (RootVT.is256BitVector() && NumBaseMaskElts == 2) {
37906 // If the upper half is zeroable, then an extract+insert is cheaper
37907 // than using X86ISD::VPERM2X128. The insertion is free, even if it has to
37908 // zero the upper half.
37909 if (isUndefOrZero(Mask[1])) {
37910 if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
37911 return SDValue(); // Nothing to do!
37912 assert(isInRange(Mask[0], 0, 2) && "Unexpected lane shuffle");
37913 Res = CanonicalizeShuffleInput(RootVT, V1);
37914 Res = extract128BitVector(Res, Mask[0] * (NumRootElts / 2), DAG, DL);
37915 return widenSubVector(Res, Mask[1] == SM_SentinelZero, Subtarget, DAG, DL,
37916 256);
37919 // If we're inserting the low subvector, an insert-subvector 'concat'
37920 // pattern is quicker than VPERM2X128.
37921 // TODO: Add AVX2 support instead of VPERMQ/VPERMPD.
37922 if (BaseMask[0] == 0 && (BaseMask[1] == 0 || BaseMask[1] == 2) &&
37923 !Subtarget.hasAVX2()) {
37924 if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
37925 return SDValue(); // Nothing to do!
37926 SDValue Lo = CanonicalizeShuffleInput(RootVT, V1);
37927 SDValue Hi = CanonicalizeShuffleInput(RootVT, BaseMask[1] == 0 ? V1 : V2);
37928 Hi = extractSubVector(Hi, 0, DAG, DL, 128);
37929 return insertSubVector(Lo, Hi, NumRootElts / 2, DAG, DL, 128);
37932 if (Depth == 0 && Root.getOpcode() == X86ISD::VPERM2X128)
37933 return SDValue(); // Nothing to do!
37935 // If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless
37936 // we need to use the zeroing feature.
37937 // Prefer blends for sequential shuffles unless we are optimizing for size.
37938 if (UnaryShuffle &&
37939 !(Subtarget.hasAVX2() && isUndefOrInRange(Mask, 0, 2)) &&
37940 (OptForSize || !isSequentialOrUndefOrZeroInRange(Mask, 0, 2, 0))) {
37941 unsigned PermMask = 0;
37942 PermMask |= ((Mask[0] < 0 ? 0x8 : (Mask[0] & 1)) << 0);
37943 PermMask |= ((Mask[1] < 0 ? 0x8 : (Mask[1] & 1)) << 4);
37944 return DAG.getNode(
37945 X86ISD::VPERM2X128, DL, RootVT, CanonicalizeShuffleInput(RootVT, V1),
37946 DAG.getUNDEF(RootVT), DAG.getTargetConstant(PermMask, DL, MVT::i8));
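// Worked example for the immediate built above (comment only, illustrative):
// for a unary lane mask {1, -1}, Mask[0] = 1 contributes (1 & 1) << 0 and the
// undef Mask[1] contributes 0x8 << 4, giving PermMask = 0x81; setting bit 3
// of a nibble tells VPERM2X128 to zero that destination lane.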
37949 if (Depth == 0 && Root.getOpcode() == X86ISD::SHUF128)
37950 return SDValue(); // Nothing to do!
37952 // TODO - handle AVX512VL cases with X86ISD::SHUF128.
37953 if (!UnaryShuffle && !IsMaskedShuffle) {
37954 assert(llvm::all_of(Mask, [](int M) { return 0 <= M && M < 4; }) &&
37955 "Unexpected shuffle sentinel value");
37956 // Prefer blends to X86ISD::VPERM2X128.
37957 if (!((Mask[0] == 0 && Mask[1] == 3) || (Mask[0] == 2 && Mask[1] == 1))) {
37958 unsigned PermMask = 0;
37959 PermMask |= ((Mask[0] & 3) << 0);
37960 PermMask |= ((Mask[1] & 3) << 4);
37961 SDValue LHS = isInRange(Mask[0], 0, 2) ? V1 : V2;
37962 SDValue RHS = isInRange(Mask[1], 0, 2) ? V1 : V2;
37963 return DAG.getNode(X86ISD::VPERM2X128, DL, RootVT,
37964 CanonicalizeShuffleInput(RootVT, LHS),
37965 CanonicalizeShuffleInput(RootVT, RHS),
37966 DAG.getTargetConstant(PermMask, DL, MVT::i8));
37971 // For masks that have been widened to 128-bit elements or more,
37972 // narrow back down to 64-bit elements.
37973 if (BaseMaskEltSizeInBits > 64) {
37974 assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
37975 int MaskScale = BaseMaskEltSizeInBits / 64;
37976 SmallVector<int, 64> ScaledMask;
37977 narrowShuffleMaskElts(MaskScale, Mask, ScaledMask);
37978 Mask = std::move(ScaledMask);
37981 // For masked shuffles, we're trying to match the root width for better
37982 // writemask folding; attempt to scale the mask.
37983 // TODO - variable shuffles might need this to be widened again.
37984 if (IsMaskedShuffle && NumRootElts > Mask.size()) {
37985 assert((NumRootElts % Mask.size()) == 0 && "Illegal mask size");
37986 int MaskScale = NumRootElts / Mask.size();
37987 SmallVector<int, 64> ScaledMask;
37988 narrowShuffleMaskElts(MaskScale, Mask, ScaledMask);
37989 Mask = std::move(ScaledMask);
37992 unsigned NumMaskElts = Mask.size();
37993 unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
37995 // Determine the effective mask value type.
37996 FloatDomain &= (32 <= MaskEltSizeInBits);
37997 MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
37998 : MVT::getIntegerVT(MaskEltSizeInBits);
37999 MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
38001 // Only allow legal mask types.
38002 if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
38003 return SDValue();
38005 // Attempt to match the mask against known shuffle patterns.
38006 MVT ShuffleSrcVT, ShuffleVT;
38007 unsigned Shuffle, PermuteImm;
38009 // Which shuffle domains are permitted?
38010 // Permit domain crossing at higher combine depths.
38011 // TODO: Should we indicate which domain is preferred if both are allowed?
38012 bool AllowFloatDomain = FloatDomain || (Depth >= 3);
38013 bool AllowIntDomain = (!FloatDomain || (Depth >= 3)) && Subtarget.hasSSE2() &&
38014 (!MaskVT.is256BitVector() || Subtarget.hasAVX2());
38016 // Determine zeroable mask elements.
38017 APInt KnownUndef, KnownZero;
38018 resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
38019 APInt Zeroable = KnownUndef | KnownZero;
38021 if (UnaryShuffle) {
38022 // Attempt to match against broadcast-from-vector.
38023 // Limit AVX1 to cases where we're loading+broadcasting a scalar element.
38024 if ((Subtarget.hasAVX2() ||
38025 (Subtarget.hasAVX() && 32 <= MaskEltSizeInBits)) &&
38026 (!IsMaskedShuffle || NumRootElts == NumMaskElts)) {
38027 if (isUndefOrEqual(Mask, 0)) {
38028 if (V1.getValueType() == MaskVT &&
38029 V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
38030 X86::mayFoldLoad(V1.getOperand(0), Subtarget)) {
38031 if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
38032 return SDValue(); // Nothing to do!
38033 Res = V1.getOperand(0);
38034 Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
38035 return DAG.getBitcast(RootVT, Res);
38037 if (Subtarget.hasAVX2()) {
38038 if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
38039 return SDValue(); // Nothing to do!
38040 Res = CanonicalizeShuffleInput(MaskVT, V1);
38041 Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
38042 return DAG.getBitcast(RootVT, Res);
38047 if (matchUnaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, V1,
38048 DAG, Subtarget, Shuffle, ShuffleSrcVT, ShuffleVT) &&
38049 (!IsMaskedShuffle ||
38050 (NumRootElts == ShuffleVT.getVectorNumElements()))) {
38051 if (Depth == 0 && Root.getOpcode() == Shuffle)
38052 return SDValue(); // Nothing to do!
38053 Res = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
38054 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
38055 return DAG.getBitcast(RootVT, Res);
38058 if (matchUnaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
38059 AllowIntDomain, DAG, Subtarget, Shuffle, ShuffleVT,
38060 PermuteImm) &&
38061 (!IsMaskedShuffle ||
38062 (NumRootElts == ShuffleVT.getVectorNumElements()))) {
38063 if (Depth == 0 && Root.getOpcode() == Shuffle)
38064 return SDValue(); // Nothing to do!
38065 Res = CanonicalizeShuffleInput(ShuffleVT, V1);
38066 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
38067 DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
38068 return DAG.getBitcast(RootVT, Res);
38072 // Attempt to combine to INSERTPS, but only if the inserted element has come
38073 // from a scalar.
38074 // TODO: Handle other insertions here as well?
38075 if (!UnaryShuffle && AllowFloatDomain && RootSizeInBits == 128 &&
38076 Subtarget.hasSSE41() &&
38077 !isTargetShuffleEquivalent(MaskVT, Mask, {4, 1, 2, 3}, DAG)) {
38078 if (MaskEltSizeInBits == 32) {
38079 SDValue SrcV1 = V1, SrcV2 = V2;
38080 if (matchShuffleAsInsertPS(SrcV1, SrcV2, PermuteImm, Zeroable, Mask,
38081 DAG) &&
38082 SrcV2.getOpcode() == ISD::SCALAR_TO_VECTOR) {
38083 if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTPS)
38084 return SDValue(); // Nothing to do!
38085 Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32,
38086 CanonicalizeShuffleInput(MVT::v4f32, SrcV1),
38087 CanonicalizeShuffleInput(MVT::v4f32, SrcV2),
38088 DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
38089 return DAG.getBitcast(RootVT, Res);
38092 if (MaskEltSizeInBits == 64 &&
38093 isTargetShuffleEquivalent(MaskVT, Mask, {0, 2}, DAG) &&
38094 V2.getOpcode() == ISD::SCALAR_TO_VECTOR &&
38095 V2.getScalarValueSizeInBits() <= 32) {
38096 if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTPS)
38097 return SDValue(); // Nothing to do!
38098 PermuteImm = (/*DstIdx*/ 2 << 4) | (/*SrcIdx*/ 0 << 0);
38099 Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32,
38100 CanonicalizeShuffleInput(MVT::v4f32, V1),
38101 CanonicalizeShuffleInput(MVT::v4f32, V2),
38102 DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
38103 return DAG.getBitcast(RootVT, Res);
38107 SDValue NewV1 = V1; // Save operands in case early exit happens.
38108 SDValue NewV2 = V2;
38109 if (matchBinaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
38110 NewV2, DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
38111 ShuffleVT, UnaryShuffle) &&
38112 (!IsMaskedShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
38113 if (Depth == 0 && Root.getOpcode() == Shuffle)
38114 return SDValue(); // Nothing to do!
38115 NewV1 = CanonicalizeShuffleInput(ShuffleSrcVT, NewV1);
38116 NewV2 = CanonicalizeShuffleInput(ShuffleSrcVT, NewV2);
38117 Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
38118 return DAG.getBitcast(RootVT, Res);
38121 NewV1 = V1; // Save operands in case early exit happens.
38122 NewV2 = V2;
38123 if (matchBinaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
38124 AllowIntDomain, NewV1, NewV2, DL, DAG,
38125 Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
38126 (!IsMaskedShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
38127 if (Depth == 0 && Root.getOpcode() == Shuffle)
38128 return SDValue(); // Nothing to do!
38129 NewV1 = CanonicalizeShuffleInput(ShuffleVT, NewV1);
38130 NewV2 = CanonicalizeShuffleInput(ShuffleVT, NewV2);
38131 Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
38132 DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
38133 return DAG.getBitcast(RootVT, Res);
38136 // Typically from here on, we need an integer version of MaskVT.
38137 MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
38138 IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);
38140 // Annoyingly, SSE4A instructions don't map into the above match helpers.
38141 if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
38142 uint64_t BitLen, BitIdx;
38143 if (matchShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
38144 Zeroable)) {
38145 if (Depth == 0 && Root.getOpcode() == X86ISD::EXTRQI)
38146 return SDValue(); // Nothing to do!
38147 V1 = CanonicalizeShuffleInput(IntMaskVT, V1);
38148 Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
38149 DAG.getTargetConstant(BitLen, DL, MVT::i8),
38150 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
38151 return DAG.getBitcast(RootVT, Res);
38154 if (matchShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
38155 if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTQI)
38156 return SDValue(); // Nothing to do!
38157 V1 = CanonicalizeShuffleInput(IntMaskVT, V1);
38158 V2 = CanonicalizeShuffleInput(IntMaskVT, V2);
38159 Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
38160 DAG.getTargetConstant(BitLen, DL, MVT::i8),
38161 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
38162 return DAG.getBitcast(RootVT, Res);
38166 // Match shuffle against TRUNCATE patterns.
38167 if (AllowIntDomain && MaskEltSizeInBits < 64 && Subtarget.hasAVX512()) {
38168 // Match against a VTRUNC instruction, accounting for src/dst sizes.
38169 if (matchShuffleAsVTRUNC(ShuffleSrcVT, ShuffleVT, IntMaskVT, Mask, Zeroable,
38170 Subtarget)) {
38171 bool IsTRUNCATE = ShuffleVT.getVectorNumElements() ==
38172 ShuffleSrcVT.getVectorNumElements();
38173 unsigned Opc =
38174 IsTRUNCATE ? (unsigned)ISD::TRUNCATE : (unsigned)X86ISD::VTRUNC;
38175 if (Depth == 0 && Root.getOpcode() == Opc)
38176 return SDValue(); // Nothing to do!
38177 V1 = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
38178 Res = DAG.getNode(Opc, DL, ShuffleVT, V1);
38179 if (ShuffleVT.getSizeInBits() < RootSizeInBits)
38180 Res = widenSubVector(Res, true, Subtarget, DAG, DL, RootSizeInBits);
38181 return DAG.getBitcast(RootVT, Res);
38184 // Do we need a more general binary truncation pattern?
38185 if (RootSizeInBits < 512 &&
38186 ((RootVT.is256BitVector() && Subtarget.useAVX512Regs()) ||
38187 (RootVT.is128BitVector() && Subtarget.hasVLX())) &&
38188 (MaskEltSizeInBits > 8 || Subtarget.hasBWI()) &&
38189 isSequentialOrUndefInRange(Mask, 0, NumMaskElts, 0, 2)) {
38190 // Bail if this was already a truncation or PACK node.
38191 // We sometimes fail to match PACK if we demand known undef elements.
38192 if (Depth == 0 && (Root.getOpcode() == ISD::TRUNCATE ||
38193 Root.getOpcode() == X86ISD::PACKSS ||
38194 Root.getOpcode() == X86ISD::PACKUS))
38195 return SDValue(); // Nothing to do!
38196 ShuffleSrcVT = MVT::getIntegerVT(MaskEltSizeInBits * 2);
38197 ShuffleSrcVT = MVT::getVectorVT(ShuffleSrcVT, NumMaskElts / 2);
38198 V1 = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
38199 V2 = CanonicalizeShuffleInput(ShuffleSrcVT, V2);
38200 ShuffleSrcVT = MVT::getIntegerVT(MaskEltSizeInBits * 2);
38201 ShuffleSrcVT = MVT::getVectorVT(ShuffleSrcVT, NumMaskElts);
38202 Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, ShuffleSrcVT, V1, V2);
38203 Res = DAG.getNode(ISD::TRUNCATE, DL, IntMaskVT, Res);
38204 return DAG.getBitcast(RootVT, Res);
38208 // Don't try to re-form single instruction chains under any circumstances now
38209 // that we've done encoding canonicalization for them.
38210 if (Depth < 1)
38211 return SDValue();
38213 // Depth threshold above which we can efficiently use variable mask shuffles.
38214 int VariableCrossLaneShuffleDepth =
38215 Subtarget.hasFastVariableCrossLaneShuffle() ? 1 : 2;
38216 int VariablePerLaneShuffleDepth =
38217 Subtarget.hasFastVariablePerLaneShuffle() ? 1 : 2;
38218 AllowVariableCrossLaneMask &=
38219 (Depth >= VariableCrossLaneShuffleDepth) || HasVariableMask;
38220 AllowVariablePerLaneMask &=
38221 (Depth >= VariablePerLaneShuffleDepth) || HasVariableMask;
38222 // VPERMI2W/VPERMI2B are 3 uops on Skylake and Icelake so we require a
38223 // higher depth before combining them.
38224 bool AllowBWIVPERMV3 =
38225 (Depth >= (VariableCrossLaneShuffleDepth + 2) || HasVariableMask);
38227 bool MaskContainsZeros = isAnyZero(Mask);
38229 if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
38230 // If we have a single input lane-crossing shuffle then lower to VPERMV.
38231 if (UnaryShuffle && AllowVariableCrossLaneMask && !MaskContainsZeros) {
38232 if (Subtarget.hasAVX2() &&
38233 (MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) {
38234 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
38235 Res = CanonicalizeShuffleInput(MaskVT, V1);
38236 Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
38237 return DAG.getBitcast(RootVT, Res);
38239 // AVX512 variants (non-VLX will pad to 512-bit shuffles).
38240 if ((Subtarget.hasAVX512() &&
38241 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
38242 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
38243 (Subtarget.hasBWI() &&
38244 (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
38245 (Subtarget.hasVBMI() &&
38246 (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8))) {
38247 V1 = CanonicalizeShuffleInput(MaskVT, V1);
38248 V2 = DAG.getUNDEF(MaskVT);
38249 Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
38250 return DAG.getBitcast(RootVT, Res);
38254 // Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
38255 // vector as the second source (non-VLX will pad to 512-bit shuffles).
38256 if (UnaryShuffle && AllowVariableCrossLaneMask &&
38257 ((Subtarget.hasAVX512() &&
38258 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
38259 MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
38260 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32 ||
38261 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
38262 (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
38263 (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
38264 (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
38265 (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8)))) {
38266 // Adjust shuffle mask - replace SM_SentinelZero with second source index.
38267 for (unsigned i = 0; i != NumMaskElts; ++i)
38268 if (Mask[i] == SM_SentinelZero)
38269 Mask[i] = NumMaskElts + i;
38270 V1 = CanonicalizeShuffleInput(MaskVT, V1);
38271 V2 = getZeroVector(MaskVT, Subtarget, DAG, DL);
38272 Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
38273 return DAG.getBitcast(RootVT, Res);
38276 // If that failed and either input is extracted then try to combine as a
38277 // shuffle with the larger type.
38278 if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
38279 Inputs, Root, BaseMask, Depth, HasVariableMask,
38280 AllowVariableCrossLaneMask, AllowVariablePerLaneMask, DAG,
38281 Subtarget))
38282 return WideShuffle;
38284 // If we have a dual input lane-crossing shuffle then lower to VPERMV3,
38285 // (non-VLX will pad to 512-bit shuffles).
38286 if (AllowVariableCrossLaneMask && !MaskContainsZeros &&
38287 ((Subtarget.hasAVX512() &&
38288 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
38289 MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
38290 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32 ||
38291 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
38292 (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
38293 (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
38294 (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
38295 (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8)))) {
38296 V1 = CanonicalizeShuffleInput(MaskVT, V1);
38297 V2 = CanonicalizeShuffleInput(MaskVT, V2);
38298 Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
38299 return DAG.getBitcast(RootVT, Res);
38301 return SDValue();
38304 // See if we can combine a single input shuffle with zeros to a bit-mask,
38305 // which is much simpler than any shuffle.
38306 if (UnaryShuffle && MaskContainsZeros && AllowVariablePerLaneMask &&
38307 isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
38308 DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
38309 APInt Zero = APInt::getZero(MaskEltSizeInBits);
38310 APInt AllOnes = APInt::getAllOnes(MaskEltSizeInBits);
38311 APInt UndefElts(NumMaskElts, 0);
38312 SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
38313 for (unsigned i = 0; i != NumMaskElts; ++i) {
38314 int M = Mask[i];
38315 if (M == SM_SentinelUndef) {
38316 UndefElts.setBit(i);
38317 continue;
38319 if (M == SM_SentinelZero)
38320 continue;
38321 EltBits[i] = AllOnes;
38323 SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
38324 Res = CanonicalizeShuffleInput(MaskVT, V1);
38325 unsigned AndOpcode =
38326 MaskVT.isFloatingPoint() ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
38327 Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
38328 return DAG.getBitcast(RootVT, Res);
38331 // If we have a single-input shuffle with different shuffle patterns in the
38332 // 128-bit lanes, use a variable mask with VPERMILPS.
38333 // TODO: Combine other mask types at higher depths.
38334 if (UnaryShuffle && AllowVariablePerLaneMask && !MaskContainsZeros &&
38335 ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
38336 (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
38337 SmallVector<SDValue, 16> VPermIdx;
38338 for (int M : Mask) {
38339 SDValue Idx =
38340 M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
38341 VPermIdx.push_back(Idx);
38343 SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
38344 Res = CanonicalizeShuffleInput(MaskVT, V1);
38345 Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
38346 return DAG.getBitcast(RootVT, Res);
38349 // With XOP, binary shuffles of 128/256-bit floating point vectors can combine
38350 // to VPERMIL2PD/VPERMIL2PS.
38351 if (AllowVariablePerLaneMask && Subtarget.hasXOP() &&
38352 (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
38353 MaskVT == MVT::v8f32)) {
38354 // VPERMIL2 Operation.
38355 // Bits[3] - Match Bit.
38356 // Bits[2:1] - (Per Lane) PD Shuffle Mask.
38357 // Bits[2:0] - (Per Lane) PS Shuffle Mask.
38358 unsigned NumLanes = MaskVT.getSizeInBits() / 128;
38359 unsigned NumEltsPerLane = NumMaskElts / NumLanes;
38360 SmallVector<int, 8> VPerm2Idx;
38361 unsigned M2ZImm = 0;
38362 for (int M : Mask) {
38363 if (M == SM_SentinelUndef) {
38364 VPerm2Idx.push_back(-1);
38365 continue;
38367 if (M == SM_SentinelZero) {
38368 M2ZImm = 2;
38369 VPerm2Idx.push_back(8);
38370 continue;
38372 int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
38373 Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
38374 VPerm2Idx.push_back(Index);
38376 V1 = CanonicalizeShuffleInput(MaskVT, V1);
38377 V2 = CanonicalizeShuffleInput(MaskVT, V2);
38378 SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
38379 Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
38380 DAG.getTargetConstant(M2ZImm, DL, MVT::i8));
38381 return DAG.getBitcast(RootVT, Res);
38384 // If we have 3 or more shuffle instructions or a chain involving a variable
38385 // mask, we can replace them with a single PSHUFB instruction profitably.
38386 // Intel's manuals suggest only using PSHUFB if doing so replaces 5
38387 // instructions, but in practice PSHUFB tends to be *very* fast so we're
38388 // more aggressive.
38389 if (UnaryShuffle && AllowVariablePerLaneMask &&
38390 ((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
38391 (RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
38392 (RootVT.is512BitVector() && Subtarget.hasBWI()))) {
38393 SmallVector<SDValue, 16> PSHUFBMask;
38394 int NumBytes = RootVT.getSizeInBits() / 8;
38395 int Ratio = NumBytes / NumMaskElts;
38396 for (int i = 0; i < NumBytes; ++i) {
38397 int M = Mask[i / Ratio];
38398 if (M == SM_SentinelUndef) {
38399 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
38400 continue;
38402 if (M == SM_SentinelZero) {
38403 PSHUFBMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
38404 continue;
38406 M = Ratio * M + i % Ratio;
38407 assert((M / 16) == (i / 16) && "Lane crossing detected");
38408 PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
38410 MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
38411 Res = CanonicalizeShuffleInput(ByteVT, V1);
38412 SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
38413 Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
38414 return DAG.getBitcast(RootVT, Res);
38417 // With XOP, if we have a 128-bit binary input shuffle we can always combine
38418 // to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never
38419 // slower than PSHUFB on targets that support both.
38420 if (AllowVariablePerLaneMask && RootVT.is128BitVector() &&
38421 Subtarget.hasXOP()) {
38422 // VPPERM Mask Operation
38423 // Bits[4:0] - Byte Index (0 - 31)
38424 // Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
38425 SmallVector<SDValue, 16> VPPERMMask;
38426 int NumBytes = 16;
38427 int Ratio = NumBytes / NumMaskElts;
38428 for (int i = 0; i < NumBytes; ++i) {
38429 int M = Mask[i / Ratio];
38430 if (M == SM_SentinelUndef) {
38431 VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
38432 continue;
38434 if (M == SM_SentinelZero) {
38435 VPPERMMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
38436 continue;
38438 M = Ratio * M + i % Ratio;
38439 VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
38441 MVT ByteVT = MVT::v16i8;
38442 V1 = CanonicalizeShuffleInput(ByteVT, V1);
38443 V2 = CanonicalizeShuffleInput(ByteVT, V2);
38444 SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
38445 Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
38446 return DAG.getBitcast(RootVT, Res);
38449 // If that failed and either input is extracted then try to combine as a
38450 // shuffle with the larger type.
38451 if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
38452 Inputs, Root, BaseMask, Depth, HasVariableMask,
38453 AllowVariableCrossLaneMask, AllowVariablePerLaneMask, DAG, Subtarget))
38454 return WideShuffle;
38456 // If we have a dual input shuffle then lower to VPERMV3,
38457 // (non-VLX will pad to 512-bit shuffles)
38458 if (!UnaryShuffle && AllowVariablePerLaneMask && !MaskContainsZeros &&
38459 ((Subtarget.hasAVX512() &&
38460 (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v8f64 ||
38461 MaskVT == MVT::v2i64 || MaskVT == MVT::v4i64 || MaskVT == MVT::v8i64 ||
38462 MaskVT == MVT::v4f32 || MaskVT == MVT::v4i32 || MaskVT == MVT::v8f32 ||
38463 MaskVT == MVT::v8i32 || MaskVT == MVT::v16f32 ||
38464 MaskVT == MVT::v16i32)) ||
38465 (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
38466 (MaskVT == MVT::v8i16 || MaskVT == MVT::v16i16 ||
38467 MaskVT == MVT::v32i16)) ||
38468 (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
38469 (MaskVT == MVT::v16i8 || MaskVT == MVT::v32i8 ||
38470 MaskVT == MVT::v64i8)))) {
38471 V1 = CanonicalizeShuffleInput(MaskVT, V1);
38472 V2 = CanonicalizeShuffleInput(MaskVT, V2);
38473 Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
38474 return DAG.getBitcast(RootVT, Res);
38477 // Failed to find any combines.
38478 return SDValue();
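// Hedged sketch of the PSHUFB control-vector expansion used above when the
// combined mask has fewer (wider) elements than the byte vector: each mask
// element expands to Ratio consecutive byte indices, and 0x80 (bit 7 set)
// zeroes the destination byte. Illustrative only; unlike the real code, this
// sketch folds the undef sentinel into a zeroing byte.
static void sketchPSHUFBBytes(ArrayRef<int> Mask, int NumBytes,
                              SmallVectorImpl<uint8_t> &Bytes) {
  int Ratio = NumBytes / (int)Mask.size();
  for (int i = 0; i != NumBytes; ++i) {
    int M = Mask[i / Ratio];
    if (M < 0) {                 // SM_SentinelUndef / SM_SentinelZero.
      Bytes.push_back(0x80);
      continue;
    }
    Bytes.push_back(uint8_t(Ratio * M + i % Ratio));
  }
}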
38481 // Combine an arbitrary chain of shuffles + extract_subvectors into a single
38482 // instruction if possible.
38484 // Wrapper for combineX86ShuffleChain that extends the shuffle mask to a larger
38485 // type size to attempt to combine:
38486 // shuffle(extract_subvector(x,c1),extract_subvector(y,c2),m1)
38487 // -->
38488 // extract_subvector(shuffle(x,y,m2),0)
38489 static SDValue combineX86ShuffleChainWithExtract(
38490 ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
38491 bool HasVariableMask, bool AllowVariableCrossLaneMask,
38492 bool AllowVariablePerLaneMask, SelectionDAG &DAG,
38493 const X86Subtarget &Subtarget) {
38494 unsigned NumMaskElts = BaseMask.size();
38495 unsigned NumInputs = Inputs.size();
38496 if (NumInputs == 0)
38497 return SDValue();
38499 EVT RootVT = Root.getValueType();
38500 unsigned RootSizeInBits = RootVT.getSizeInBits();
38501 unsigned RootEltSizeInBits = RootSizeInBits / NumMaskElts;
38502 assert((RootSizeInBits % NumMaskElts) == 0 && "Unexpected root shuffle mask");
38504 // Peek through extract_subvector to find widest legal vector.
38505 // TODO: Handle ISD::TRUNCATE
38506 unsigned WideSizeInBits = RootSizeInBits;
38507 for (unsigned I = 0; I != NumInputs; ++I) {
38508 SDValue Input = peekThroughBitcasts(Inputs[I]);
38509 while (Input.getOpcode() == ISD::EXTRACT_SUBVECTOR)
38510 Input = peekThroughBitcasts(Input.getOperand(0));
38511 if (DAG.getTargetLoweringInfo().isTypeLegal(Input.getValueType()) &&
38512 WideSizeInBits < Input.getValueSizeInBits())
38513 WideSizeInBits = Input.getValueSizeInBits();
38516 // Bail if we fail to find a source larger than the existing root.
38517 unsigned Scale = WideSizeInBits / RootSizeInBits;
38518 if (WideSizeInBits <= RootSizeInBits ||
38519 (WideSizeInBits % RootSizeInBits) != 0)
38520 return SDValue();
38522 // Create new mask for larger type.
38523 SmallVector<int, 64> WideMask(BaseMask);
38524 for (int &M : WideMask) {
38525 if (M < 0)
38526 continue;
38527 M = (M % NumMaskElts) + ((M / NumMaskElts) * Scale * NumMaskElts);
38529 WideMask.append((Scale - 1) * NumMaskElts, SM_SentinelUndef);
38531 // Attempt to peek through inputs and adjust mask when we extract from an
38532 // upper subvector.
38533 int AdjustedMasks = 0;
38534 SmallVector<SDValue, 4> WideInputs(Inputs.begin(), Inputs.end());
38535 for (unsigned I = 0; I != NumInputs; ++I) {
38536 SDValue &Input = WideInputs[I];
38537 Input = peekThroughBitcasts(Input);
38538 while (Input.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
38539 Input.getOperand(0).getValueSizeInBits() <= WideSizeInBits) {
38540 uint64_t Idx = Input.getConstantOperandVal(1);
38541 if (Idx != 0) {
38542 ++AdjustedMasks;
38543 unsigned InputEltSizeInBits = Input.getScalarValueSizeInBits();
38544 Idx = (Idx * InputEltSizeInBits) / RootEltSizeInBits;
38546 int lo = I * WideMask.size();
38547 int hi = (I + 1) * WideMask.size();
38548 for (int &M : WideMask)
38549 if (lo <= M && M < hi)
38550 M += Idx;
38552 Input = peekThroughBitcasts(Input.getOperand(0));
38556 // Remove unused/repeated shuffle source ops.
38557 resolveTargetShuffleInputsAndMask(WideInputs, WideMask);
38558 assert(!WideInputs.empty() && "Shuffle with no inputs detected");
38560 // Bail if we're always extracting from the lowest subvectors
38561 // (combineX86ShuffleChain should match this for the current width), or if the
38562 // shuffle still references too many inputs.
38563 if (AdjustedMasks == 0 || WideInputs.size() > 2)
38564 return SDValue();
38566 // Minor canonicalization of the accumulated shuffle mask to make it easier
38567 // to match below. All this does is detect masks with sequential pairs of
38568 // elements, and shrink them to the half-width mask. It does this in a loop
38569 // so it will reduce the size of the mask to the minimal width mask which
38570 // performs an equivalent shuffle.
38571 while (WideMask.size() > 1) {
38572 SmallVector<int, 64> WidenedMask;
38573 if (!canWidenShuffleElements(WideMask, WidenedMask))
38574 break;
38575 WideMask = std::move(WidenedMask);
38578 // Canonicalization of binary shuffle masks to improve pattern matching by
38579 // commuting the inputs.
38580 if (WideInputs.size() == 2 && canonicalizeShuffleMaskWithCommute(WideMask)) {
38581 ShuffleVectorSDNode::commuteMask(WideMask);
38582 std::swap(WideInputs[0], WideInputs[1]);
38585 // Increase depth for every upper subvector we've peeked through.
38586 Depth += AdjustedMasks;
38588 // Attempt to combine wider chain.
38589 // TODO: Can we use a better Root?
38590 SDValue WideRoot = WideInputs.front().getValueSizeInBits() >
38591 WideInputs.back().getValueSizeInBits()
38592 ? WideInputs.front()
38593 : WideInputs.back();
38594 assert(WideRoot.getValueSizeInBits() == WideSizeInBits &&
38595 "WideRootSize mismatch");
38597 if (SDValue WideShuffle =
38598 combineX86ShuffleChain(WideInputs, WideRoot, WideMask, Depth,
38599 HasVariableMask, AllowVariableCrossLaneMask,
38600 AllowVariablePerLaneMask, DAG, Subtarget)) {
38601 WideShuffle =
38602 extractSubVector(WideShuffle, 0, DAG, SDLoc(Root), RootSizeInBits);
38603 return DAG.getBitcast(RootVT, WideShuffle);
38606 return SDValue();
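// Illustrative sketch of the index rebasing performed above when widening the
// shuffle mask to the widest extracted source: within an operand the index is
// kept, but operand boundaries move from NumMaskElts to Scale * NumMaskElts.
static int sketchWidenMaskIndex(int M, unsigned NumMaskElts, unsigned Scale) {
  if (M < 0)
    return M; // Preserve undef/zero sentinels.
  return int((M % NumMaskElts) + ((M / NumMaskElts) * Scale * NumMaskElts));
}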
38609 // Canonicalize the combined shuffle mask chain with horizontal ops.
38610 // NOTE: This may update the Ops and Mask.
38611 static SDValue canonicalizeShuffleMaskWithHorizOp(
38612 MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask,
38613 unsigned RootSizeInBits, const SDLoc &DL, SelectionDAG &DAG,
38614 const X86Subtarget &Subtarget) {
38615 if (Mask.empty() || Ops.empty())
38616 return SDValue();
38618 SmallVector<SDValue> BC;
38619 for (SDValue Op : Ops)
38620 BC.push_back(peekThroughBitcasts(Op));
38622 // All ops must be the same horizop + type.
38623 SDValue BC0 = BC[0];
38624 EVT VT0 = BC0.getValueType();
38625 unsigned Opcode0 = BC0.getOpcode();
38626 if (VT0.getSizeInBits() != RootSizeInBits || llvm::any_of(BC, [&](SDValue V) {
38627 return V.getOpcode() != Opcode0 || V.getValueType() != VT0;
38629 return SDValue();
38631 bool isHoriz = (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
38632 Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB);
38633 bool isPack = (Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS);
38634 if (!isHoriz && !isPack)
38635 return SDValue();
38637 // Do all ops have a single use?
38638 bool OneUseOps = llvm::all_of(Ops, [](SDValue Op) {
38639 return Op.hasOneUse() &&
38640 peekThroughBitcasts(Op) == peekThroughOneUseBitcasts(Op);
38643 int NumElts = VT0.getVectorNumElements();
38644 int NumLanes = VT0.getSizeInBits() / 128;
38645 int NumEltsPerLane = NumElts / NumLanes;
38646 int NumHalfEltsPerLane = NumEltsPerLane / 2;
38647 MVT SrcVT = BC0.getOperand(0).getSimpleValueType();
38648 unsigned EltSizeInBits = RootSizeInBits / Mask.size();
38650 if (NumEltsPerLane >= 4 &&
38651 (isPack || shouldUseHorizontalOp(Ops.size() == 1, DAG, Subtarget))) {
38652 SmallVector<int> LaneMask, ScaledMask;
38653 if (isRepeatedTargetShuffleMask(128, EltSizeInBits, Mask, LaneMask) &&
38654 scaleShuffleElements(LaneMask, 4, ScaledMask)) {
38655 // See if we can remove the shuffle by reordering the HOP chain so that
38656 // the HOP args are pre-shuffled.
38657 // TODO: Generalize to any sized/depth chain.
38658 // TODO: Add support for PACKSS/PACKUS.
38659 if (isHoriz) {
38660 // Attempt to find a HOP(HOP(X,Y),HOP(Z,W)) source operand.
38661 auto GetHOpSrc = [&](int M) {
38662 if (M == SM_SentinelUndef)
38663 return DAG.getUNDEF(VT0);
38664 if (M == SM_SentinelZero)
38665 return getZeroVector(VT0.getSimpleVT(), Subtarget, DAG, DL);
38666 SDValue Src0 = BC[M / 4];
38667 SDValue Src1 = Src0.getOperand((M % 4) >= 2);
38668 if (Src1.getOpcode() == Opcode0 && Src0->isOnlyUserOf(Src1.getNode()))
38669 return Src1.getOperand(M % 2);
38670 return SDValue();
38672 SDValue M0 = GetHOpSrc(ScaledMask[0]);
38673 SDValue M1 = GetHOpSrc(ScaledMask[1]);
38674 SDValue M2 = GetHOpSrc(ScaledMask[2]);
38675 SDValue M3 = GetHOpSrc(ScaledMask[3]);
38676 if (M0 && M1 && M2 && M3) {
38677 SDValue LHS = DAG.getNode(Opcode0, DL, SrcVT, M0, M1);
38678 SDValue RHS = DAG.getNode(Opcode0, DL, SrcVT, M2, M3);
38679 return DAG.getNode(Opcode0, DL, VT0, LHS, RHS);
38682 // shuffle(hop(x,y),hop(z,w)) -> permute(hop(x,z)) etc.
38683 if (Ops.size() >= 2) {
38684 SDValue LHS, RHS;
38685 auto GetHOpSrc = [&](int M, int &OutM) {
38686 // TODO: Support SM_SentinelZero
38687 if (M < 0)
38688 return M == SM_SentinelUndef;
38689 SDValue Src = BC[M / 4].getOperand((M % 4) >= 2);
38690 if (!LHS || LHS == Src) {
38691 LHS = Src;
38692 OutM = (M % 2);
38693 return true;
38695 if (!RHS || RHS == Src) {
38696 RHS = Src;
38697 OutM = (M % 2) + 2;
38698 return true;
38700 return false;
38702 int PostMask[4] = {-1, -1, -1, -1};
38703 if (GetHOpSrc(ScaledMask[0], PostMask[0]) &&
38704 GetHOpSrc(ScaledMask[1], PostMask[1]) &&
38705 GetHOpSrc(ScaledMask[2], PostMask[2]) &&
38706 GetHOpSrc(ScaledMask[3], PostMask[3])) {
38707 LHS = DAG.getBitcast(SrcVT, LHS);
38708 RHS = DAG.getBitcast(SrcVT, RHS ? RHS : LHS);
38709 SDValue Res = DAG.getNode(Opcode0, DL, VT0, LHS, RHS);
38710 // Use SHUFPS for the permute so this will work on SSE2 targets;
38711 // shuffle combining and domain handling will simplify this later on.
38712 MVT ShuffleVT = MVT::getVectorVT(MVT::f32, RootSizeInBits / 32);
38713 Res = DAG.getBitcast(ShuffleVT, Res);
38714 return DAG.getNode(X86ISD::SHUFP, DL, ShuffleVT, Res, Res,
38715 getV4X86ShuffleImm8ForMask(PostMask, DL, DAG));
38721 if (2 < Ops.size())
38722 return SDValue();
38724 SDValue BC1 = BC[BC.size() - 1];
38725 if (Mask.size() == VT0.getVectorNumElements()) {
38726 // Canonicalize binary shuffles of horizontal ops that use the
38727 // same sources to a unary shuffle.
38728 // TODO: Try to perform this fold even if the shuffle remains.
38729 if (Ops.size() == 2) {
38730 auto ContainsOps = [](SDValue HOp, SDValue Op) {
38731 return Op == HOp.getOperand(0) || Op == HOp.getOperand(1);
38733 // Commute if all BC0's ops are contained in BC1.
38734 if (ContainsOps(BC1, BC0.getOperand(0)) &&
38735 ContainsOps(BC1, BC0.getOperand(1))) {
38736 ShuffleVectorSDNode::commuteMask(Mask);
38737 std::swap(Ops[0], Ops[1]);
38738 std::swap(BC0, BC1);
38741 // If BC1 can be represented by BC0, then convert to unary shuffle.
38742 if (ContainsOps(BC0, BC1.getOperand(0)) &&
38743 ContainsOps(BC0, BC1.getOperand(1))) {
38744 for (int &M : Mask) {
38745 if (M < NumElts) // BC0 element or UNDEF/Zero sentinel.
38746 continue;
38747 int SubLane = ((M % NumEltsPerLane) >= NumHalfEltsPerLane) ? 1 : 0;
38748 M -= NumElts + (SubLane * NumHalfEltsPerLane);
38749 if (BC1.getOperand(SubLane) != BC0.getOperand(0))
38750 M += NumHalfEltsPerLane;
38755 // Canonicalize unary horizontal ops to only refer to lower halves.
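// For example (illustrative): a unary v4f32 HADD(X,X) lane holds
// {X0+X1, X2+X3, X0+X1, X2+X3}, so the upper half of each 128-bit lane merely
// duplicates the lower half; remapping upper-half mask references to the lower
// half (M -= NumHalfEltsPerLane) leaves the result unchanged and exposes
// further combines. The same holds for PACKSS/PACKUS with identical operands.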
38756 for (int i = 0; i != NumElts; ++i) {
38757 int &M = Mask[i];
38758 if (isUndefOrZero(M))
38759 continue;
38760 if (M < NumElts && BC0.getOperand(0) == BC0.getOperand(1) &&
38761 (M % NumEltsPerLane) >= NumHalfEltsPerLane)
38762 M -= NumHalfEltsPerLane;
38763 if (NumElts <= M && BC1.getOperand(0) == BC1.getOperand(1) &&
38764 (M % NumEltsPerLane) >= NumHalfEltsPerLane)
38765 M -= NumHalfEltsPerLane;
38769 // Combine a binary shuffle of 2 similar 'Horizontal' instructions into a
38770 // single instruction. Attempt to match a v2X64 repeating shuffle pattern that
38771 // represents the LHS/RHS inputs for the lower/upper halves.
38772 SmallVector<int, 16> TargetMask128, WideMask128;
38773 if (isRepeatedTargetShuffleMask(128, EltSizeInBits, Mask, TargetMask128) &&
38774 scaleShuffleElements(TargetMask128, 2, WideMask128)) {
38775 assert(isUndefOrZeroOrInRange(WideMask128, 0, 4) && "Illegal shuffle");
38776 bool SingleOp = (Ops.size() == 1);
38777 if (isPack || OneUseOps ||
38778 shouldUseHorizontalOp(SingleOp, DAG, Subtarget)) {
38779 SDValue Lo = isInRange(WideMask128[0], 0, 2) ? BC0 : BC1;
38780 SDValue Hi = isInRange(WideMask128[1], 0, 2) ? BC0 : BC1;
38781 Lo = Lo.getOperand(WideMask128[0] & 1);
38782 Hi = Hi.getOperand(WideMask128[1] & 1);
38783 if (SingleOp) {
38784 SDValue Undef = DAG.getUNDEF(SrcVT);
38785 SDValue Zero = getZeroVector(SrcVT, Subtarget, DAG, DL);
38786 Lo = (WideMask128[0] == SM_SentinelZero ? Zero : Lo);
38787 Hi = (WideMask128[1] == SM_SentinelZero ? Zero : Hi);
38788 Lo = (WideMask128[0] == SM_SentinelUndef ? Undef : Lo);
38789 Hi = (WideMask128[1] == SM_SentinelUndef ? Undef : Hi);
38791 return DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
38795 // If we are post-shuffling a 256-bit hop and not requiring the upper
38796 // elements, then try to narrow to a 128-bit hop directly.
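// For example (illustrative): the four 64-bit chunks of a v8f32 HADD(A,B) are
// the pair-sums {A-low, B-low, A-high, B-high}; if the shuffle only keeps
// chunks 0 and 3, the sequence can be rebuilt as a v4f32
// HADD(extract_lo(A), extract_hi(B)) widened back to 256 bits with undefs.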
38797 SmallVector<int, 16> WideMask64;
38798 if (Ops.size() == 1 && NumLanes == 2 &&
38799 scaleShuffleElements(Mask, 4, WideMask64) &&
38800 isUndefInRange(WideMask64, 2, 2)) {
38801 int M0 = WideMask64[0];
38802 int M1 = WideMask64[1];
38803 if (isInRange(M0, 0, 4) && isInRange(M1, 0, 4)) {
38804 MVT HalfVT = VT0.getSimpleVT().getHalfNumVectorElementsVT();
38805 unsigned Idx0 = (M0 & 2) ? (SrcVT.getVectorNumElements() / 2) : 0;
38806 unsigned Idx1 = (M1 & 2) ? (SrcVT.getVectorNumElements() / 2) : 0;
38807 SDValue V0 = extract128BitVector(BC[0].getOperand(M0 & 1), Idx0, DAG, DL);
38808 SDValue V1 = extract128BitVector(BC[0].getOperand(M1 & 1), Idx1, DAG, DL);
38809 SDValue Res = DAG.getNode(Opcode0, DL, HalfVT, V0, V1);
38810 return widenSubVector(Res, false, Subtarget, DAG, DL, 256);
38814 return SDValue();
38817 // Attempt to constant fold all of the constant source ops.
38818 // Returns the folded constant if the entire shuffle folds to a constant.
38819 // TODO: Extend this to merge multiple constant Ops and update the mask.
38820 static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
38821 ArrayRef<int> Mask, SDValue Root,
38822 bool HasVariableMask,
38823 SelectionDAG &DAG,
38824 const X86Subtarget &Subtarget) {
38825 MVT VT = Root.getSimpleValueType();
38827 unsigned SizeInBits = VT.getSizeInBits();
38828 unsigned NumMaskElts = Mask.size();
38829 unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
38830 unsigned NumOps = Ops.size();
38832 // Extract constant bits from each source op.
38833 SmallVector<APInt, 16> UndefEltsOps(NumOps);
38834 SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
38835 for (unsigned I = 0; I != NumOps; ++I)
38836 if (!getTargetConstantBitsFromNode(Ops[I], MaskSizeInBits, UndefEltsOps[I],
38837 RawBitsOps[I]))
38838 return SDValue();
38840 // If we're optimizing for size, only fold if at least one of the constants is
38841 // only used once or the combined shuffle has included a variable mask
38842 // shuffle; this is to avoid constant pool bloat.
38843 bool IsOptimizingSize = DAG.shouldOptForSize();
38844 if (IsOptimizingSize && !HasVariableMask &&
38845 llvm::none_of(Ops, [](SDValue SrcOp) { return SrcOp->hasOneUse(); }))
38846 return SDValue();
38848 // Shuffle the constant bits according to the mask.
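// Illustrative example: with Ops[0] = <1,2,3,4>, Ops[1] = <5,6,7,8> and
// Mask = <0, 5, SM_SentinelZero, SM_SentinelUndef>, the loop below classifies
// each lane and yields the constant vector <1, 6, 0, undef>, which is
// materialized via getConstVector and bitcast back to VT.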
38849 SDLoc DL(Root);
38850 APInt UndefElts(NumMaskElts, 0);
38851 APInt ZeroElts(NumMaskElts, 0);
38852 APInt ConstantElts(NumMaskElts, 0);
38853 SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
38854 APInt::getZero(MaskSizeInBits));
38855 for (unsigned i = 0; i != NumMaskElts; ++i) {
38856 int M = Mask[i];
38857 if (M == SM_SentinelUndef) {
38858 UndefElts.setBit(i);
38859 continue;
38860 } else if (M == SM_SentinelZero) {
38861 ZeroElts.setBit(i);
38862 continue;
38864 assert(0 <= M && M < (int)(NumMaskElts * NumOps));
38866 unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
38867 unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;
38869 auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
38870 if (SrcUndefElts[SrcMaskIdx]) {
38871 UndefElts.setBit(i);
38872 continue;
38875 auto &SrcEltBits = RawBitsOps[SrcOpIdx];
38876 APInt &Bits = SrcEltBits[SrcMaskIdx];
38877 if (!Bits) {
38878 ZeroElts.setBit(i);
38879 continue;
38882 ConstantElts.setBit(i);
38883 ConstantBitData[i] = Bits;
38885 assert((UndefElts | ZeroElts | ConstantElts).isAllOnes());
38887 // Attempt to create a zero vector.
38888 if ((UndefElts | ZeroElts).isAllOnes())
38889 return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG, DL);
38891 // Create the constant data.
38892 MVT MaskSVT;
38893 if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
38894 MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
38895 else
38896 MaskSVT = MVT::getIntegerVT(MaskSizeInBits);
38898 MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
38899 if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
38900 return SDValue();
38902 SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
38903 return DAG.getBitcast(VT, CstOp);
38906 namespace llvm {
38907 namespace X86 {
38908 enum {
38909 MaxShuffleCombineDepth = 8
38910 };
38911 } // namespace X86
38912 } // namespace llvm
38914 /// Fully generic combining of x86 shuffle instructions.
38916 /// This should be the last combine run over the x86 shuffle instructions. Once
38917 /// they have been fully optimized, this will recursively consider all chains
38918 /// of single-use shuffle instructions, build a generic model of the cumulative
38919 /// shuffle operation, and check for simpler instructions which implement this
38920 /// operation. We use this primarily for two purposes:
38922 /// 1) Collapse generic shuffles to specialized single instructions when
38923 /// equivalent. In most cases, this is just an encoding size win, but
38924 /// sometimes we will collapse multiple generic shuffles into a single
38925 /// special-purpose shuffle.
38926 /// 2) Look for sequences of shuffle instructions with 3 or more total
38927 /// instructions, and replace them with the slightly more expensive SSSE3
38928 /// PSHUFB instruction if available. We do this as the last combining step
38929 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
38930 /// a suitable short sequence of other instructions. The PSHUFB will either
38931 /// use a register or have to read from memory and so is slightly (but only
38932 /// slightly) more expensive than the other shuffle instructions.
38934 /// Because this is inherently a quadratic operation (for each shuffle in
38935 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
38936 /// This should never be an issue in practice as the shuffle lowering doesn't
38937 /// produce sequences of more than 8 instructions.
38939 /// FIXME: We will currently miss some cases where the redundant shuffling
38940 /// would simplify under the threshold for PSHUFB formation because of
38941 /// combine-ordering. To fix this, we should do the redundant instruction
38942 /// combining in this recursive walk.
38943 static SDValue combineX86ShufflesRecursively(
38944 ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root,
38945 ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth,
38946 unsigned MaxDepth, bool HasVariableMask, bool AllowVariableCrossLaneMask,
38947 bool AllowVariablePerLaneMask, SelectionDAG &DAG,
38948 const X86Subtarget &Subtarget) {
38949 assert(!RootMask.empty() &&
38950 (RootMask.size() > 1 || (RootMask[0] == 0 && SrcOpIndex == 0)) &&
38951 "Illegal shuffle root mask");
38952 MVT RootVT = Root.getSimpleValueType();
38953 assert(RootVT.isVector() && "Shuffles operate on vector types!");
38954 unsigned RootSizeInBits = RootVT.getSizeInBits();
38956 // Bound the depth of our recursive combine because this is ultimately
38957 // quadratic in nature.
38958 if (Depth >= MaxDepth)
38959 return SDValue();
38961 // Directly rip through bitcasts to find the underlying operand.
38962 SDValue Op = SrcOps[SrcOpIndex];
38963 Op = peekThroughOneUseBitcasts(Op);
38965 EVT VT = Op.getValueType();
38966 if (!VT.isVector() || !VT.isSimple())
38967 return SDValue(); // Bail if we hit a non-simple non-vector.
38969 // FIXME: Just bail on f16 for now.
38970 if (VT.getVectorElementType() == MVT::f16)
38971 return SDValue();
38973 assert((RootSizeInBits % VT.getSizeInBits()) == 0 &&
38974 "Can only combine shuffles upto size of the root op.");
38976 // Create a demanded elts mask from the referenced elements of Op.
38977 APInt OpDemandedElts = APInt::getZero(RootMask.size());
38978 for (int M : RootMask) {
38979 int BaseIdx = RootMask.size() * SrcOpIndex;
38980 if (isInRange(M, BaseIdx, BaseIdx + RootMask.size()))
38981 OpDemandedElts.setBit(M - BaseIdx);
38983 if (RootSizeInBits != VT.getSizeInBits()) {
38984 // Op is smaller than Root - extract the demanded elts for the subvector.
38985 unsigned Scale = RootSizeInBits / VT.getSizeInBits();
38986 unsigned NumOpMaskElts = RootMask.size() / Scale;
38987 assert((RootMask.size() % Scale) == 0 && "Root mask size mismatch");
38988 assert(OpDemandedElts
38989 .extractBits(RootMask.size() - NumOpMaskElts, NumOpMaskElts)
38990 .isZero() &&
38991 "Out of range elements referenced in root mask");
38992 OpDemandedElts = OpDemandedElts.extractBits(NumOpMaskElts, 0);
38994 OpDemandedElts =
38995 APIntOps::ScaleBitMask(OpDemandedElts, VT.getVectorNumElements());
38997 // Extract target shuffle mask and resolve sentinels and inputs.
38998 SmallVector<int, 64> OpMask;
38999 SmallVector<SDValue, 2> OpInputs;
39000 APInt OpUndef, OpZero;
39001 bool IsOpVariableMask = isTargetShuffleVariableMask(Op.getOpcode());
39002 if (getTargetShuffleInputs(Op, OpDemandedElts, OpInputs, OpMask, OpUndef,
39003 OpZero, DAG, Depth, false)) {
39004 // Shuffle inputs must not be larger than the shuffle result.
39005 // TODO: Relax this for single input faux shuffles (e.g. trunc).
39006 if (llvm::any_of(OpInputs, [VT](SDValue OpInput) {
39007 return OpInput.getValueSizeInBits() > VT.getSizeInBits();
39009 return SDValue();
39010 } else if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
39011 (RootSizeInBits % Op.getOperand(0).getValueSizeInBits()) == 0 &&
39012 !isNullConstant(Op.getOperand(1))) {
39013 SDValue SrcVec = Op.getOperand(0);
39014 int ExtractIdx = Op.getConstantOperandVal(1);
39015 unsigned NumElts = VT.getVectorNumElements();
39016 OpInputs.assign({SrcVec});
39017 OpMask.assign(NumElts, SM_SentinelUndef);
39018 std::iota(OpMask.begin(), OpMask.end(), ExtractIdx);
39019 OpZero = OpUndef = APInt::getZero(NumElts);
39020 } else {
39021 return SDValue();
39024 // If the shuffle result was smaller than the root, we need to adjust the
39025 // mask indices and pad the mask with undefs.
39026 if (RootSizeInBits > VT.getSizeInBits()) {
39027 unsigned NumSubVecs = RootSizeInBits / VT.getSizeInBits();
39028 unsigned OpMaskSize = OpMask.size();
39029 if (OpInputs.size() > 1) {
39030 unsigned PaddedMaskSize = NumSubVecs * OpMaskSize;
39031 for (int &M : OpMask) {
39032 if (M < 0)
39033 continue;
39034 int EltIdx = M % OpMaskSize;
39035 int OpIdx = M / OpMaskSize;
39036 M = (PaddedMaskSize * OpIdx) + EltIdx;
39039 OpZero = OpZero.zext(NumSubVecs * OpMaskSize);
39040 OpUndef = OpUndef.zext(NumSubVecs * OpMaskSize);
39041 OpMask.append((NumSubVecs - 1) * OpMaskSize, SM_SentinelUndef);
39044 SmallVector<int, 64> Mask;
39045 SmallVector<SDValue, 16> Ops;
39047 // We don't need to merge masks if the root is empty.
39048 bool EmptyRoot = (Depth == 0) && (RootMask.size() == 1);
39049 if (EmptyRoot) {
39050 // Only resolve zeros if it will remove an input; otherwise we might end
39051 // up in an infinite loop.
39052 bool ResolveKnownZeros = true;
39053 if (!OpZero.isZero()) {
39054 APInt UsedInputs = APInt::getZero(OpInputs.size());
39055 for (int i = 0, e = OpMask.size(); i != e; ++i) {
39056 int M = OpMask[i];
39057 if (OpUndef[i] || OpZero[i] || isUndefOrZero(M))
39058 continue;
39059 UsedInputs.setBit(M / OpMask.size());
39060 if (UsedInputs.isAllOnes()) {
39061 ResolveKnownZeros = false;
39062 break;
39066 resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero,
39067 ResolveKnownZeros);
39069 Mask = OpMask;
39070 Ops.append(OpInputs.begin(), OpInputs.end());
39071 } else {
39072 resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero);
39074 // Add the inputs to the Ops list, avoiding duplicates.
39075 Ops.append(SrcOps.begin(), SrcOps.end());
39077 auto AddOp = [&Ops](SDValue Input, int InsertionPoint) -> int {
39078 // Attempt to find an existing match.
39079 SDValue InputBC = peekThroughBitcasts(Input);
39080 for (int i = 0, e = Ops.size(); i < e; ++i)
39081 if (InputBC == peekThroughBitcasts(Ops[i]))
39082 return i;
39083 // Match failed - should we replace an existing Op?
39084 if (InsertionPoint >= 0) {
39085 Ops[InsertionPoint] = Input;
39086 return InsertionPoint;
39088 // Add to the end of the Ops list.
39089 Ops.push_back(Input);
39090 return Ops.size() - 1;
39093 SmallVector<int, 2> OpInputIdx;
39094 for (SDValue OpInput : OpInputs)
39095 OpInputIdx.push_back(
39096 AddOp(OpInput, OpInputIdx.empty() ? SrcOpIndex : -1));
39098 assert(((RootMask.size() > OpMask.size() &&
39099 RootMask.size() % OpMask.size() == 0) ||
39100 (OpMask.size() > RootMask.size() &&
39101 OpMask.size() % RootMask.size() == 0) ||
39102 OpMask.size() == RootMask.size()) &&
39103 "The smaller number of elements must divide the larger.");
39105 // This function can be performance-critical, so we rely on the power-of-2
39106 // knowledge that we have about the mask sizes to replace div/rem ops with
39107 // bit-masks and shifts.
39108 assert(llvm::has_single_bit<uint32_t>(RootMask.size()) &&
39109 "Non-power-of-2 shuffle mask sizes");
39110 assert(llvm::has_single_bit<uint32_t>(OpMask.size()) &&
39111 "Non-power-of-2 shuffle mask sizes");
39112 unsigned RootMaskSizeLog2 = llvm::countr_zero(RootMask.size());
39113 unsigned OpMaskSizeLog2 = llvm::countr_zero(OpMask.size());
39115 unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size());
39116 unsigned RootRatio =
39117 std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2);
39118 unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2);
39119 assert((RootRatio == 1 || OpRatio == 1) &&
39120 "Must not have a ratio for both incoming and op masks!");
39122 assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes");
39123 assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes");
39124 assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes");
39125 unsigned RootRatioLog2 = llvm::countr_zero(RootRatio);
39126 unsigned OpRatioLog2 = llvm::countr_zero(OpRatio);
39128 Mask.resize(MaskWidth, SM_SentinelUndef);
39130 // Merge this shuffle operation's mask into our accumulated mask. Note that
39131 // this shuffle's mask will be the first applied to the input, followed by
39132 // the root mask to get us all the way to the root value arrangement. The
39133 // reason for this order is that we are recursing up the operation chain.
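// Worked example (illustrative): merging a v8i16 op shuffle into a v4i32 root
// shuffle on a 128-bit vector gives MaskWidth = 8, RootRatio = 2, OpRatio = 1.
// With RootMask = <1,0,3,2> (swap dword pairs) and OpMask = <1,0,3,2,5,4,7,6>
// (swap word pairs), the merged word mask is <3,2,1,0,7,6,5,4>. For
// same-width masks over a single source this reduces to roughly
// Mask[i] = OpMask[RootMask[i]], plus the per-input offset.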
39134 for (unsigned i = 0; i < MaskWidth; ++i) {
39135 unsigned RootIdx = i >> RootRatioLog2;
39136 if (RootMask[RootIdx] < 0) {
39137 // This is a zero or undef lane, we're done.
39138 Mask[i] = RootMask[RootIdx];
39139 continue;
39142 unsigned RootMaskedIdx =
39143 RootRatio == 1
39144 ? RootMask[RootIdx]
39145 : (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1));
39147 // Just insert the scaled root mask value if it references an input other
39148 // than the SrcOp we're currently inserting.
39149 if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
39150 (((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
39151 Mask[i] = RootMaskedIdx;
39152 continue;
39155 RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1);
39156 unsigned OpIdx = RootMaskedIdx >> OpRatioLog2;
39157 if (OpMask[OpIdx] < 0) {
39158 // The incoming lanes are zero or undef, it doesn't matter which ones we
39159 // are using.
39160 Mask[i] = OpMask[OpIdx];
39161 continue;
39164 // Ok, we have non-zero lanes, map them through to one of the Op's inputs.
39165 unsigned OpMaskedIdx = OpRatio == 1 ? OpMask[OpIdx]
39166 : (OpMask[OpIdx] << OpRatioLog2) +
39167 (RootMaskedIdx & (OpRatio - 1));
39169 OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1);
39170 int InputIdx = OpMask[OpIdx] / (int)OpMask.size();
39171 assert(0 <= OpInputIdx[InputIdx] && "Unknown target shuffle input");
39172 OpMaskedIdx += OpInputIdx[InputIdx] * MaskWidth;
39174 Mask[i] = OpMaskedIdx;
39178 // Peek through vector widenings and set out of bounds mask indices to undef.
39179 // TODO: Can resolveTargetShuffleInputsAndMask do some of this?
39180 for (unsigned I = 0, E = Ops.size(); I != E; ++I) {
39181 SDValue &Op = Ops[I];
39182 if (Op.getOpcode() == ISD::INSERT_SUBVECTOR && Op.getOperand(0).isUndef() &&
39183 isNullConstant(Op.getOperand(2))) {
39184 Op = Op.getOperand(1);
39185 unsigned Scale = RootSizeInBits / Op.getValueSizeInBits();
39186 int Lo = I * Mask.size();
39187 int Hi = (I + 1) * Mask.size();
39188 int NewHi = Lo + (Mask.size() / Scale);
39189 for (int &M : Mask) {
39190 if (Lo <= M && NewHi <= M && M < Hi)
39191 M = SM_SentinelUndef;
39196 // Peek through any free extract_subvector nodes back to root size.
39197 for (SDValue &Op : Ops)
39198 while (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
39199 (RootSizeInBits % Op.getOperand(0).getValueSizeInBits()) == 0 &&
39200 isNullConstant(Op.getOperand(1)))
39201 Op = Op.getOperand(0);
39203 // Remove unused/repeated shuffle source ops.
39204 resolveTargetShuffleInputsAndMask(Ops, Mask);
39206 // Handle the all undef/zero/ones cases early.
39207 if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; }))
39208 return DAG.getUNDEF(RootVT);
39209 if (all_of(Mask, [](int Idx) { return Idx < 0; }))
39210 return getZeroVector(RootVT, Subtarget, DAG, SDLoc(Root));
39211 if (Ops.size() == 1 && ISD::isBuildVectorAllOnes(Ops[0].getNode()) &&
39212 !llvm::is_contained(Mask, SM_SentinelZero))
39213 return getOnesVector(RootVT, DAG, SDLoc(Root));
39215 assert(!Ops.empty() && "Shuffle with no inputs detected");
39216 HasVariableMask |= IsOpVariableMask;
39218 // Update the list of shuffle nodes that have been combined so far.
39219 SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
39220 SrcNodes.end());
39221 CombinedNodes.push_back(Op.getNode());
39223 // See if we can recurse into each shuffle source op (if it's a target
39224 // shuffle). The source op should only be generally combined if it either has
39225 // a single use (i.e. the current Op) or all its users have already been
39226 // combined; if not, we can still combine but should prevent generation of
39227 // variable shuffles to avoid constant pool bloat.
39228 // Don't recurse if we already have more source ops than we can combine in
39229 // the remaining recursion depth.
39230 if (Ops.size() < (MaxDepth - Depth)) {
39231 for (int i = 0, e = Ops.size(); i < e; ++i) {
39232 // For empty roots, we need to resolve zeroable elements before combining
39233 // them with other shuffles.
39234 SmallVector<int, 64> ResolvedMask = Mask;
39235 if (EmptyRoot)
39236 resolveTargetShuffleFromZeroables(ResolvedMask, OpUndef, OpZero);
39237 bool AllowCrossLaneVar = false;
39238 bool AllowPerLaneVar = false;
39239 if (Ops[i].getNode()->hasOneUse() ||
39240 SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode())) {
39241 AllowCrossLaneVar = AllowVariableCrossLaneMask;
39242 AllowPerLaneVar = AllowVariablePerLaneMask;
39244 if (SDValue Res = combineX86ShufflesRecursively(
39245 Ops, i, Root, ResolvedMask, CombinedNodes, Depth + 1, MaxDepth,
39246 HasVariableMask, AllowCrossLaneVar, AllowPerLaneVar, DAG,
39247 Subtarget))
39248 return Res;
39252 // Attempt to constant fold all of the constant source ops.
39253 if (SDValue Cst = combineX86ShufflesConstants(
39254 Ops, Mask, Root, HasVariableMask, DAG, Subtarget))
39255 return Cst;
39257 // If constant folding failed and we only have constants, then we have
39258 // multiple uses by a single non-variable shuffle - just bail.
39259 if (Depth == 0 && llvm::all_of(Ops, [&](SDValue Op) {
39260 APInt UndefElts;
39261 SmallVector<APInt> RawBits;
39262 unsigned EltSizeInBits = RootSizeInBits / Mask.size();
39263 return getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
39264 RawBits);
39265 })) {
39266 return SDValue();
39269 // Canonicalize the combined shuffle mask chain with horizontal ops.
39270 // NOTE: This will update the Ops and Mask.
39271 if (SDValue HOp = canonicalizeShuffleMaskWithHorizOp(
39272 Ops, Mask, RootSizeInBits, SDLoc(Root), DAG, Subtarget))
39273 return DAG.getBitcast(RootVT, HOp);
39275 // Try to refine our inputs given our knowledge of target shuffle mask.
39276 for (auto I : enumerate(Ops)) {
39277 int OpIdx = I.index();
39278 SDValue &Op = I.value();
39280 // What range of shuffle mask element values results in picking from Op?
39281 int Lo = OpIdx * Mask.size();
39282 int Hi = Lo + Mask.size();
39284 // Which elements of Op do we demand, given the mask's granularity?
39285 APInt OpDemandedElts(Mask.size(), 0);
39286 for (int MaskElt : Mask) {
39287 if (isInRange(MaskElt, Lo, Hi)) { // Picks from Op?
39288 int OpEltIdx = MaskElt - Lo;
39289 OpDemandedElts.setBit(OpEltIdx);
39293 // Is the shuffle result smaller than the root?
39294 if (Op.getValueSizeInBits() < RootSizeInBits) {
39295 // We padded the mask with undefs. But we now need to undo that.
39296 unsigned NumExpectedVectorElts = Mask.size();
39297 unsigned EltSizeInBits = RootSizeInBits / NumExpectedVectorElts;
39298 unsigned NumOpVectorElts = Op.getValueSizeInBits() / EltSizeInBits;
39299 assert(!OpDemandedElts.extractBits(
39300 NumExpectedVectorElts - NumOpVectorElts, NumOpVectorElts) &&
39301 "Demanding the virtual undef widening padding?");
39302 OpDemandedElts = OpDemandedElts.trunc(NumOpVectorElts); // NUW
39305 // The Op itself may be of different VT, so we need to scale the mask.
39306 unsigned NumOpElts = Op.getValueType().getVectorNumElements();
39307 APInt OpScaledDemandedElts = APIntOps::ScaleBitMask(OpDemandedElts, NumOpElts);
39309 // Can this operand be simplified any further, given its demanded elements?
39310 if (SDValue NewOp =
39311 DAG.getTargetLoweringInfo().SimplifyMultipleUseDemandedVectorElts(
39312 Op, OpScaledDemandedElts, DAG))
39313 Op = NewOp;
39315 // FIXME: should we rerun resolveTargetShuffleInputsAndMask() now?
39317 // Widen any subvector shuffle inputs we've collected.
39318 // TODO: Remove this to avoid generating temporary nodes, we should only
39319 // widen once combineX86ShuffleChain has found a match.
39320 if (any_of(Ops, [RootSizeInBits](SDValue Op) {
39321 return Op.getValueSizeInBits() < RootSizeInBits;
39322 })) {
39323 for (SDValue &Op : Ops)
39324 if (Op.getValueSizeInBits() < RootSizeInBits)
39325 Op = widenSubVector(Op, false, Subtarget, DAG, SDLoc(Op),
39326 RootSizeInBits);
39327 // Reresolve - we might have repeated subvector sources.
39328 resolveTargetShuffleInputsAndMask(Ops, Mask);
39331 // We can only combine unary and binary shuffle mask cases.
39332 if (Ops.size() <= 2) {
39333 // Minor canonicalization of the accumulated shuffle mask to make it easier
39334 // to match below. All this does is detect masks with sequential pairs of
39335 // elements, and shrink them to the half-width mask. It does this in a loop
39336 // so it will reduce the size of the mask to the minimal width mask which
39337 // performs an equivalent shuffle.
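// For example (illustrative): a v4i32 mask <0,1,4,5> pairs up into the v2i64
// mask <0,2>; canWidenShuffleElements keeps halving the mask until no further
// sequential pairs remain.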
39338 while (Mask.size() > 1) {
39339 SmallVector<int, 64> WidenedMask;
39340 if (!canWidenShuffleElements(Mask, WidenedMask))
39341 break;
39342 Mask = std::move(WidenedMask);
39345 // Canonicalization of binary shuffle masks to improve pattern matching by
39346 // commuting the inputs.
39347 if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
39348 ShuffleVectorSDNode::commuteMask(Mask);
39349 std::swap(Ops[0], Ops[1]);
39352 // Try to combine into a single shuffle instruction.
39353 if (SDValue Shuffle = combineX86ShuffleChain(
39354 Ops, Root, Mask, Depth, HasVariableMask, AllowVariableCrossLaneMask,
39355 AllowVariablePerLaneMask, DAG, Subtarget))
39356 return Shuffle;
39358 // If all the operands come from the same larger vector, fall through and try
39359 // to use combineX86ShuffleChainWithExtract.
39360 SDValue LHS = peekThroughBitcasts(Ops.front());
39361 SDValue RHS = peekThroughBitcasts(Ops.back());
39362 if (Ops.size() != 2 || !Subtarget.hasAVX2() || RootSizeInBits != 128 ||
39363 (RootSizeInBits / Mask.size()) != 64 ||
39364 LHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
39365 RHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
39366 LHS.getOperand(0) != RHS.getOperand(0))
39367 return SDValue();
39370 // If that failed and any input is extracted then try to combine as a
39371 // shuffle with the larger type.
39372 return combineX86ShuffleChainWithExtract(
39373 Ops, Root, Mask, Depth, HasVariableMask, AllowVariableCrossLaneMask,
39374 AllowVariablePerLaneMask, DAG, Subtarget);
39377 /// Helper entry wrapper to combineX86ShufflesRecursively.
39378 static SDValue combineX86ShufflesRecursively(SDValue Op, SelectionDAG &DAG,
39379 const X86Subtarget &Subtarget) {
39380 return combineX86ShufflesRecursively(
39381 {Op}, 0, Op, {0}, {}, /*Depth*/ 0, X86::MaxShuffleCombineDepth,
39382 /*HasVarMask*/ false,
39383 /*AllowCrossLaneVarMask*/ true, /*AllowPerLaneVarMask*/ true, DAG,
39384 Subtarget);
39387 /// Get the PSHUF-style mask from a PSHUF node.
39389 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
39390 /// PSHUF-style masks that can be reused with such instructions.
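/// For example (illustrative): for a v8i16 PSHUFHW with mask <0,1,2,3,7,6,5,4>
/// this returns the rebased high-half mask <3,2,1,0>; for PSHUFLW only the low
/// four mask elements are kept.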
39391 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
39392 MVT VT = N.getSimpleValueType();
39393 SmallVector<int, 4> Mask;
39394 SmallVector<SDValue, 2> Ops;
39395 bool HaveMask =
39396 getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask);
39397 (void)HaveMask;
39398 assert(HaveMask);
39400 // If we have more than 128 bits, only the low 128 bits of the shuffle mask
39401 // matter. Check that the upper masks are repeats and remove them.
39402 if (VT.getSizeInBits() > 128) {
39403 int LaneElts = 128 / VT.getScalarSizeInBits();
39404 #ifndef NDEBUG
39405 for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
39406 for (int j = 0; j < LaneElts; ++j)
39407 assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
39408 "Mask doesn't repeat in high 128-bit lanes!");
39409 #endif
39410 Mask.resize(LaneElts);
39413 switch (N.getOpcode()) {
39414 case X86ISD::PSHUFD:
39415 return Mask;
39416 case X86ISD::PSHUFLW:
39417 Mask.resize(4);
39418 return Mask;
39419 case X86ISD::PSHUFHW:
39420 Mask.erase(Mask.begin(), Mask.begin() + 4);
39421 for (int &M : Mask)
39422 M -= 4;
39423 return Mask;
39424 default:
39425 llvm_unreachable("No valid shuffle instruction found!");
39429 /// Search for a combinable shuffle across a chain ending in pshufd.
39431 /// We walk up the chain and look for a combinable shuffle, skipping over
39432 /// shuffles that we could hoist this shuffle's transformation past without
39433 /// altering anything.
39434 static SDValue
39435 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
39436 SelectionDAG &DAG) {
39437 assert(N.getOpcode() == X86ISD::PSHUFD &&
39438 "Called with something other than an x86 128-bit half shuffle!");
39439 SDLoc DL(N);
39441 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
39442 // of the shuffles in the chain so that we can form a fresh chain to replace
39443 // this one.
39444 SmallVector<SDValue, 8> Chain;
39445 SDValue V = N.getOperand(0);
39446 for (; V.hasOneUse(); V = V.getOperand(0)) {
39447 switch (V.getOpcode()) {
39448 default:
39449 return SDValue(); // Nothing combined!
39451 case ISD::BITCAST:
39452 // Skip bitcasts as we always know the type for the target-specific
39453 // instructions.
39454 continue;
39456 case X86ISD::PSHUFD:
39457 // Found another dword shuffle.
39458 break;
39460 case X86ISD::PSHUFLW:
39461 // Check that the low words (being shuffled) are the identity in the
39462 // dword shuffle, and the high words are self-contained.
39463 if (Mask[0] != 0 || Mask[1] != 1 ||
39464 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
39465 return SDValue();
39467 Chain.push_back(V);
39468 continue;
39470 case X86ISD::PSHUFHW:
39471 // Check that the high words (being shuffled) are the identity in the
39472 // dword shuffle, and the low words are self-contained.
39473 if (Mask[2] != 2 || Mask[3] != 3 ||
39474 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
39475 return SDValue();
39477 Chain.push_back(V);
39478 continue;
39480 case X86ISD::UNPCKL:
39481 case X86ISD::UNPCKH:
39482 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
39483 // shuffle into a preceding word shuffle.
39484 if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
39485 V.getSimpleValueType().getVectorElementType() != MVT::i16)
39486 return SDValue();
39488 // Search for a half-shuffle which we can combine with.
39489 unsigned CombineOp =
39490 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
39491 if (V.getOperand(0) != V.getOperand(1) ||
39492 !V->isOnlyUserOf(V.getOperand(0).getNode()))
39493 return SDValue();
39494 Chain.push_back(V);
39495 V = V.getOperand(0);
39496 do {
39497 switch (V.getOpcode()) {
39498 default:
39499 return SDValue(); // Nothing to combine.
39501 case X86ISD::PSHUFLW:
39502 case X86ISD::PSHUFHW:
39503 if (V.getOpcode() == CombineOp)
39504 break;
39506 Chain.push_back(V);
39508 [[fallthrough]];
39509 case ISD::BITCAST:
39510 V = V.getOperand(0);
39511 continue;
39513 break;
39514 } while (V.hasOneUse());
39515 break;
39517 // Break out of the loop if we break out of the switch.
39518 break;
39521 if (!V.hasOneUse())
39522 // We fell out of the loop without finding a viable combining instruction.
39523 return SDValue();
39525 // Merge this node's mask and our incoming mask.
39526 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
39527 for (int &M : Mask)
39528 M = VMask[M];
39529 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
39530 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
39532 // Rebuild the chain around this new shuffle.
39533 while (!Chain.empty()) {
39534 SDValue W = Chain.pop_back_val();
39536 if (V.getValueType() != W.getOperand(0).getValueType())
39537 V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
39539 switch (W.getOpcode()) {
39540 default:
39541 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
39543 case X86ISD::UNPCKL:
39544 case X86ISD::UNPCKH:
39545 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
39546 break;
39548 case X86ISD::PSHUFD:
39549 case X86ISD::PSHUFLW:
39550 case X86ISD::PSHUFHW:
39551 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
39552 break;
39555 if (V.getValueType() != N.getValueType())
39556 V = DAG.getBitcast(N.getValueType(), V);
39558 // Return the new chain to replace N.
39559 return V;
39562 // Attempt to commute shufps LHS loads:
39563 // permilps(shufps(load(),x)) --> permilps(shufps(x,load()))
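// Commuting SHUFP swaps the nibbles of its immediate (the low nibble selects
// from operand 0, the high nibble from operand 1), which also swaps the two
// 64-bit halves of each 128-bit lane of the result. The ^0xAA / ^0x0A / ^0xA0
// adjustments below flip bit 1 of the affected 2-bit indices in the outer
// shuffle immediate to compensate.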
39564 static SDValue combineCommutableSHUFP(SDValue N, MVT VT, const SDLoc &DL,
39565 SelectionDAG &DAG) {
39566 // TODO: Add vXf64 support.
39567 if (VT != MVT::v4f32 && VT != MVT::v8f32 && VT != MVT::v16f32)
39568 return SDValue();
39570 // SHUFP(LHS, RHS) -> SHUFP(RHS, LHS) iff LHS is foldable + RHS is not.
39571 auto commuteSHUFP = [&VT, &DL, &DAG](SDValue Parent, SDValue V) {
39572 if (V.getOpcode() != X86ISD::SHUFP || !Parent->isOnlyUserOf(V.getNode()))
39573 return SDValue();
39574 SDValue N0 = V.getOperand(0);
39575 SDValue N1 = V.getOperand(1);
39576 unsigned Imm = V.getConstantOperandVal(2);
39577 const X86Subtarget &Subtarget = DAG.getSubtarget<X86Subtarget>();
39578 if (!X86::mayFoldLoad(peekThroughOneUseBitcasts(N0), Subtarget) ||
39579 X86::mayFoldLoad(peekThroughOneUseBitcasts(N1), Subtarget))
39580 return SDValue();
39581 Imm = ((Imm & 0x0F) << 4) | ((Imm & 0xF0) >> 4);
39582 return DAG.getNode(X86ISD::SHUFP, DL, VT, N1, N0,
39583 DAG.getTargetConstant(Imm, DL, MVT::i8));
39586 switch (N.getOpcode()) {
39587 case X86ISD::VPERMILPI:
39588 if (SDValue NewSHUFP = commuteSHUFP(N, N.getOperand(0))) {
39589 unsigned Imm = N.getConstantOperandVal(1);
39590 return DAG.getNode(X86ISD::VPERMILPI, DL, VT, NewSHUFP,
39591 DAG.getTargetConstant(Imm ^ 0xAA, DL, MVT::i8));
39593 break;
39594 case X86ISD::SHUFP: {
39595 SDValue N0 = N.getOperand(0);
39596 SDValue N1 = N.getOperand(1);
39597 unsigned Imm = N.getConstantOperandVal(2);
39598 if (N0 == N1) {
39599 if (SDValue NewSHUFP = commuteSHUFP(N, N0))
39600 return DAG.getNode(X86ISD::SHUFP, DL, VT, NewSHUFP, NewSHUFP,
39601 DAG.getTargetConstant(Imm ^ 0xAA, DL, MVT::i8));
39602 } else if (SDValue NewSHUFP = commuteSHUFP(N, N0)) {
39603 return DAG.getNode(X86ISD::SHUFP, DL, VT, NewSHUFP, N1,
39604 DAG.getTargetConstant(Imm ^ 0x0A, DL, MVT::i8));
39605 } else if (SDValue NewSHUFP = commuteSHUFP(N, N1)) {
39606 return DAG.getNode(X86ISD::SHUFP, DL, VT, N0, NewSHUFP,
39607 DAG.getTargetConstant(Imm ^ 0xA0, DL, MVT::i8));
39609 break;
39613 return SDValue();
39616 // TODO - move this to TLI like isBinOp?
39617 static bool isUnaryOp(unsigned Opcode) {
39618 switch (Opcode) {
39619 case ISD::CTLZ:
39620 case ISD::CTTZ:
39621 case ISD::CTPOP:
39622 return true;
39624 return false;
39627 // Canonicalize SHUFFLE(UNARYOP(X)) -> UNARYOP(SHUFFLE(X)).
39628 // Canonicalize SHUFFLE(BINOP(X,Y)) -> BINOP(SHUFFLE(X),SHUFFLE(Y)).
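// For example (illustrative): PSHUFD(ADD(X, SplatC)) can become
// ADD(PSHUFD(X), PSHUFD(SplatC)); the shuffled constant/splat folds away, so
// the total number of shuffles does not grow. IsMergeableWithShuffle below
// encodes which operands are considered free to shuffle.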
39629 static SDValue canonicalizeShuffleWithOp(SDValue N, SelectionDAG &DAG,
39630 const SDLoc &DL) {
39631 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39632 EVT ShuffleVT = N.getValueType();
39634 auto IsMergeableWithShuffle = [&DAG](SDValue Op, bool FoldLoad = false) {
39635 // AllZeros/AllOnes constants are freely shuffled and will peek through
39636 // bitcasts. Other constant build vectors do not peek through bitcasts. Only
39637 // merge with target shuffles if it has one use so shuffle combining is
39638 // likely to kick in. Shuffles of splats are expected to be removed.
39639 return ISD::isBuildVectorAllOnes(Op.getNode()) ||
39640 ISD::isBuildVectorAllZeros(Op.getNode()) ||
39641 ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) ||
39642 ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode()) ||
39643 getTargetConstantFromNode(dyn_cast<LoadSDNode>(Op)) ||
39644 (Op.getOpcode() == ISD::INSERT_SUBVECTOR && Op->hasOneUse()) ||
39645 (isTargetShuffle(Op.getOpcode()) && Op->hasOneUse()) ||
39646 (FoldLoad && isShuffleFoldableLoad(Op)) ||
39647 DAG.isSplatValue(Op, /*AllowUndefs*/ false);
39649 auto IsSafeToMoveShuffle = [ShuffleVT](SDValue Op, unsigned BinOp) {
39650 // Ensure we only shuffle whole vector src elements, unless it's a logical
39651 // binop where we can more aggressively move shuffles from dst to src.
39652 return BinOp == ISD::AND || BinOp == ISD::OR || BinOp == ISD::XOR ||
39653 BinOp == X86ISD::ANDNP ||
39654 (Op.getScalarValueSizeInBits() <= ShuffleVT.getScalarSizeInBits());
39657 unsigned Opc = N.getOpcode();
39658 switch (Opc) {
39659 // Unary and Unary+Permute Shuffles.
39660 case X86ISD::PSHUFB: {
39661 // Don't merge PSHUFB if it contains zero'd elements.
39662 SmallVector<int> Mask;
39663 SmallVector<SDValue> Ops;
39664 if (!getTargetShuffleMask(N.getNode(), ShuffleVT.getSimpleVT(), false, Ops,
39665 Mask))
39666 break;
39667 [[fallthrough]];
39669 case X86ISD::VBROADCAST:
39670 case X86ISD::MOVDDUP:
39671 case X86ISD::PSHUFD:
39672 case X86ISD::PSHUFHW:
39673 case X86ISD::PSHUFLW:
39674 case X86ISD::VPERMI:
39675 case X86ISD::VPERMILPI: {
39676 if (N.getOperand(0).getValueType() == ShuffleVT &&
39677 N->isOnlyUserOf(N.getOperand(0).getNode())) {
39678 SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
39679 unsigned SrcOpcode = N0.getOpcode();
39680 if (TLI.isBinOp(SrcOpcode) && IsSafeToMoveShuffle(N0, SrcOpcode)) {
39681 SDValue Op00 = peekThroughOneUseBitcasts(N0.getOperand(0));
39682 SDValue Op01 = peekThroughOneUseBitcasts(N0.getOperand(1));
39683 if (IsMergeableWithShuffle(Op00, Opc != X86ISD::PSHUFB) ||
39684 IsMergeableWithShuffle(Op01, Opc != X86ISD::PSHUFB)) {
39685 SDValue LHS, RHS;
39686 Op00 = DAG.getBitcast(ShuffleVT, Op00);
39687 Op01 = DAG.getBitcast(ShuffleVT, Op01);
39688 if (N.getNumOperands() == 2) {
39689 LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, N.getOperand(1));
39690 RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, N.getOperand(1));
39691 } else {
39692 LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00);
39693 RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01);
39695 EVT OpVT = N0.getValueType();
39696 return DAG.getBitcast(ShuffleVT,
39697 DAG.getNode(SrcOpcode, DL, OpVT,
39698 DAG.getBitcast(OpVT, LHS),
39699 DAG.getBitcast(OpVT, RHS)));
39703 break;
39705 // Binary and Binary+Permute Shuffles.
39706 case X86ISD::INSERTPS: {
39707 // Don't merge INSERTPS if it contains zero'd elements.
39708 unsigned InsertPSMask = N.getConstantOperandVal(2);
39709 unsigned ZeroMask = InsertPSMask & 0xF;
39710 if (ZeroMask != 0)
39711 break;
39712 [[fallthrough]];
39714 case X86ISD::MOVSD:
39715 case X86ISD::MOVSS:
39716 case X86ISD::BLENDI:
39717 case X86ISD::SHUFP:
39718 case X86ISD::UNPCKH:
39719 case X86ISD::UNPCKL: {
39720 if (N->isOnlyUserOf(N.getOperand(0).getNode()) &&
39721 N->isOnlyUserOf(N.getOperand(1).getNode())) {
39722 SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
39723 SDValue N1 = peekThroughOneUseBitcasts(N.getOperand(1));
39724 unsigned SrcOpcode = N0.getOpcode();
39725 if (TLI.isBinOp(SrcOpcode) && N1.getOpcode() == SrcOpcode &&
39726 N0.getValueType() == N1.getValueType() &&
39727 IsSafeToMoveShuffle(N0, SrcOpcode) &&
39728 IsSafeToMoveShuffle(N1, SrcOpcode)) {
39729 SDValue Op00 = peekThroughOneUseBitcasts(N0.getOperand(0));
39730 SDValue Op10 = peekThroughOneUseBitcasts(N1.getOperand(0));
39731 SDValue Op01 = peekThroughOneUseBitcasts(N0.getOperand(1));
39732 SDValue Op11 = peekThroughOneUseBitcasts(N1.getOperand(1));
39733 // Ensure the total number of shuffles doesn't increase by folding this
39734 // shuffle through to the source ops.
39735 if (((IsMergeableWithShuffle(Op00) && IsMergeableWithShuffle(Op10)) ||
39736 (IsMergeableWithShuffle(Op01) && IsMergeableWithShuffle(Op11))) ||
39737 ((IsMergeableWithShuffle(Op00) || IsMergeableWithShuffle(Op10)) &&
39738 (IsMergeableWithShuffle(Op01) || IsMergeableWithShuffle(Op11)))) {
39739 SDValue LHS, RHS;
39740 Op00 = DAG.getBitcast(ShuffleVT, Op00);
39741 Op10 = DAG.getBitcast(ShuffleVT, Op10);
39742 Op01 = DAG.getBitcast(ShuffleVT, Op01);
39743 Op11 = DAG.getBitcast(ShuffleVT, Op11);
39744 if (N.getNumOperands() == 3) {
39745 LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10, N.getOperand(2));
39746 RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, Op11, N.getOperand(2));
39747 } else {
39748 LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10);
39749 RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, Op11);
39751 EVT OpVT = N0.getValueType();
39752 return DAG.getBitcast(ShuffleVT,
39753 DAG.getNode(SrcOpcode, DL, OpVT,
39754 DAG.getBitcast(OpVT, LHS),
39755 DAG.getBitcast(OpVT, RHS)));
39758 if (isUnaryOp(SrcOpcode) && N1.getOpcode() == SrcOpcode &&
39759 N0.getValueType() == N1.getValueType() &&
39760 IsSafeToMoveShuffle(N0, SrcOpcode) &&
39761 IsSafeToMoveShuffle(N1, SrcOpcode)) {
39762 SDValue Op00 = peekThroughOneUseBitcasts(N0.getOperand(0));
39763 SDValue Op10 = peekThroughOneUseBitcasts(N1.getOperand(0));
39764 SDValue Res;
39765 Op00 = DAG.getBitcast(ShuffleVT, Op00);
39766 Op10 = DAG.getBitcast(ShuffleVT, Op10);
39767 if (N.getNumOperands() == 3) {
39768 Res = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10, N.getOperand(2));
39769 } else {
39770 Res = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10);
39772 EVT OpVT = N0.getValueType();
39773 return DAG.getBitcast(
39774 ShuffleVT,
39775 DAG.getNode(SrcOpcode, DL, OpVT, DAG.getBitcast(OpVT, Res)));
39778 break;
39781 return SDValue();
39784 /// Attempt to fold vpermf128(op(),op()) -> op(vpermf128(),vpermf128()).
39785 static SDValue canonicalizeLaneShuffleWithRepeatedOps(SDValue V,
39786 SelectionDAG &DAG,
39787 const SDLoc &DL) {
39788 assert(V.getOpcode() == X86ISD::VPERM2X128 && "Unknown lane shuffle");
39790 MVT VT = V.getSimpleValueType();
39791 SDValue Src0 = peekThroughBitcasts(V.getOperand(0));
39792 SDValue Src1 = peekThroughBitcasts(V.getOperand(1));
39793 unsigned SrcOpc0 = Src0.getOpcode();
39794 unsigned SrcOpc1 = Src1.getOpcode();
39795 EVT SrcVT0 = Src0.getValueType();
39796 EVT SrcVT1 = Src1.getValueType();
39798 if (!Src1.isUndef() && (SrcVT0 != SrcVT1 || SrcOpc0 != SrcOpc1))
39799 return SDValue();
39801 switch (SrcOpc0) {
39802 case X86ISD::MOVDDUP: {
39803 SDValue LHS = Src0.getOperand(0);
39804 SDValue RHS = Src1.isUndef() ? Src1 : Src1.getOperand(0);
39805 SDValue Res =
39806 DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT0, LHS, RHS, V.getOperand(2));
39807 Res = DAG.getNode(SrcOpc0, DL, SrcVT0, Res);
39808 return DAG.getBitcast(VT, Res);
39810 case X86ISD::VPERMILPI:
39811 // TODO: Handle v4f64 permutes with different low/high lane masks.
39812 if (SrcVT0 == MVT::v4f64) {
39813 uint64_t Mask = Src0.getConstantOperandVal(1);
39814 if ((Mask & 0x3) != ((Mask >> 2) & 0x3))
39815 break;
39817 [[fallthrough]];
39818 case X86ISD::VSHLI:
39819 case X86ISD::VSRLI:
39820 case X86ISD::VSRAI:
39821 case X86ISD::PSHUFD:
39822 if (Src1.isUndef() || Src0.getOperand(1) == Src1.getOperand(1)) {
39823 SDValue LHS = Src0.getOperand(0);
39824 SDValue RHS = Src1.isUndef() ? Src1 : Src1.getOperand(0);
39825 SDValue Res = DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT0, LHS, RHS,
39826 V.getOperand(2));
39827 Res = DAG.getNode(SrcOpc0, DL, SrcVT0, Res, Src0.getOperand(1));
39828 return DAG.getBitcast(VT, Res);
39830 break;
39833 return SDValue();
39836 /// Try to combine x86 target specific shuffles.
39837 static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
39838 TargetLowering::DAGCombinerInfo &DCI,
39839 const X86Subtarget &Subtarget) {
39840 SDLoc DL(N);
39841 MVT VT = N.getSimpleValueType();
39842 SmallVector<int, 4> Mask;
39843 unsigned Opcode = N.getOpcode();
39845 if (SDValue R = combineCommutableSHUFP(N, VT, DL, DAG))
39846 return R;
39848 // Handle specific target shuffles.
39849 switch (Opcode) {
39850 case X86ISD::MOVDDUP: {
39851 SDValue Src = N.getOperand(0);
39852 // Turn a 128-bit MOVDDUP of a full vector load into movddup+vzload.
39853 if (VT == MVT::v2f64 && Src.hasOneUse() &&
39854 ISD::isNormalLoad(Src.getNode())) {
39855 LoadSDNode *LN = cast<LoadSDNode>(Src);
39856 if (SDValue VZLoad = narrowLoadToVZLoad(LN, MVT::f64, MVT::v2f64, DAG)) {
39857 SDValue Movddup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, VZLoad);
39858 DCI.CombineTo(N.getNode(), Movddup);
39859 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
39860 DCI.recursivelyDeleteUnusedNodes(LN);
39861 return N; // Return N so it doesn't get rechecked!
39865 return SDValue();
39867 case X86ISD::VBROADCAST: {
39868 SDValue Src = N.getOperand(0);
39869 SDValue BC = peekThroughBitcasts(Src);
39870 EVT SrcVT = Src.getValueType();
39871 EVT BCVT = BC.getValueType();
39873 // If broadcasting from another shuffle, attempt to simplify it.
39874 // TODO - we really need a general SimplifyDemandedVectorElts mechanism.
39875 if (isTargetShuffle(BC.getOpcode()) &&
39876 VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
39877 unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
39878 SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
39879 SM_SentinelUndef);
39880 for (unsigned i = 0; i != Scale; ++i)
39881 DemandedMask[i] = i;
39882 if (SDValue Res = combineX86ShufflesRecursively(
39883 {BC}, 0, BC, DemandedMask, {}, /*Depth*/ 0,
39884 X86::MaxShuffleCombineDepth,
39885 /*HasVarMask*/ false, /*AllowCrossLaneVarMask*/ true,
39886 /*AllowPerLaneVarMask*/ true, DAG, Subtarget))
39887 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
39888 DAG.getBitcast(SrcVT, Res));
39891 // broadcast(bitcast(src)) -> bitcast(broadcast(src))
39892 // 32-bit targets have to bitcast i64 to f64, so better to bitcast upward.
39893 if (Src.getOpcode() == ISD::BITCAST &&
39894 SrcVT.getScalarSizeInBits() == BCVT.getScalarSizeInBits() &&
39895 DAG.getTargetLoweringInfo().isTypeLegal(BCVT) &&
39896 FixedVectorType::isValidElementType(
39897 BCVT.getScalarType().getTypeForEVT(*DAG.getContext()))) {
39898 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), BCVT.getScalarType(),
39899 VT.getVectorNumElements());
39900 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
39903 // vbroadcast(bitcast(vbroadcast(src))) -> bitcast(vbroadcast(src))
39904 // If we're re-broadcasting a smaller type, then broadcast with that type and
39905 // bitcast.
39906 // TODO: Do this for any splat?
39907 if (Src.getOpcode() == ISD::BITCAST &&
39908 (BC.getOpcode() == X86ISD::VBROADCAST ||
39909 BC.getOpcode() == X86ISD::VBROADCAST_LOAD) &&
39910 (VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits()) == 0 &&
39911 (VT.getSizeInBits() % BCVT.getSizeInBits()) == 0) {
39912 MVT NewVT =
39913 MVT::getVectorVT(BCVT.getSimpleVT().getScalarType(),
39914 VT.getSizeInBits() / BCVT.getScalarSizeInBits());
39915 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
39918 // Reduce broadcast source vector to lowest 128-bits.
39919 if (SrcVT.getSizeInBits() > 128)
39920 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
39921 extract128BitVector(Src, 0, DAG, DL));
39923 // broadcast(scalar_to_vector(x)) -> broadcast(x).
39924 if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR &&
39925 Src.getValueType().getScalarType() == Src.getOperand(0).getValueType())
39926 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
39928 // broadcast(extract_vector_elt(x, 0)) -> broadcast(x).
39929 if (Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
39930 isNullConstant(Src.getOperand(1)) &&
39931 Src.getValueType() ==
39932 Src.getOperand(0).getValueType().getScalarType() &&
39933 DAG.getTargetLoweringInfo().isTypeLegal(
39934 Src.getOperand(0).getValueType()))
39935 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
39937 // Share broadcast with the longest vector and extract low subvector (free).
39938 // Ensure the same SDValue from the SDNode use is being used.
39939 for (SDNode *User : Src->uses())
39940 if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
39941 Src == User->getOperand(0) &&
39942 User->getValueSizeInBits(0).getFixedValue() >
39943 VT.getFixedSizeInBits()) {
39944 return extractSubVector(SDValue(User, 0), 0, DAG, DL,
39945 VT.getSizeInBits());
39948 // vbroadcast(scalarload X) -> vbroadcast_load X
39949 // For float loads, extract other uses of the scalar from the broadcast.
39950 if (!SrcVT.isVector() && (Src.hasOneUse() || VT.isFloatingPoint()) &&
39951 ISD::isNormalLoad(Src.getNode())) {
39952 LoadSDNode *LN = cast<LoadSDNode>(Src);
39953 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
39954 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
39955 SDValue BcastLd =
39956 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
39957 LN->getMemoryVT(), LN->getMemOperand());
39958 // If the load value is used only by N, replace it via CombineTo N.
39959 bool NoReplaceExtract = Src.hasOneUse();
39960 DCI.CombineTo(N.getNode(), BcastLd);
39961 if (NoReplaceExtract) {
39962 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
39963 DCI.recursivelyDeleteUnusedNodes(LN);
39964 } else {
39965 SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT, BcastLd,
39966 DAG.getIntPtrConstant(0, DL));
39967 DCI.CombineTo(LN, Scl, BcastLd.getValue(1));
39969 return N; // Return N so it doesn't get rechecked!
39972 // Due to isTypeDesirableForOp, we won't always shrink a load truncated to
39973 // i16. So shrink it ourselves if we can make a broadcast_load.
39974 if (SrcVT == MVT::i16 && Src.getOpcode() == ISD::TRUNCATE &&
39975 Src.hasOneUse() && Src.getOperand(0).hasOneUse()) {
39976 assert(Subtarget.hasAVX2() && "Expected AVX2");
39977 SDValue TruncIn = Src.getOperand(0);
39979 // If this is a truncate of a non-extending load, we can just narrow it to
39980 // use a broadcast_load.
39981 if (ISD::isNormalLoad(TruncIn.getNode())) {
39982 LoadSDNode *LN = cast<LoadSDNode>(TruncIn);
39984 // Unless it's volatile or atomic.
39984 if (LN->isSimple()) {
39985 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
39986 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
39987 SDValue BcastLd = DAG.getMemIntrinsicNode(
39988 X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
39989 LN->getPointerInfo(), LN->getOriginalAlign(),
39990 LN->getMemOperand()->getFlags());
39991 DCI.CombineTo(N.getNode(), BcastLd);
39992 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
39993 DCI.recursivelyDeleteUnusedNodes(Src.getNode());
39994 return N; // Return N so it doesn't get rechecked!
39998 // If this is a truncate of an i16 extload, we can directly replace it.
39999 if (ISD::isUNINDEXEDLoad(Src.getOperand(0).getNode()) &&
40000 ISD::isEXTLoad(Src.getOperand(0).getNode())) {
40001 LoadSDNode *LN = cast<LoadSDNode>(Src.getOperand(0));
40002 if (LN->getMemoryVT().getSizeInBits() == 16) {
40003 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
40004 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
40005 SDValue BcastLd =
40006 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
40007 LN->getMemoryVT(), LN->getMemOperand());
40008 DCI.CombineTo(N.getNode(), BcastLd);
40009 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
40010 DCI.recursivelyDeleteUnusedNodes(Src.getNode());
40011 return N; // Return N so it doesn't get rechecked!
40015 // If this is a truncate of a load that has been shifted right, we can
40016 // offset the pointer and use a narrower load.
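// For example (illustrative): broadcasting
// (i16 (trunc (srl (i32 (load p)), 16))) only needs the upper half of the
// 32-bit load, so it can instead broadcast an i16 load from p+2 (assuming a
// simple, non-volatile load).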
40017 if (TruncIn.getOpcode() == ISD::SRL &&
40018 TruncIn.getOperand(0).hasOneUse() &&
40019 isa<ConstantSDNode>(TruncIn.getOperand(1)) &&
40020 ISD::isNormalLoad(TruncIn.getOperand(0).getNode())) {
40021 LoadSDNode *LN = cast<LoadSDNode>(TruncIn.getOperand(0));
40022 unsigned ShiftAmt = TruncIn.getConstantOperandVal(1);
40023 // Make sure the shift amount and the load size are divisible by 16.
40024 // Don't do this if the load is volatile or atomic.
40025 if (ShiftAmt % 16 == 0 && TruncIn.getValueSizeInBits() % 16 == 0 &&
40026 LN->isSimple()) {
40027 unsigned Offset = ShiftAmt / 8;
40028 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
40029 SDValue Ptr = DAG.getMemBasePlusOffset(
40030 LN->getBasePtr(), TypeSize::getFixed(Offset), DL);
40031 SDValue Ops[] = { LN->getChain(), Ptr };
40032 SDValue BcastLd = DAG.getMemIntrinsicNode(
40033 X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
40034 LN->getPointerInfo().getWithOffset(Offset),
40035 LN->getOriginalAlign(),
40036 LN->getMemOperand()->getFlags());
40037 DCI.CombineTo(N.getNode(), BcastLd);
40038 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
40039 DCI.recursivelyDeleteUnusedNodes(Src.getNode());
40040 return N; // Return N so it doesn't get rechecked!
40045 // vbroadcast(vzload X) -> vbroadcast_load X
40046 if (Src.getOpcode() == X86ISD::VZEXT_LOAD && Src.hasOneUse()) {
40047 MemSDNode *LN = cast<MemIntrinsicSDNode>(Src);
40048 if (LN->getMemoryVT().getSizeInBits() == VT.getScalarSizeInBits()) {
40049 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
40050 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
40051 SDValue BcastLd =
40052 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
40053 LN->getMemoryVT(), LN->getMemOperand());
40054 DCI.CombineTo(N.getNode(), BcastLd);
40055 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
40056 DCI.recursivelyDeleteUnusedNodes(LN);
40057 return N; // Return N so it doesn't get rechecked!
40061 // vbroadcast(vector load X) -> vbroadcast_load
40062 if ((SrcVT == MVT::v2f64 || SrcVT == MVT::v4f32 || SrcVT == MVT::v2i64 ||
40063 SrcVT == MVT::v4i32) &&
40064 Src.hasOneUse() && ISD::isNormalLoad(Src.getNode())) {
40065 LoadSDNode *LN = cast<LoadSDNode>(Src);
40066 // Unless the load is volatile or atomic.
40067 if (LN->isSimple()) {
40068 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
40069 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
40070 SDValue BcastLd = DAG.getMemIntrinsicNode(
40071 X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, SrcVT.getScalarType(),
40072 LN->getPointerInfo(), LN->getOriginalAlign(),
40073 LN->getMemOperand()->getFlags());
40074 DCI.CombineTo(N.getNode(), BcastLd);
40075 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
40076 DCI.recursivelyDeleteUnusedNodes(LN);
40077 return N; // Return N so it doesn't get rechecked!
40081 return SDValue();
40083 case X86ISD::VZEXT_MOVL: {
40084 SDValue N0 = N.getOperand(0);
40086 // If this is a vzmovl of a full vector load, replace it with a vzload, unless
40087 // the load is volatile.
40088 if (N0.hasOneUse() && ISD::isNormalLoad(N0.getNode())) {
40089 auto *LN = cast<LoadSDNode>(N0);
40090 if (SDValue VZLoad =
40091 narrowLoadToVZLoad(LN, VT.getVectorElementType(), VT, DAG)) {
40092 DCI.CombineTo(N.getNode(), VZLoad);
40093 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
40094 DCI.recursivelyDeleteUnusedNodes(LN);
40095 return N;
40099 // If this is a VZEXT_MOVL of a VBROADCAST_LOAD, we don't need the broadcast
40100 // and can just use a VZEXT_LOAD.
40101 // FIXME: Is there some way to do this with SimplifyDemandedVectorElts?
40102 if (N0.hasOneUse() && N0.getOpcode() == X86ISD::VBROADCAST_LOAD) {
40103 auto *LN = cast<MemSDNode>(N0);
40104 if (VT.getScalarSizeInBits() == LN->getMemoryVT().getSizeInBits()) {
40105 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
40106 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
40107 SDValue VZLoad =
40108 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops,
40109 LN->getMemoryVT(), LN->getMemOperand());
40110 DCI.CombineTo(N.getNode(), VZLoad);
40111 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
40112 DCI.recursivelyDeleteUnusedNodes(LN);
40113 return N;
40117 // Turn (v2i64 (vzext_movl (scalar_to_vector (i64 X)))) into
40118 // (v2i64 (bitcast (v4i32 (vzext_movl (scalar_to_vector (i32 (trunc X)))))))
40119 // if the upper bits of the i64 are zero.
40120 if (N0.hasOneUse() && N0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
40121 N0.getOperand(0).hasOneUse() &&
40122 N0.getOperand(0).getValueType() == MVT::i64) {
40123 SDValue In = N0.getOperand(0);
40124 APInt Mask = APInt::getHighBitsSet(64, 32);
40125 if (DAG.MaskedValueIsZero(In, Mask)) {
40126 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, In);
40127 MVT VecVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
40128 SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Trunc);
40129 SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, DL, VecVT, SclVec);
40130 return DAG.getBitcast(VT, Movl);
40134 // Load a scalar integer constant directly to XMM instead of transferring an
40135 // immediate value from GPR.
40136 // vzext_movl (scalar_to_vector C) --> load [C,0...]
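// For example (illustrative), with VT == v4i32 and C == 42 this builds the
// constant-pool vector <i32 42, i32 0, i32 0, i32 0> and replaces the
// GPR->XMM transfer with a single constant-pool load.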
40137 if (N0.getOpcode() == ISD::SCALAR_TO_VECTOR) {
40138 if (auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
40139 // Create a vector constant - scalar constant followed by zeros.
40140 EVT ScalarVT = N0.getOperand(0).getValueType();
40141 Type *ScalarTy = ScalarVT.getTypeForEVT(*DAG.getContext());
40142 unsigned NumElts = VT.getVectorNumElements();
40143 Constant *Zero = ConstantInt::getNullValue(ScalarTy);
40144 SmallVector<Constant *, 32> ConstantVec(NumElts, Zero);
40145 ConstantVec[0] = const_cast<ConstantInt *>(C->getConstantIntValue());
40147 // Load the vector constant from constant pool.
40148 MVT PVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
40149 SDValue CP = DAG.getConstantPool(ConstantVector::get(ConstantVec), PVT);
40150 MachinePointerInfo MPI =
40151 MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
40152 Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
40153 return DAG.getLoad(VT, DL, DAG.getEntryNode(), CP, MPI, Alignment,
40154 MachineMemOperand::MOLoad);
40158 // Pull subvector inserts into undef through VZEXT_MOVL by making it an
40159 // insert into a zero vector. This helps get VZEXT_MOVL closer to
40160 // scalar_to_vectors where 256/512 are canonicalized to an insert and a
40161 // 128-bit scalar_to_vector. This reduces the number of isel patterns.
40162 if (!DCI.isBeforeLegalizeOps() && N0.hasOneUse()) {
40163 SDValue V = peekThroughOneUseBitcasts(N0);
40165 if (V.getOpcode() == ISD::INSERT_SUBVECTOR && V.getOperand(0).isUndef() &&
40166 isNullConstant(V.getOperand(2))) {
40167 SDValue In = V.getOperand(1);
40168 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
40169 In.getValueSizeInBits() /
40170 VT.getScalarSizeInBits());
40171 In = DAG.getBitcast(SubVT, In);
40172 SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, DL, SubVT, In);
40173 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
40174 getZeroVector(VT, Subtarget, DAG, DL), Movl,
40175 V.getOperand(2));
40179 return SDValue();
40181 case X86ISD::BLENDI: {
40182 SDValue N0 = N.getOperand(0);
40183 SDValue N1 = N.getOperand(1);
40185 // blend(bitcast(x),bitcast(y)) -> bitcast(blend(x,y)) to narrower types.
40186 // TODO: Handle MVT::v16i16 repeated blend mask.
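// For example (illustrative), a v4i64 blend of bitcasts of v8i32 values with
// mask 0b0101 becomes a v8i32 blend with the scaled mask 0b00110011 (each
// wide mask bit expands to two adjacent narrow mask bits), then a bitcast.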
40187 if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
40188 N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
40189 MVT SrcVT = N0.getOperand(0).getSimpleValueType();
40190 if ((VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
40191 SrcVT.getScalarSizeInBits() >= 32) {
40192 unsigned BlendMask = N.getConstantOperandVal(2);
40193 unsigned Size = VT.getVectorNumElements();
40194 unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
40195 BlendMask = scaleVectorShuffleBlendMask(BlendMask, Size, Scale);
40196 return DAG.getBitcast(
40197 VT, DAG.getNode(X86ISD::BLENDI, DL, SrcVT, N0.getOperand(0),
40198 N1.getOperand(0),
40199 DAG.getTargetConstant(BlendMask, DL, MVT::i8)));
40202 return SDValue();
40204 case X86ISD::SHUFP: {
40205 // Fold shufps(shuffle(x),shuffle(y)) -> shufps(x,y).
40206 // This is a more relaxed shuffle combiner that can ignore oneuse limits.
40207 // TODO: Support types other than v4f32.
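// For example (illustrative), a shufps that takes elements <0,1> of
// shuffle<2,3,0,1>(x) and elements <2,3> of y can take elements <2,3> of x
// directly instead, with the immediate rewritten accordingly.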
40208 if (VT == MVT::v4f32) {
40209 bool Updated = false;
40210 SmallVector<int> Mask;
40211 SmallVector<SDValue> Ops;
40212 if (getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask) &&
40213 Ops.size() == 2) {
40214 for (int i = 0; i != 2; ++i) {
40215 SmallVector<SDValue> SubOps;
40216 SmallVector<int> SubMask, SubScaledMask;
40217 SDValue Sub = peekThroughBitcasts(Ops[i]);
40218 // TODO: Scaling might be easier if we specify the demanded elts.
40219 if (getTargetShuffleInputs(Sub, SubOps, SubMask, DAG, 0, false) &&
40220 scaleShuffleElements(SubMask, 4, SubScaledMask) &&
40221 SubOps.size() == 1 && isUndefOrInRange(SubScaledMask, 0, 4)) {
40222 int Ofs = i * 2;
40223 Mask[Ofs + 0] = SubScaledMask[Mask[Ofs + 0] % 4] + (i * 4);
40224 Mask[Ofs + 1] = SubScaledMask[Mask[Ofs + 1] % 4] + (i * 4);
40225 Ops[i] = DAG.getBitcast(VT, SubOps[0]);
40226 Updated = true;
40230 if (Updated) {
40231 for (int &M : Mask)
40232 M %= 4;
40233 Ops.push_back(getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
40234 return DAG.getNode(X86ISD::SHUFP, DL, VT, Ops);
40237 return SDValue();
40239 case X86ISD::VPERMI: {
40240 // vpermi(bitcast(x)) -> bitcast(vpermi(x)) for same number of elements.
40241 // TODO: Remove when we have preferred domains in combineX86ShuffleChain.
40242 SDValue N0 = N.getOperand(0);
40243 SDValue N1 = N.getOperand(1);
40244 unsigned EltSizeInBits = VT.getScalarSizeInBits();
40245 if (N0.getOpcode() == ISD::BITCAST &&
40246 N0.getOperand(0).getScalarValueSizeInBits() == EltSizeInBits) {
40247 SDValue Src = N0.getOperand(0);
40248 EVT SrcVT = Src.getValueType();
40249 SDValue Res = DAG.getNode(X86ISD::VPERMI, DL, SrcVT, Src, N1);
40250 return DAG.getBitcast(VT, Res);
40252 return SDValue();
40254 case X86ISD::SHUF128: {
40255 // If we're permuting the upper 256-bit subvectors of a concatenation, then
40256 // see if we can peek through and access the subvector directly.
40257 if (VT.is512BitVector()) {
40258 // 512-bit mask uses 4 x i2 indices - if the msb is always set then only the
40259 // upper subvector is used.
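// For example (illustrative), (Mask & 0x0A) == 0x0A means the two i2 indices
// that read from the LHS are both 2 or 3, i.e. only the upper 256 bits of the
// LHS concatenation are referenced; (Mask & 0xA0) == 0xA0 is the RHS analogue.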
40260 SDValue LHS = N->getOperand(0);
40261 SDValue RHS = N->getOperand(1);
40262 uint64_t Mask = N->getConstantOperandVal(2);
40263 SmallVector<SDValue> LHSOps, RHSOps;
40264 SDValue NewLHS, NewRHS;
40265 if ((Mask & 0x0A) == 0x0A &&
40266 collectConcatOps(LHS.getNode(), LHSOps, DAG) && LHSOps.size() == 2) {
40267 NewLHS = widenSubVector(LHSOps[1], false, Subtarget, DAG, DL, 512);
40268 Mask &= ~0x0A;
40270 if ((Mask & 0xA0) == 0xA0 &&
40271 collectConcatOps(RHS.getNode(), RHSOps, DAG) && RHSOps.size() == 2) {
40272 NewRHS = widenSubVector(RHSOps[1], false, Subtarget, DAG, DL, 512);
40273 Mask &= ~0xA0;
40275 if (NewLHS || NewRHS)
40276 return DAG.getNode(X86ISD::SHUF128, DL, VT, NewLHS ? NewLHS : LHS,
40277 NewRHS ? NewRHS : RHS,
40278 DAG.getTargetConstant(Mask, DL, MVT::i8));
40280 return SDValue();
40282 case X86ISD::VPERM2X128: {
40283 // Fold vperm2x128(bitcast(x),bitcast(y),c) -> bitcast(vperm2x128(x,y,c)).
40284 SDValue LHS = N->getOperand(0);
40285 SDValue RHS = N->getOperand(1);
40286 if (LHS.getOpcode() == ISD::BITCAST &&
40287 (RHS.getOpcode() == ISD::BITCAST || RHS.isUndef())) {
40288 EVT SrcVT = LHS.getOperand(0).getValueType();
40289 if (RHS.isUndef() || SrcVT == RHS.getOperand(0).getValueType()) {
40290 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT,
40291 DAG.getBitcast(SrcVT, LHS),
40292 DAG.getBitcast(SrcVT, RHS),
40293 N->getOperand(2)));
40297 // Fold vperm2x128(op(),op()) -> op(vperm2x128(),vperm2x128()).
40298 if (SDValue Res = canonicalizeLaneShuffleWithRepeatedOps(N, DAG, DL))
40299 return Res;
40301 // Fold vperm2x128 subvector shuffle with an inner concat pattern.
40302 // vperm2x128(concat(X,Y),concat(Z,W)) --> concat X,Y etc.
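// For example (illustrative), each immediate nibble selects a 128-bit half:
// 0/1 pick the low/high half of the first source and 2/3 pick the low/high
// half of the second, so Imm == 0x20 on concat(X,Y), concat(Z,W) yields
// concat(X,Z).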
40303 auto FindSubVector128 = [&](unsigned Idx) {
40304 if (Idx > 3)
40305 return SDValue();
40306 SDValue Src = peekThroughBitcasts(N.getOperand(Idx < 2 ? 0 : 1));
40307 SmallVector<SDValue> SubOps;
40308 if (collectConcatOps(Src.getNode(), SubOps, DAG) && SubOps.size() == 2)
40309 return SubOps[Idx & 1];
40310 unsigned NumElts = Src.getValueType().getVectorNumElements();
40311 if ((Idx & 1) == 1 && Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
40312 Src.getOperand(1).getValueSizeInBits() == 128 &&
40313 Src.getConstantOperandAPInt(2) == (NumElts / 2)) {
40314 return Src.getOperand(1);
40316 return SDValue();
40318 unsigned Imm = N.getConstantOperandVal(2);
40319 if (SDValue SubLo = FindSubVector128(Imm & 0x0F)) {
40320 if (SDValue SubHi = FindSubVector128((Imm & 0xF0) >> 4)) {
40321 MVT SubVT = VT.getHalfNumVectorElementsVT();
40322 SubLo = DAG.getBitcast(SubVT, SubLo);
40323 SubHi = DAG.getBitcast(SubVT, SubHi);
40324 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, SubLo, SubHi);
40327 return SDValue();
40329 case X86ISD::PSHUFD:
40330 case X86ISD::PSHUFLW:
40331 case X86ISD::PSHUFHW: {
40332 SDValue N0 = N.getOperand(0);
40333 SDValue N1 = N.getOperand(1);
40334 if (N0->hasOneUse()) {
40335 SDValue V = peekThroughOneUseBitcasts(N0);
40336 switch (V.getOpcode()) {
40337 case X86ISD::VSHL:
40338 case X86ISD::VSRL:
40339 case X86ISD::VSRA:
40340 case X86ISD::VSHLI:
40341 case X86ISD::VSRLI:
40342 case X86ISD::VSRAI:
40343 case X86ISD::VROTLI:
40344 case X86ISD::VROTRI: {
40345 MVT InnerVT = V.getSimpleValueType();
40346 if (InnerVT.getScalarSizeInBits() <= VT.getScalarSizeInBits()) {
40347 SDValue Res = DAG.getNode(Opcode, DL, VT,
40348 DAG.getBitcast(VT, V.getOperand(0)), N1);
40349 Res = DAG.getBitcast(InnerVT, Res);
40350 Res = DAG.getNode(V.getOpcode(), DL, InnerVT, Res, V.getOperand(1));
40351 return DAG.getBitcast(VT, Res);
40353 break;
40358 Mask = getPSHUFShuffleMask(N);
40359 assert(Mask.size() == 4);
40360 break;
40362 case X86ISD::MOVSD:
40363 case X86ISD::MOVSH:
40364 case X86ISD::MOVSS: {
40365 SDValue N0 = N.getOperand(0);
40366 SDValue N1 = N.getOperand(1);
40368 // Canonicalize scalar FPOps:
40369 // MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0])))
40370 // If commutable, allow OP(N1[0], N0[0]).
40371 unsigned Opcode1 = N1.getOpcode();
40372 if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB ||
40373 Opcode1 == ISD::FDIV) {
40374 SDValue N10 = N1.getOperand(0);
40375 SDValue N11 = N1.getOperand(1);
40376 if (N10 == N0 ||
40377 (N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) {
40378 if (N10 != N0)
40379 std::swap(N10, N11);
40380 MVT SVT = VT.getVectorElementType();
40381 SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
40382 N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx);
40383 N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx);
40384 SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11);
40385 SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
40386 return DAG.getNode(Opcode, DL, VT, N0, SclVec);
40390 return SDValue();
40392 case X86ISD::INSERTPS: {
40393 assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
40394 SDValue Op0 = N.getOperand(0);
40395 SDValue Op1 = N.getOperand(1);
40396 unsigned InsertPSMask = N.getConstantOperandVal(2);
40397 unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
40398 unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
40399 unsigned ZeroMask = InsertPSMask & 0xF;
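// For example (illustrative), InsertPSMask == 0xB1 (0b10'11'0001) means
// SrcIdx == 2, DstIdx == 3 and ZeroMask == 0x1: take element 2 of Op1, write
// it into element 3 of Op0, and zero element 0 of the result.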
40401 // If we zero out all elements from Op0 then we don't need to reference it.
40402 if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
40403 return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
40404 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
40406 // If we zero out the element from Op1 then we don't need to reference it.
40407 if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
40408 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
40409 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
40411 // Attempt to merge insertps Op1 with an inner target shuffle node.
40412 SmallVector<int, 8> TargetMask1;
40413 SmallVector<SDValue, 2> Ops1;
40414 APInt KnownUndef1, KnownZero1;
40415 if (getTargetShuffleAndZeroables(Op1, TargetMask1, Ops1, KnownUndef1,
40416 KnownZero1)) {
40417 if (KnownUndef1[SrcIdx] || KnownZero1[SrcIdx]) {
40418 // Zero/UNDEF insertion - zero out element and remove dependency.
40419 InsertPSMask |= (1u << DstIdx);
40420 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
40421 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
40423 // Update insertps mask srcidx and reference the source input directly.
40424 int M = TargetMask1[SrcIdx];
40425 assert(0 <= M && M < 8 && "Shuffle index out of range");
40426 InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
40427 Op1 = Ops1[M < 4 ? 0 : 1];
40428 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
40429 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
40432 // Attempt to merge insertps Op0 with an inner target shuffle node.
40433 SmallVector<int, 8> TargetMask0;
40434 SmallVector<SDValue, 2> Ops0;
40435 APInt KnownUndef0, KnownZero0;
40436 if (getTargetShuffleAndZeroables(Op0, TargetMask0, Ops0, KnownUndef0,
40437 KnownZero0)) {
40438 bool Updated = false;
40439 bool UseInput00 = false;
40440 bool UseInput01 = false;
40441 for (int i = 0; i != 4; ++i) {
40442 if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
40443 // No change if element is already zero or the inserted element.
40444 continue;
40447 if (KnownUndef0[i] || KnownZero0[i]) {
40448 // If the target mask is undef/zero then we must zero the element.
40449 InsertPSMask |= (1u << i);
40450 Updated = true;
40451 continue;
40454 // The input element must stay in place, i.e. come from lane i of either input.
40455 int M = TargetMask0[i];
40456 if (M != i && M != (i + 4))
40457 return SDValue();
40459 // Determine which inputs of the target shuffle we're using.
40460 UseInput00 |= (0 <= M && M < 4);
40461 UseInput01 |= (4 <= M);
40464 // If we're not using both inputs of the target shuffle then use the
40465 // referenced input directly.
40466 if (UseInput00 && !UseInput01) {
40467 Updated = true;
40468 Op0 = Ops0[0];
40469 } else if (!UseInput00 && UseInput01) {
40470 Updated = true;
40471 Op0 = Ops0[1];
40474 if (Updated)
40475 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
40476 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
40479 // If we're inserting an element from a vbroadcast load, fold the
40480 // load into the X86insertps instruction. We need to convert the scalar
40481 // load to a vector and clear the source lane of the INSERTPS control.
40482 if (Op1.getOpcode() == X86ISD::VBROADCAST_LOAD && Op1.hasOneUse()) {
40483 auto *MemIntr = cast<MemIntrinsicSDNode>(Op1);
40484 if (MemIntr->getMemoryVT().getScalarSizeInBits() == 32) {
40485 SDValue Load = DAG.getLoad(MVT::f32, DL, MemIntr->getChain(),
40486 MemIntr->getBasePtr(),
40487 MemIntr->getMemOperand());
40488 SDValue Insert = DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0,
40489 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT,
40490 Load),
40491 DAG.getTargetConstant(InsertPSMask & 0x3f, DL, MVT::i8));
40492 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
40493 return Insert;
40497 return SDValue();
40499 default:
40500 return SDValue();
40503 // Nuke no-op shuffles that show up after combining.
40504 if (isNoopShuffleMask(Mask))
40505 return N.getOperand(0);
40507 // Look for simplifications involving one or two shuffle instructions.
40508 SDValue V = N.getOperand(0);
40509 switch (N.getOpcode()) {
40510 default:
40511 break;
40512 case X86ISD::PSHUFLW:
40513 case X86ISD::PSHUFHW:
40514 assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");
40516 // See if this reduces to a PSHUFD which is no more expensive and can
40517 // combine with more operations. Note that it has to at least flip the
40518 // dwords as otherwise it would have been removed as a no-op.
40519 if (ArrayRef<int>(Mask).equals({2, 3, 0, 1})) {
40520 int DMask[] = {0, 1, 2, 3};
40521 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
40522 DMask[DOffset + 0] = DOffset + 1;
40523 DMask[DOffset + 1] = DOffset + 0;
40524 MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
40525 V = DAG.getBitcast(DVT, V);
40526 V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
40527 getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
40528 return DAG.getBitcast(VT, V);
40531 // Look for shuffle patterns which can be implemented as a single unpack.
40532 // FIXME: This doesn't handle the location of the PSHUFD generically, and
40533 // only works when we have a PSHUFD followed by two half-shuffles.
40534 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
40535 (V.getOpcode() == X86ISD::PSHUFLW ||
40536 V.getOpcode() == X86ISD::PSHUFHW) &&
40537 V.getOpcode() != N.getOpcode() &&
40538 V.hasOneUse() && V.getOperand(0).hasOneUse()) {
40539 SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
40540 if (D.getOpcode() == X86ISD::PSHUFD) {
40541 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
40542 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
40543 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
40544 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
40545 int WordMask[8];
40546 for (int i = 0; i < 4; ++i) {
40547 WordMask[i + NOffset] = Mask[i] + NOffset;
40548 WordMask[i + VOffset] = VMask[i] + VOffset;
40550 // Map the word mask through the DWord mask.
40551 int MappedMask[8];
40552 for (int i = 0; i < 8; ++i)
40553 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
40554 if (ArrayRef<int>(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
40555 ArrayRef<int>(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
40556 // We can replace all three shuffles with an unpack.
40557 V = DAG.getBitcast(VT, D.getOperand(0));
40558 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
40559 : X86ISD::UNPCKH,
40560 DL, VT, V, V);
40565 break;
40567 case X86ISD::PSHUFD:
40568 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
40569 return NewN;
40571 break;
40574 return SDValue();
40577 /// Checks if the shuffle mask takes subsequent elements
40578 /// alternately from two vectors.
40579 /// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
40580 static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {
40582 int ParitySrc[2] = {-1, -1};
40583 unsigned Size = Mask.size();
40584 for (unsigned i = 0; i != Size; ++i) {
40585 int M = Mask[i];
40586 if (M < 0)
40587 continue;
40589 // Make sure we are using the matching element from the input.
40590 if ((M % Size) != i)
40591 return false;
40593 // Make sure we use the same input for all elements of the same parity.
40594 int Src = M / Size;
40595 if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
40596 return false;
40597 ParitySrc[i % 2] = Src;
40600 // Make sure each input is used.
40601 if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
40602 return false;
40604 Op0Even = ParitySrc[0] == 0;
40605 return true;
40608 /// Returns true iff the shuffle node \p N can be replaced with an ADDSUB(SUBADD)
40609 /// operation. If true is returned then the operands of the ADDSUB(SUBADD)
40610 /// operation are written to the parameters \p Opnd0 and \p Opnd1.
40612 /// We combine shuffles to ADDSUB(SUBADD) directly on the abstract vector shuffle
40613 /// nodes so it is easier to match generically. We also insert dummy vector
40614 /// shuffle nodes for the operands, which explicitly discard the lanes that are
40615 /// unused by this operation, so that the fact that they're unused can flow
40616 /// through the rest of the combiner.
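/// For example (illustrative), shuffle(fsub(a,b), fadd(a,b)) with mask
/// <0,5,2,7> takes the even lanes from the FSUB and the odd lanes from the
/// FADD, which is exactly the v4f32 ADDSUB(a, b) pattern.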
40617 static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
40618 SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1,
40619 bool &IsSubAdd) {
40621 EVT VT = N->getValueType(0);
40622 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40623 if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) ||
40624 !VT.getSimpleVT().isFloatingPoint())
40625 return false;
40627 // We only handle target-independent shuffles.
40628 // FIXME: It would be easy and harmless to use the target shuffle mask
40629 // extraction tool to support more.
40630 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
40631 return false;
40633 SDValue V1 = N->getOperand(0);
40634 SDValue V2 = N->getOperand(1);
40636 // Make sure we have an FADD and an FSUB.
40637 if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) ||
40638 (V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) ||
40639 V1.getOpcode() == V2.getOpcode())
40640 return false;
40642 // If there are other uses of these operations we can't fold them.
40643 if (!V1->hasOneUse() || !V2->hasOneUse())
40644 return false;
40646 // Ensure that both operations have the same operands. Note that we can
40647 // commute the FADD operands.
40648 SDValue LHS, RHS;
40649 if (V1.getOpcode() == ISD::FSUB) {
40650 LHS = V1->getOperand(0); RHS = V1->getOperand(1);
40651 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
40652 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
40653 return false;
40654 } else {
40655 assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode");
40656 LHS = V2->getOperand(0); RHS = V2->getOperand(1);
40657 if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) &&
40658 (V1->getOperand(0) != RHS || V1->getOperand(1) != LHS))
40659 return false;
40662 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
40663 bool Op0Even;
40664 if (!isAddSubOrSubAddMask(Mask, Op0Even))
40665 return false;
40667 // It's a subadd if the vector in the even parity is an FADD.
40668 IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD
40669 : V2->getOpcode() == ISD::FADD;
40671 Opnd0 = LHS;
40672 Opnd1 = RHS;
40673 return true;
40676 /// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
40677 static SDValue combineShuffleToFMAddSub(SDNode *N,
40678 const X86Subtarget &Subtarget,
40679 SelectionDAG &DAG) {
40680 // We only handle target-independent shuffles.
40681 // FIXME: It would be easy and harmless to use the target shuffle mask
40682 // extraction tool to support more.
40683 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
40684 return SDValue();
40686 MVT VT = N->getSimpleValueType(0);
40687 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40688 if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
40689 return SDValue();
40691 // We're trying to match (shuffle fma(a, b, c), X86Fmsub(a, b, c)).
40692 SDValue Op0 = N->getOperand(0);
40693 SDValue Op1 = N->getOperand(1);
40694 SDValue FMAdd = Op0, FMSub = Op1;
40695 if (FMSub.getOpcode() != X86ISD::FMSUB)
40696 std::swap(FMAdd, FMSub);
40698 if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
40699 FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
40700 FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
40701 FMAdd.getOperand(2) != FMSub.getOperand(2))
40702 return SDValue();
40704 // Check for correct shuffle mask.
40705 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
40706 bool Op0Even;
40707 if (!isAddSubOrSubAddMask(Mask, Op0Even))
40708 return SDValue();
40710 // FMAddSub takes zeroth operand from FMSub node.
40711 SDLoc DL(N);
40712 bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd;
40713 unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
40714 return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
40715 FMAdd.getOperand(2));
40718 /// Try to combine a shuffle into a target-specific add-sub or
40719 /// mul-add-sub node.
40720 static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
40721 const X86Subtarget &Subtarget,
40722 SelectionDAG &DAG) {
40723 if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
40724 return V;
40726 SDValue Opnd0, Opnd1;
40727 bool IsSubAdd;
40728 if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
40729 return SDValue();
40731 MVT VT = N->getSimpleValueType(0);
40732 SDLoc DL(N);
40734 // Try to generate X86ISD::FMADDSUB node here.
40735 SDValue Opnd2;
40736 if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) {
40737 unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
40738 return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
40741 if (IsSubAdd)
40742 return SDValue();
40744 // Do not generate X86ISD::ADDSUB node for 512-bit types even though
40745 // the ADDSUB idiom has been successfully recognized. There are no known
40746 // X86 targets with 512-bit ADDSUB instructions!
40747 if (VT.is512BitVector())
40748 return SDValue();
40750 // Do not generate X86ISD::ADDSUB node for FP16's vector types even though
40751 // the ADDSUB idiom has been successfully recognized. There are no known
40752 // X86 targets with FP16 ADDSUB instructions!
40753 if (VT.getVectorElementType() == MVT::f16)
40754 return SDValue();
40756 return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
40759 // We are looking for a shuffle where both sources are concatenated with undef
40760 // and have a width that is half of the output's width. AVX2 has VPERMD/Q, so
40761 // if we can express this as a single-source shuffle, that's preferable.
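// For example (illustrative), a v4i64 shuffle of (concat t1, undef) and
// (concat t2, undef) with mask <0,4,1,5> becomes a shuffle of (concat t1, t2)
// with mask <0,2,1,3>, which AVX2 can lower as a single VPERMQ.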
40762 static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
40763 const X86Subtarget &Subtarget) {
40764 if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
40765 return SDValue();
40767 EVT VT = N->getValueType(0);
40769 // We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
40770 if (!VT.is128BitVector() && !VT.is256BitVector())
40771 return SDValue();
40773 if (VT.getVectorElementType() != MVT::i32 &&
40774 VT.getVectorElementType() != MVT::i64 &&
40775 VT.getVectorElementType() != MVT::f32 &&
40776 VT.getVectorElementType() != MVT::f64)
40777 return SDValue();
40779 SDValue N0 = N->getOperand(0);
40780 SDValue N1 = N->getOperand(1);
40782 // Check that both sources are concats with undef.
40783 if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
40784 N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
40785 N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
40786 !N1.getOperand(1).isUndef())
40787 return SDValue();
40789 // Construct the new shuffle mask. Elements from the first source retain their
40790 // index, but elements from the second source no longer need to skip an undef.
40791 SmallVector<int, 8> Mask;
40792 int NumElts = VT.getVectorNumElements();
40794 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
40795 for (int Elt : SVOp->getMask())
40796 Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));
40798 SDLoc DL(N);
40799 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
40800 N1.getOperand(0));
40801 return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
40804 /// If we have a shuffle of AVX/AVX512 (256/512 bit) vectors that only uses the
40805 /// low half of each source vector and does not set any high half elements in
40806 /// the destination vector, narrow the shuffle to half its original size.
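/// For example (illustrative), a v8i64 shuffle whose upper four result
/// elements are undef and whose mask only reads elements 0-3 of each source
/// can be performed as a v4i64 shuffle of the low halves, with the widening
/// back to v8i64 being a free subregister insert.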
40807 static SDValue narrowShuffle(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG) {
40808 EVT VT = Shuf->getValueType(0);
40809 if (!DAG.getTargetLoweringInfo().isTypeLegal(Shuf->getValueType(0)))
40810 return SDValue();
40811 if (!VT.is256BitVector() && !VT.is512BitVector())
40812 return SDValue();
40814 // See if we can ignore all of the high elements of the shuffle.
40815 ArrayRef<int> Mask = Shuf->getMask();
40816 if (!isUndefUpperHalf(Mask))
40817 return SDValue();
40819 // Check if the shuffle mask accesses only the low half of each input vector
40820 // (half-index output is 0 or 2).
40821 int HalfIdx1, HalfIdx2;
40822 SmallVector<int, 8> HalfMask(Mask.size() / 2);
40823 if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2) ||
40824 (HalfIdx1 % 2 == 1) || (HalfIdx2 % 2 == 1))
40825 return SDValue();
40827 // Create a half-width shuffle to replace the unnecessarily wide shuffle.
40828 // The trick is knowing that all of the insert/extract are actually free
40829 // subregister (zmm<->ymm or ymm<->xmm) ops. That leaves us with a shuffle
40830 // of narrow inputs into a narrow output, and that is always cheaper than
40831 // the wide shuffle that we started with.
40832 return getShuffleHalfVectors(SDLoc(Shuf), Shuf->getOperand(0),
40833 Shuf->getOperand(1), HalfMask, HalfIdx1,
40834 HalfIdx2, false, DAG, /*UseConcat*/ true);
40837 static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
40838 TargetLowering::DAGCombinerInfo &DCI,
40839 const X86Subtarget &Subtarget) {
40840 if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N))
40841 if (SDValue V = narrowShuffle(Shuf, DAG))
40842 return V;
40844 // If we have legalized the vector types, look for blends of FADD and FSUB
40845 // nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
40846 SDLoc dl(N);
40847 EVT VT = N->getValueType(0);
40848 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40849 if (TLI.isTypeLegal(VT) && !isSoftF16(VT, Subtarget))
40850 if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
40851 return AddSub;
40853 // Attempt to combine into a vector load/broadcast.
40854 if (SDValue LD = combineToConsecutiveLoads(
40855 VT, SDValue(N, 0), dl, DAG, Subtarget, /*IsAfterLegalize*/ true))
40856 return LD;
40858 // For AVX2, we sometimes want to combine
40859 // (vector_shuffle <mask> (concat_vectors t1, undef)
40860 // (concat_vectors t2, undef))
40861 // Into:
40862 // (vector_shuffle <mask> (concat_vectors t1, t2), undef)
40863 // Since the latter can be efficiently lowered with VPERMD/VPERMQ
40864 if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
40865 return ShufConcat;
40867 if (isTargetShuffle(N->getOpcode())) {
40868 SDValue Op(N, 0);
40869 if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
40870 return Shuffle;
40872 // Try recursively combining arbitrary sequences of x86 shuffle
40873 // instructions into higher-order shuffles. We do this after combining
40874 // specific PSHUF instruction sequences into their minimal form so that we
40875 // can evaluate how many specialized shuffle instructions are involved in
40876 // a particular chain.
40877 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
40878 return Res;
40880 // Simplify source operands based on shuffle mask.
40881 // TODO - merge this into combineX86ShufflesRecursively.
40882 APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
40883 if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, DCI))
40884 return SDValue(N, 0);
40886 // Canonicalize SHUFFLE(UNARYOP(X)) -> UNARYOP(SHUFFLE(X)).
40887 // Canonicalize SHUFFLE(BINOP(X,Y)) -> BINOP(SHUFFLE(X),SHUFFLE(Y)).
40888 // Perform this after other shuffle combines to allow inner shuffles to be
40889 // combined away first.
40890 if (SDValue BinOp = canonicalizeShuffleWithOp(Op, DAG, dl))
40891 return BinOp;
40894 return SDValue();
40897 // Simplify variable target shuffle masks based on the demanded elements.
40898 // TODO: Handle DemandedBits in mask indices as well?
40899 bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetShuffle(
40900 SDValue Op, const APInt &DemandedElts, unsigned MaskIndex,
40901 TargetLowering::TargetLoweringOpt &TLO, unsigned Depth) const {
40902 // If we're demanding all elements, don't bother trying to simplify the mask.
40903 unsigned NumElts = DemandedElts.getBitWidth();
40904 if (DemandedElts.isAllOnes())
40905 return false;
40907 SDValue Mask = Op.getOperand(MaskIndex);
40908 if (!Mask.hasOneUse())
40909 return false;
40911 // Attempt to generically simplify the variable shuffle mask.
40912 APInt MaskUndef, MaskZero;
40913 if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
40914 Depth + 1))
40915 return true;
40917 // Attempt to extract+simplify a (constant pool load) shuffle mask.
40918 // TODO: Support other types from getTargetShuffleMaskIndices?
40919 SDValue BC = peekThroughOneUseBitcasts(Mask);
40920 EVT BCVT = BC.getValueType();
40921 auto *Load = dyn_cast<LoadSDNode>(BC);
40922 if (!Load || !Load->getBasePtr().hasOneUse())
40923 return false;
40925 const Constant *C = getTargetConstantFromNode(Load);
40926 if (!C)
40927 return false;
40929 Type *CTy = C->getType();
40930 if (!CTy->isVectorTy() ||
40931 CTy->getPrimitiveSizeInBits() != Mask.getValueSizeInBits())
40932 return false;
40934 // Handle scaling for i64 elements on 32-bit targets.
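// For example (illustrative), on a 32-bit target a v2i64 shuffle mask may be
// stored as a v4i32 constant, giving Scale == 2; constant elements 2*I and
// 2*I+1 then both correspond to demanded element I.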
40935 unsigned NumCstElts = cast<FixedVectorType>(CTy)->getNumElements();
40936 if (NumCstElts != NumElts && NumCstElts != (NumElts * 2))
40937 return false;
40938 unsigned Scale = NumCstElts / NumElts;
40940 // Simplify mask if we have an undemanded element that is not undef.
40941 bool Simplified = false;
40942 SmallVector<Constant *, 32> ConstVecOps;
40943 for (unsigned i = 0; i != NumCstElts; ++i) {
40944 Constant *Elt = C->getAggregateElement(i);
40945 if (!DemandedElts[i / Scale] && !isa<UndefValue>(Elt)) {
40946 ConstVecOps.push_back(UndefValue::get(Elt->getType()));
40947 Simplified = true;
40948 continue;
40950 ConstVecOps.push_back(Elt);
40952 if (!Simplified)
40953 return false;
40955 // Generate new constant pool entry + legalize immediately for the load.
40956 SDLoc DL(Op);
40957 SDValue CV = TLO.DAG.getConstantPool(ConstantVector::get(ConstVecOps), BCVT);
40958 SDValue LegalCV = LowerConstantPool(CV, TLO.DAG);
40959 SDValue NewMask = TLO.DAG.getLoad(
40960 BCVT, DL, TLO.DAG.getEntryNode(), LegalCV,
40961 MachinePointerInfo::getConstantPool(TLO.DAG.getMachineFunction()),
40962 Load->getAlign());
40963 return TLO.CombineTo(Mask, TLO.DAG.getBitcast(Mask.getValueType(), NewMask));
40966 bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
40967 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
40968 TargetLoweringOpt &TLO, unsigned Depth) const {
40969 int NumElts = DemandedElts.getBitWidth();
40970 unsigned Opc = Op.getOpcode();
40971 EVT VT = Op.getValueType();
40973 // Handle special case opcodes.
40974 switch (Opc) {
40975 case X86ISD::PMULDQ:
40976 case X86ISD::PMULUDQ: {
40977 APInt LHSUndef, LHSZero;
40978 APInt RHSUndef, RHSZero;
40979 SDValue LHS = Op.getOperand(0);
40980 SDValue RHS = Op.getOperand(1);
40981 if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
40982 Depth + 1))
40983 return true;
40984 if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
40985 Depth + 1))
40986 return true;
40987 // Multiply by zero.
40988 KnownZero = LHSZero | RHSZero;
40989 break;
40991 case X86ISD::VPMADDWD: {
40992 APInt LHSUndef, LHSZero;
40993 APInt RHSUndef, RHSZero;
40994 SDValue LHS = Op.getOperand(0);
40995 SDValue RHS = Op.getOperand(1);
40996 APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedElts, 2 * NumElts);
40998 if (SimplifyDemandedVectorElts(LHS, DemandedSrcElts, LHSUndef, LHSZero, TLO,
40999 Depth + 1))
41000 return true;
41001 if (SimplifyDemandedVectorElts(RHS, DemandedSrcElts, RHSUndef, RHSZero, TLO,
41002 Depth + 1))
41003 return true;
41005 // TODO: Multiply by zero.
41007 // If RHS/LHS elements are known zero then we don't need the LHS/RHS equivalent.
41008 APInt DemandedLHSElts = DemandedSrcElts & ~RHSZero;
41009 if (SimplifyDemandedVectorElts(LHS, DemandedLHSElts, LHSUndef, LHSZero, TLO,
41010 Depth + 1))
41011 return true;
41012 APInt DemandedRHSElts = DemandedSrcElts & ~LHSZero;
41013 if (SimplifyDemandedVectorElts(RHS, DemandedRHSElts, RHSUndef, RHSZero, TLO,
41014 Depth + 1))
41015 return true;
41016 break;
41018 case X86ISD::PSADBW: {
41019 SDValue LHS = Op.getOperand(0);
41020 SDValue RHS = Op.getOperand(1);
41021 assert(VT.getScalarType() == MVT::i64 &&
41022 LHS.getValueType() == RHS.getValueType() &&
41023 LHS.getValueType().getScalarType() == MVT::i8 &&
41024 "Unexpected PSADBW types");
41026 // Aggressively peek through ops to get at the demanded elts.
41027 if (!DemandedElts.isAllOnes()) {
41028 unsigned NumSrcElts = LHS.getValueType().getVectorNumElements();
41029 APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts);
41030 SDValue NewLHS = SimplifyMultipleUseDemandedVectorElts(
41031 LHS, DemandedSrcElts, TLO.DAG, Depth + 1);
41032 SDValue NewRHS = SimplifyMultipleUseDemandedVectorElts(
41033 RHS, DemandedSrcElts, TLO.DAG, Depth + 1);
41034 if (NewLHS || NewRHS) {
41035 NewLHS = NewLHS ? NewLHS : LHS;
41036 NewRHS = NewRHS ? NewRHS : RHS;
41037 return TLO.CombineTo(
41038 Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewLHS, NewRHS));
41041 break;
41043 case X86ISD::VSHL:
41044 case X86ISD::VSRL:
41045 case X86ISD::VSRA: {
41046 // We only need the bottom 64-bits of the (128-bit) shift amount.
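// For example (illustrative), PSRLD reads its scalar count from bits [63:0]
// of its 128-bit amount operand, so for a v4i32 amount only the low two
// elements are ever read and the upper two can be simplified freely.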
41047 SDValue Amt = Op.getOperand(1);
41048 MVT AmtVT = Amt.getSimpleValueType();
41049 assert(AmtVT.is128BitVector() && "Unexpected value type");
41051 // If every use of the shift amount is just as an SSE shift amount then we know
41052 // that only the bottom 64-bits are ever used.
41053 bool AssumeSingleUse = llvm::all_of(Amt->uses(), [&Amt](SDNode *Use) {
41054 unsigned UseOpc = Use->getOpcode();
41055 return (UseOpc == X86ISD::VSHL || UseOpc == X86ISD::VSRL ||
41056 UseOpc == X86ISD::VSRA) &&
41057 Use->getOperand(0) != Amt;
41060 APInt AmtUndef, AmtZero;
41061 unsigned NumAmtElts = AmtVT.getVectorNumElements();
41062 APInt AmtElts = APInt::getLowBitsSet(NumAmtElts, NumAmtElts / 2);
41063 if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO,
41064 Depth + 1, AssumeSingleUse))
41065 return true;
41066 [[fallthrough]];
41068 case X86ISD::VSHLI:
41069 case X86ISD::VSRLI:
41070 case X86ISD::VSRAI: {
41071 SDValue Src = Op.getOperand(0);
41072 APInt SrcUndef;
41073 if (SimplifyDemandedVectorElts(Src, DemandedElts, SrcUndef, KnownZero, TLO,
41074 Depth + 1))
41075 return true;
41077 // Fold shift(0,x) -> 0
41078 if (DemandedElts.isSubsetOf(KnownZero))
41079 return TLO.CombineTo(
41080 Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
41082 // Aggressively peek through ops to get at the demanded elts.
41083 if (!DemandedElts.isAllOnes())
41084 if (SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
41085 Src, DemandedElts, TLO.DAG, Depth + 1))
41086 return TLO.CombineTo(
41087 Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc, Op.getOperand(1)));
41088 break;
41090 case X86ISD::VPSHA:
41091 case X86ISD::VPSHL:
41092 case X86ISD::VSHLV:
41093 case X86ISD::VSRLV:
41094 case X86ISD::VSRAV: {
41095 APInt LHSUndef, LHSZero;
41096 APInt RHSUndef, RHSZero;
41097 SDValue LHS = Op.getOperand(0);
41098 SDValue RHS = Op.getOperand(1);
41099 if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
41100 Depth + 1))
41101 return true;
41103 // Fold shift(0,x) -> 0
41104 if (DemandedElts.isSubsetOf(LHSZero))
41105 return TLO.CombineTo(
41106 Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
41108 if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
41109 Depth + 1))
41110 return true;
41112 KnownZero = LHSZero;
41113 break;
41115 case X86ISD::KSHIFTL: {
41116 SDValue Src = Op.getOperand(0);
41117 auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
41118 assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
41119 unsigned ShiftAmt = Amt->getZExtValue();
41121 if (ShiftAmt == 0)
41122 return TLO.CombineTo(Op, Src);
41124 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
41125 // single shift. We can do this if the bottom bits (which are shifted
41126 // out) are never demanded.
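// For example (illustrative), kshiftl(kshiftr(X, 2), 3) can be rewritten as
// kshiftl(X, 1) when the low 3 elements of the result are not demanded.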
41127 if (Src.getOpcode() == X86ISD::KSHIFTR) {
41128 if (!DemandedElts.intersects(APInt::getLowBitsSet(NumElts, ShiftAmt))) {
41129 unsigned C1 = Src.getConstantOperandVal(1);
41130 unsigned NewOpc = X86ISD::KSHIFTL;
41131 int Diff = ShiftAmt - C1;
41132 if (Diff < 0) {
41133 Diff = -Diff;
41134 NewOpc = X86ISD::KSHIFTR;
41137 SDLoc dl(Op);
41138 SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
41139 return TLO.CombineTo(
41140 Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
41144 APInt DemandedSrc = DemandedElts.lshr(ShiftAmt);
41145 if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
41146 Depth + 1))
41147 return true;
41149 KnownUndef <<= ShiftAmt;
41150 KnownZero <<= ShiftAmt;
41151 KnownZero.setLowBits(ShiftAmt);
41152 break;
41154 case X86ISD::KSHIFTR: {
41155 SDValue Src = Op.getOperand(0);
41156 auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
41157 assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
41158 unsigned ShiftAmt = Amt->getZExtValue();
41160 if (ShiftAmt == 0)
41161 return TLO.CombineTo(Op, Src);
41163 // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
41164 // single shift. We can do this if the top bits (which are shifted
41165 // out) are never demanded.
41166 if (Src.getOpcode() == X86ISD::KSHIFTL) {
41167 if (!DemandedElts.intersects(APInt::getHighBitsSet(NumElts, ShiftAmt))) {
41168 unsigned C1 = Src.getConstantOperandVal(1);
41169 unsigned NewOpc = X86ISD::KSHIFTR;
41170 int Diff = ShiftAmt - C1;
41171 if (Diff < 0) {
41172 Diff = -Diff;
41173 NewOpc = X86ISD::KSHIFTL;
41176 SDLoc dl(Op);
41177 SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
41178 return TLO.CombineTo(
41179 Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
41183 APInt DemandedSrc = DemandedElts.shl(ShiftAmt);
41184 if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
41185 Depth + 1))
41186 return true;
41188 KnownUndef.lshrInPlace(ShiftAmt);
41189 KnownZero.lshrInPlace(ShiftAmt);
41190 KnownZero.setHighBits(ShiftAmt);
41191 break;
41193 case X86ISD::ANDNP: {
41194 // ANDNP = (~LHS & RHS);
41195 SDValue LHS = Op.getOperand(0);
41196 SDValue RHS = Op.getOperand(1);
41198 auto GetDemandedMasks = [&](SDValue Op, bool Invert = false) {
41199 APInt UndefElts;
41200 SmallVector<APInt> EltBits;
41201 int NumElts = VT.getVectorNumElements();
41202 int EltSizeInBits = VT.getScalarSizeInBits();
41203 APInt OpBits = APInt::getAllOnes(EltSizeInBits);
41204 APInt OpElts = DemandedElts;
41205 if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
41206 EltBits)) {
41207 OpBits.clearAllBits();
41208 OpElts.clearAllBits();
41209 for (int I = 0; I != NumElts; ++I) {
41210 if (!DemandedElts[I])
41211 continue;
41212 if (UndefElts[I]) {
41213 // We can't assume an undef src element gives an undef dst - the
41214 // other src might be zero.
41215 OpBits.setAllBits();
41216 OpElts.setBit(I);
41217 } else if ((Invert && !EltBits[I].isAllOnes()) ||
41218 (!Invert && !EltBits[I].isZero())) {
41219 OpBits |= Invert ? ~EltBits[I] : EltBits[I];
41220 OpElts.setBit(I);
41224 return std::make_pair(OpBits, OpElts);
41226 APInt BitsLHS, EltsLHS;
41227 APInt BitsRHS, EltsRHS;
41228 std::tie(BitsLHS, EltsLHS) = GetDemandedMasks(RHS);
41229 std::tie(BitsRHS, EltsRHS) = GetDemandedMasks(LHS, true);
41231 APInt LHSUndef, LHSZero;
41232 APInt RHSUndef, RHSZero;
41233 if (SimplifyDemandedVectorElts(LHS, EltsLHS, LHSUndef, LHSZero, TLO,
41234 Depth + 1))
41235 return true;
41236 if (SimplifyDemandedVectorElts(RHS, EltsRHS, RHSUndef, RHSZero, TLO,
41237 Depth + 1))
41238 return true;
41240 if (!DemandedElts.isAllOnes()) {
41241 SDValue NewLHS = SimplifyMultipleUseDemandedBits(LHS, BitsLHS, EltsLHS,
41242 TLO.DAG, Depth + 1);
41243 SDValue NewRHS = SimplifyMultipleUseDemandedBits(RHS, BitsRHS, EltsRHS,
41244 TLO.DAG, Depth + 1);
41245 if (NewLHS || NewRHS) {
41246 NewLHS = NewLHS ? NewLHS : LHS;
41247 NewRHS = NewRHS ? NewRHS : RHS;
41248 return TLO.CombineTo(
41249 Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewLHS, NewRHS));
41252 break;
41254 case X86ISD::CVTSI2P:
41255 case X86ISD::CVTUI2P: {
41256 SDValue Src = Op.getOperand(0);
41257 MVT SrcVT = Src.getSimpleValueType();
41258 APInt SrcUndef, SrcZero;
41259 APInt SrcElts = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
41260 if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
41261 Depth + 1))
41262 return true;
41263 break;
41265 case X86ISD::PACKSS:
41266 case X86ISD::PACKUS: {
41267 SDValue N0 = Op.getOperand(0);
41268 SDValue N1 = Op.getOperand(1);
41270 APInt DemandedLHS, DemandedRHS;
41271 getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
41273 APInt LHSUndef, LHSZero;
41274 if (SimplifyDemandedVectorElts(N0, DemandedLHS, LHSUndef, LHSZero, TLO,
41275 Depth + 1))
41276 return true;
41277 APInt RHSUndef, RHSZero;
41278 if (SimplifyDemandedVectorElts(N1, DemandedRHS, RHSUndef, RHSZero, TLO,
41279 Depth + 1))
41280 return true;
41282 // TODO - pass on known zero/undef.
41284 // Aggressively peek through ops to get at the demanded elts.
41285 // TODO - we should do this for all target/faux shuffles ops.
41286 if (!DemandedElts.isAllOnes()) {
41287 SDValue NewN0 = SimplifyMultipleUseDemandedVectorElts(N0, DemandedLHS,
41288 TLO.DAG, Depth + 1);
41289 SDValue NewN1 = SimplifyMultipleUseDemandedVectorElts(N1, DemandedRHS,
41290 TLO.DAG, Depth + 1);
41291 if (NewN0 || NewN1) {
41292 NewN0 = NewN0 ? NewN0 : N0;
41293 NewN1 = NewN1 ? NewN1 : N1;
41294 return TLO.CombineTo(Op,
41295 TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
41298 break;
41300 case X86ISD::HADD:
41301 case X86ISD::HSUB:
41302 case X86ISD::FHADD:
41303 case X86ISD::FHSUB: {
41304 SDValue N0 = Op.getOperand(0);
41305 SDValue N1 = Op.getOperand(1);
41307 APInt DemandedLHS, DemandedRHS;
41308 getHorizDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
41310 APInt LHSUndef, LHSZero;
41311 if (SimplifyDemandedVectorElts(N0, DemandedLHS, LHSUndef, LHSZero, TLO,
41312 Depth + 1))
41313 return true;
41314 APInt RHSUndef, RHSZero;
41315 if (SimplifyDemandedVectorElts(N1, DemandedRHS, RHSUndef, RHSZero, TLO,
41316 Depth + 1))
41317 return true;
41319 // TODO - pass on known zero/undef.
41321 // Aggressively peek through ops to get at the demanded elts.
41322 // TODO: Handle repeated operands.
41323 if (N0 != N1 && !DemandedElts.isAllOnes()) {
41324 SDValue NewN0 = SimplifyMultipleUseDemandedVectorElts(N0, DemandedLHS,
41325 TLO.DAG, Depth + 1);
41326 SDValue NewN1 = SimplifyMultipleUseDemandedVectorElts(N1, DemandedRHS,
41327 TLO.DAG, Depth + 1);
41328 if (NewN0 || NewN1) {
41329 NewN0 = NewN0 ? NewN0 : N0;
41330 NewN1 = NewN1 ? NewN1 : N1;
41331 return TLO.CombineTo(Op,
41332 TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
41335 break;
41337 case X86ISD::VTRUNC:
41338 case X86ISD::VTRUNCS:
41339 case X86ISD::VTRUNCUS: {
41340 SDValue Src = Op.getOperand(0);
41341 MVT SrcVT = Src.getSimpleValueType();
41342 APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
41343 APInt SrcUndef, SrcZero;
41344 if (SimplifyDemandedVectorElts(Src, DemandedSrc, SrcUndef, SrcZero, TLO,
41345 Depth + 1))
41346 return true;
41347 KnownZero = SrcZero.zextOrTrunc(NumElts);
41348 KnownUndef = SrcUndef.zextOrTrunc(NumElts);
41349 break;
41351 case X86ISD::BLENDV: {
41352 APInt SelUndef, SelZero;
41353 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, SelUndef,
41354 SelZero, TLO, Depth + 1))
41355 return true;
41357 // TODO: Use SelZero to adjust LHS/RHS DemandedElts.
41358 APInt LHSUndef, LHSZero;
41359 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, LHSUndef,
41360 LHSZero, TLO, Depth + 1))
41361 return true;
41363 APInt RHSUndef, RHSZero;
41364 if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedElts, RHSUndef,
41365 RHSZero, TLO, Depth + 1))
41366 return true;
41368 KnownZero = LHSZero & RHSZero;
41369 KnownUndef = LHSUndef & RHSUndef;
41370 break;
41372 case X86ISD::VZEXT_MOVL: {
41373 // If upper demanded elements are already zero then we have nothing to do.
41374 SDValue Src = Op.getOperand(0);
41375 APInt DemandedUpperElts = DemandedElts;
41376 DemandedUpperElts.clearLowBits(1);
41377 if (TLO.DAG.MaskedVectorIsZero(Src, DemandedUpperElts, Depth + 1))
41378 return TLO.CombineTo(Op, Src);
41379 break;
41381 case X86ISD::VZEXT_LOAD: {
41382 // If the upper elements are not demanded then simplify to a
41383 // scalar_to_vector(load()).
41384 MVT SVT = VT.getSimpleVT().getVectorElementType();
41385 if (DemandedElts == 1 && Op.getValue(1).use_empty() && isTypeLegal(SVT)) {
41386 SDLoc DL(Op);
41387 auto *Mem = cast<MemSDNode>(Op);
41388 SDValue Elt = TLO.DAG.getLoad(SVT, DL, Mem->getChain(), Mem->getBasePtr(),
41389 Mem->getMemOperand());
41390 SDValue Vec = TLO.DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Elt);
41391 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Vec));
41393 break;
41395 case X86ISD::VBROADCAST: {
41396 SDValue Src = Op.getOperand(0);
41397 MVT SrcVT = Src.getSimpleValueType();
41398 if (!SrcVT.isVector())
41399 break;
41400 // Don't bother broadcasting if we just need the 0'th element.
41401 if (DemandedElts == 1) {
41402 if (Src.getValueType() != VT)
41403 Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
41404 SDLoc(Op));
41405 return TLO.CombineTo(Op, Src);
41407 APInt SrcUndef, SrcZero;
41408 APInt SrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(), 0);
41409 if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
41410 Depth + 1))
41411 return true;
41412 // Aggressively peek through src to get at the demanded elt.
41413 // TODO - we should do this for all target/faux shuffles ops.
41414 if (SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
41415 Src, SrcElts, TLO.DAG, Depth + 1))
41416 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
41417 break;
41419 case X86ISD::VPERMV:
41420 if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 0, TLO,
41421 Depth))
41422 return true;
41423 break;
41424 case X86ISD::PSHUFB:
41425 case X86ISD::VPERMV3:
41426 case X86ISD::VPERMILPV:
41427 if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 1, TLO,
41428 Depth))
41429 return true;
41430 break;
41431 case X86ISD::VPPERM:
41432 case X86ISD::VPERMIL2:
41433 if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 2, TLO,
41434 Depth))
41435 return true;
41436 break;
41439 // For 256/512-bit ops that are 128/256-bit ops glued together, if we do not
41440 // demand any of the high elements, then narrow the op to 128/256-bits: e.g.
41441 // (op ymm0, ymm1) --> insert undef, (op xmm0, xmm1), 0
41442 if ((VT.is256BitVector() || VT.is512BitVector()) &&
41443 DemandedElts.lshr(NumElts / 2) == 0) {
41444 unsigned SizeInBits = VT.getSizeInBits();
41445 unsigned ExtSizeInBits = SizeInBits / 2;
41447 // See if 512-bit ops only use the bottom 128-bits.
41448 if (VT.is512BitVector() && DemandedElts.lshr(NumElts / 4) == 0)
41449 ExtSizeInBits = SizeInBits / 4;
41451 switch (Opc) {
41452 // Scalar broadcast.
41453 case X86ISD::VBROADCAST: {
41454 SDLoc DL(Op);
41455 SDValue Src = Op.getOperand(0);
41456 if (Src.getValueSizeInBits() > ExtSizeInBits)
41457 Src = extractSubVector(Src, 0, TLO.DAG, DL, ExtSizeInBits);
41458 EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
41459 ExtSizeInBits / VT.getScalarSizeInBits());
41460 SDValue Bcst = TLO.DAG.getNode(X86ISD::VBROADCAST, DL, BcstVT, Src);
41461 return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Bcst, 0,
41462 TLO.DAG, DL, ExtSizeInBits));
41464 case X86ISD::VBROADCAST_LOAD: {
41465 SDLoc DL(Op);
41466 auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
41467 EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
41468 ExtSizeInBits / VT.getScalarSizeInBits());
41469 SDVTList Tys = TLO.DAG.getVTList(BcstVT, MVT::Other);
41470 SDValue Ops[] = {MemIntr->getOperand(0), MemIntr->getOperand(1)};
41471 SDValue Bcst = TLO.DAG.getMemIntrinsicNode(
41472 X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MemIntr->getMemoryVT(),
41473 MemIntr->getMemOperand());
41474 TLO.DAG.makeEquivalentMemoryOrdering(SDValue(MemIntr, 1),
41475 Bcst.getValue(1));
41476 return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Bcst, 0,
41477 TLO.DAG, DL, ExtSizeInBits));
41479 // Subvector broadcast.
41480 case X86ISD::SUBV_BROADCAST_LOAD: {
41481 auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
41482 EVT MemVT = MemIntr->getMemoryVT();
41483 if (ExtSizeInBits == MemVT.getStoreSizeInBits()) {
41484 SDLoc DL(Op);
41485 SDValue Ld =
41486 TLO.DAG.getLoad(MemVT, DL, MemIntr->getChain(),
41487 MemIntr->getBasePtr(), MemIntr->getMemOperand());
41488 TLO.DAG.makeEquivalentMemoryOrdering(SDValue(MemIntr, 1),
41489 Ld.getValue(1));
41490 return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Ld, 0,
41491 TLO.DAG, DL, ExtSizeInBits));
41492 } else if ((ExtSizeInBits % MemVT.getStoreSizeInBits()) == 0) {
41493 SDLoc DL(Op);
41494 EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
41495 ExtSizeInBits / VT.getScalarSizeInBits());
41496 if (SDValue BcstLd =
41497 getBROADCAST_LOAD(Opc, DL, BcstVT, MemVT, MemIntr, 0, TLO.DAG))
41498 return TLO.CombineTo(Op,
41499 insertSubVector(TLO.DAG.getUNDEF(VT), BcstLd, 0,
41500 TLO.DAG, DL, ExtSizeInBits));
41502 break;
41504 // Byte shifts by immediate.
41505 case X86ISD::VSHLDQ:
41506 case X86ISD::VSRLDQ:
41507 // Shift by uniform.
41508 case X86ISD::VSHL:
41509 case X86ISD::VSRL:
41510 case X86ISD::VSRA:
41511 // Shift by immediate.
41512 case X86ISD::VSHLI:
41513 case X86ISD::VSRLI:
41514 case X86ISD::VSRAI: {
41515 SDLoc DL(Op);
41516 SDValue Ext0 =
41517 extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
41518 SDValue ExtOp =
41519 TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0, Op.getOperand(1));
41520 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
41521 SDValue Insert =
41522 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
41523 return TLO.CombineTo(Op, Insert);
41525 case X86ISD::VPERMI: {
41526 // Simplify PERMPD/PERMQ to extract_subvector.
41527 // TODO: This should be done in shuffle combining.
41528 if (VT == MVT::v4f64 || VT == MVT::v4i64) {
41529 SmallVector<int, 4> Mask;
41530 DecodeVPERMMask(NumElts, Op.getConstantOperandVal(1), Mask);
41531 if (isUndefOrEqual(Mask[0], 2) && isUndefOrEqual(Mask[1], 3)) {
41532 SDLoc DL(Op);
41533 SDValue Ext = extractSubVector(Op.getOperand(0), 2, TLO.DAG, DL, 128);
41534 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
41535 SDValue Insert = insertSubVector(UndefVec, Ext, 0, TLO.DAG, DL, 128);
41536 return TLO.CombineTo(Op, Insert);
41539 break;
41541 case X86ISD::VPERM2X128: {
41542 // Simplify VPERM2F128/VPERM2I128 to extract_subvector.
41543 SDLoc DL(Op);
41544 unsigned LoMask = Op.getConstantOperandVal(2) & 0xF;
41545 if (LoMask & 0x8)
41546 return TLO.CombineTo(
41547 Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, DL));
41548 unsigned EltIdx = (LoMask & 0x1) * (NumElts / 2);
41549 unsigned SrcIdx = (LoMask & 0x2) >> 1;
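// For example (illustrative), LoMask == 0x3 gives SrcIdx == 1 and
// EltIdx == NumElts / 2: the low 128 bits of the result come from the upper
// 128-bit lane of the second source, so we can extract that lane directly.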
41550 SDValue ExtOp =
41551 extractSubVector(Op.getOperand(SrcIdx), EltIdx, TLO.DAG, DL, 128);
41552 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
41553 SDValue Insert =
41554 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
41555 return TLO.CombineTo(Op, Insert);
41557 // Zero upper elements.
41558 case X86ISD::VZEXT_MOVL:
41559 // Target unary shuffles by immediate:
41560 case X86ISD::PSHUFD:
41561 case X86ISD::PSHUFLW:
41562 case X86ISD::PSHUFHW:
41563 case X86ISD::VPERMILPI:
41564 // (Non-Lane Crossing) Target Shuffles.
41565 case X86ISD::VPERMILPV:
41566 case X86ISD::VPERMIL2:
41567 case X86ISD::PSHUFB:
41568 case X86ISD::UNPCKL:
41569 case X86ISD::UNPCKH:
41570 case X86ISD::BLENDI:
41571 // Integer ops.
41572 case X86ISD::PACKSS:
41573 case X86ISD::PACKUS:
41574 case X86ISD::PCMPEQ:
41575 case X86ISD::PCMPGT:
41576 case X86ISD::PMULUDQ:
41577 case X86ISD::PMULDQ:
41578 case X86ISD::VSHLV:
41579 case X86ISD::VSRLV:
41580 case X86ISD::VSRAV:
41581 // Float ops.
41582 case X86ISD::FMAX:
41583 case X86ISD::FMIN:
41584 case X86ISD::FMAXC:
41585 case X86ISD::FMINC:
41586 // Horizontal Ops.
41587 case X86ISD::HADD:
41588 case X86ISD::HSUB:
41589 case X86ISD::FHADD:
41590 case X86ISD::FHSUB: {
41591 SDLoc DL(Op);
41592 SmallVector<SDValue, 4> Ops;
41593 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
41594 SDValue SrcOp = Op.getOperand(i);
41595 EVT SrcVT = SrcOp.getValueType();
41596 assert((!SrcVT.isVector() || SrcVT.getSizeInBits() == SizeInBits) &&
41597 "Unsupported vector size");
41598 Ops.push_back(SrcVT.isVector() ? extractSubVector(SrcOp, 0, TLO.DAG, DL,
41599 ExtSizeInBits)
41600 : SrcOp);
41602 MVT ExtVT = VT.getSimpleVT();
41603 ExtVT = MVT::getVectorVT(ExtVT.getScalarType(),
41604 ExtSizeInBits / ExtVT.getScalarSizeInBits());
41605 SDValue ExtOp = TLO.DAG.getNode(Opc, DL, ExtVT, Ops);
41606 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
41607 SDValue Insert =
41608 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
41609 return TLO.CombineTo(Op, Insert);
41614 // For splats, unless we *only* demand the 0'th element, stop attempts at
41615 // simplification here; we aren't going to improve things, and this is better
41616 // than any potential shuffle.
41617 if (!DemandedElts.isOne() && TLO.DAG.isSplatValue(Op, /*AllowUndefs*/false))
41618 return false;
41620 // Get target/faux shuffle mask.
41621 APInt OpUndef, OpZero;
41622 SmallVector<int, 64> OpMask;
41623 SmallVector<SDValue, 2> OpInputs;
41624 if (!getTargetShuffleInputs(Op, DemandedElts, OpInputs, OpMask, OpUndef,
41625 OpZero, TLO.DAG, Depth, false))
41626 return false;
41628 // Shuffle inputs must be the same size as the result.
41629 if (OpMask.size() != (unsigned)NumElts ||
41630 llvm::any_of(OpInputs, [VT](SDValue V) {
41631 return VT.getSizeInBits() != V.getValueSizeInBits() ||
41632 !V.getValueType().isVector();
41634 return false;
41636 KnownZero = OpZero;
41637 KnownUndef = OpUndef;
41639 // Check if shuffle mask can be simplified to undef/zero/identity.
41640 int NumSrcs = OpInputs.size();
41641 for (int i = 0; i != NumElts; ++i)
41642 if (!DemandedElts[i])
41643 OpMask[i] = SM_SentinelUndef;
41645 if (isUndefInRange(OpMask, 0, NumElts)) {
41646 KnownUndef.setAllBits();
41647 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
41649 if (isUndefOrZeroInRange(OpMask, 0, NumElts)) {
41650 KnownZero.setAllBits();
41651 return TLO.CombineTo(
41652 Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
41654 for (int Src = 0; Src != NumSrcs; ++Src)
41655 if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts))
41656 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, OpInputs[Src]));
41658 // Attempt to simplify inputs.
41659 for (int Src = 0; Src != NumSrcs; ++Src) {
41660 // TODO: Support inputs of different types.
41661 if (OpInputs[Src].getValueType() != VT)
41662 continue;
41664 int Lo = Src * NumElts;
41665 APInt SrcElts = APInt::getZero(NumElts);
41666 for (int i = 0; i != NumElts; ++i)
41667 if (DemandedElts[i]) {
41668 int M = OpMask[i] - Lo;
41669 if (0 <= M && M < NumElts)
41670 SrcElts.setBit(M);
41673 // TODO - Propagate input undef/zero elts.
41674 APInt SrcUndef, SrcZero;
41675 if (SimplifyDemandedVectorElts(OpInputs[Src], SrcElts, SrcUndef, SrcZero,
41676 TLO, Depth + 1))
41677 return true;
41680 // If we don't demand all elements, then attempt to combine to a simpler
41681 // shuffle.
41682 // We need to convert the depth to something combineX86ShufflesRecursively
41683 // can handle - so pretend it's Depth == 0 again, and reduce the max depth
41684 // to match. This prevents combineX86ShuffleChain from returning a
41685 // combined shuffle that's the same as the original root, causing an
41686 // infinite loop.
41687 if (!DemandedElts.isAllOnes()) {
41688 assert(Depth < X86::MaxShuffleCombineDepth && "Depth out of range");
41690 SmallVector<int, 64> DemandedMask(NumElts, SM_SentinelUndef);
41691 for (int i = 0; i != NumElts; ++i)
41692 if (DemandedElts[i])
41693 DemandedMask[i] = i;
41695 SDValue NewShuffle = combineX86ShufflesRecursively(
41696 {Op}, 0, Op, DemandedMask, {}, 0, X86::MaxShuffleCombineDepth - Depth,
41697 /*HasVarMask*/ false,
41698 /*AllowCrossLaneVarMask*/ true, /*AllowPerLaneVarMask*/ true, TLO.DAG,
41699 Subtarget);
41700 if (NewShuffle)
41701 return TLO.CombineTo(Op, NewShuffle);
41704 return false;
41707 bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
41708 SDValue Op, const APInt &OriginalDemandedBits,
41709 const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
41710 unsigned Depth) const {
41711 EVT VT = Op.getValueType();
41712 unsigned BitWidth = OriginalDemandedBits.getBitWidth();
41713 unsigned Opc = Op.getOpcode();
41714 switch(Opc) {
41715 case X86ISD::VTRUNC: {
41716 KnownBits KnownOp;
41717 SDValue Src = Op.getOperand(0);
41718 MVT SrcVT = Src.getSimpleValueType();
41720 // Simplify the input, using demanded bit information.
41721 APInt TruncMask = OriginalDemandedBits.zext(SrcVT.getScalarSizeInBits());
41722 APInt DemandedElts = OriginalDemandedElts.trunc(SrcVT.getVectorNumElements());
41723 if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, KnownOp, TLO, Depth + 1))
41724 return true;
41725 break;
41727 case X86ISD::PMULDQ:
41728 case X86ISD::PMULUDQ: {
41729 // PMULDQ/PMULUDQ only use the lower 32 bits from each vector element.
41730 KnownBits KnownLHS, KnownRHS;
41731 SDValue LHS = Op.getOperand(0);
41732 SDValue RHS = Op.getOperand(1);
41734 // Don't mask bits on 32-bit AVX512 targets which might lose a broadcast.
41735 // FIXME: Can we bound this better?
41736 APInt DemandedMask = APInt::getLowBitsSet(64, 32);
41737 APInt DemandedMaskLHS = APInt::getAllOnes(64);
41738 APInt DemandedMaskRHS = APInt::getAllOnes(64);
41740 bool Is32BitAVX512 = !Subtarget.is64Bit() && Subtarget.hasAVX512();
41741 if (!Is32BitAVX512 || !TLO.DAG.isSplatValue(LHS))
41742 DemandedMaskLHS = DemandedMask;
41743 if (!Is32BitAVX512 || !TLO.DAG.isSplatValue(RHS))
41744 DemandedMaskRHS = DemandedMask;
41746 if (SimplifyDemandedBits(LHS, DemandedMaskLHS, OriginalDemandedElts,
41747 KnownLHS, TLO, Depth + 1))
41748 return true;
41749 if (SimplifyDemandedBits(RHS, DemandedMaskRHS, OriginalDemandedElts,
41750 KnownRHS, TLO, Depth + 1))
41751 return true;
41753 // PMULUDQ(X,1) -> AND(X,(1<<32)-1) 'getZeroExtendInReg'.
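// (PMULUDQ reads only the low 32 bits of each 64-bit lane, so multiplying
// by a constant 1 reproduces those low 32 bits zero-extended to 64 bits -
// the same value as masking each lane with 0xFFFFFFFF.)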
41754 KnownRHS = KnownRHS.trunc(32);
41755 if (Opc == X86ISD::PMULUDQ && KnownRHS.isConstant() &&
41756 KnownRHS.getConstant().isOne()) {
41757 SDLoc DL(Op);
41758 SDValue Mask = TLO.DAG.getConstant(DemandedMask, DL, VT);
41759 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, DL, VT, LHS, Mask));
41762 // Aggressively peek through ops to get at the demanded low bits.
41763 SDValue DemandedLHS = SimplifyMultipleUseDemandedBits(
41764 LHS, DemandedMaskLHS, OriginalDemandedElts, TLO.DAG, Depth + 1);
41765 SDValue DemandedRHS = SimplifyMultipleUseDemandedBits(
41766 RHS, DemandedMaskRHS, OriginalDemandedElts, TLO.DAG, Depth + 1);
41767 if (DemandedLHS || DemandedRHS) {
41768 DemandedLHS = DemandedLHS ? DemandedLHS : LHS;
41769 DemandedRHS = DemandedRHS ? DemandedRHS : RHS;
41770 return TLO.CombineTo(
41771 Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, DemandedLHS, DemandedRHS));
41773 break;
41775 case X86ISD::ANDNP: {
41776 KnownBits Known2;
41777 SDValue Op0 = Op.getOperand(0);
41778 SDValue Op1 = Op.getOperand(1);
41780 if (SimplifyDemandedBits(Op1, OriginalDemandedBits, OriginalDemandedElts,
41781 Known, TLO, Depth + 1))
41782 return true;
41783 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
41785 if (SimplifyDemandedBits(Op0, ~Known.Zero & OriginalDemandedBits,
41786 OriginalDemandedElts, Known2, TLO, Depth + 1))
41787 return true;
41788 assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
41790 // If the RHS is a constant, see if we can simplify it.
41791 if (ShrinkDemandedConstant(Op, ~Known2.One & OriginalDemandedBits,
41792 OriginalDemandedElts, TLO))
41793 return true;
41795 // ANDNP = (~Op0 & Op1);
41796 Known.One &= Known2.Zero;
41797 Known.Zero |= Known2.One;
41798 break;
41800 case X86ISD::VSHLI: {
41801 SDValue Op0 = Op.getOperand(0);
41803 unsigned ShAmt = Op.getConstantOperandVal(1);
41804 if (ShAmt >= BitWidth)
41805 break;
41807 APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);
41809 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
41810 // single shift. We can do this if the bottom bits (which are shifted
41811 // out) are never demanded.
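// e.g. with Shift2Amt=2 and ShAmt=4: ((X >>u 2) << 4) differs from (X << 2)
// only in bits [3:0], and those bits are not demanded here, so a single
// VSHLI by Diff=2 suffices.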
41812 if (Op0.getOpcode() == X86ISD::VSRLI &&
41813 OriginalDemandedBits.countr_zero() >= ShAmt) {
41814 unsigned Shift2Amt = Op0.getConstantOperandVal(1);
41815 if (Shift2Amt < BitWidth) {
41816 int Diff = ShAmt - Shift2Amt;
41817 if (Diff == 0)
41818 return TLO.CombineTo(Op, Op0.getOperand(0));
41820 unsigned NewOpc = Diff < 0 ? X86ISD::VSRLI : X86ISD::VSHLI;
41821 SDValue NewShift = TLO.DAG.getNode(
41822 NewOpc, SDLoc(Op), VT, Op0.getOperand(0),
41823 TLO.DAG.getTargetConstant(std::abs(Diff), SDLoc(Op), MVT::i8));
41824 return TLO.CombineTo(Op, NewShift);
41828 // If we are only demanding sign bits then we can use the shift source directly.
41829 unsigned NumSignBits =
41830 TLO.DAG.ComputeNumSignBits(Op0, OriginalDemandedElts, Depth + 1);
41831 unsigned UpperDemandedBits = BitWidth - OriginalDemandedBits.countr_zero();
41832 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= UpperDemandedBits)
41833 return TLO.CombineTo(Op, Op0);
41835 if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
41836 TLO, Depth + 1))
41837 return true;
41839 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
41840 Known.Zero <<= ShAmt;
41841 Known.One <<= ShAmt;
41843 // Low bits known zero.
41844 Known.Zero.setLowBits(ShAmt);
41845 return false;
41847 case X86ISD::VSRLI: {
41848 unsigned ShAmt = Op.getConstantOperandVal(1);
41849 if (ShAmt >= BitWidth)
41850 break;
41852 APInt DemandedMask = OriginalDemandedBits << ShAmt;
41854 if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
41855 OriginalDemandedElts, Known, TLO, Depth + 1))
41856 return true;
41858 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
41859 Known.Zero.lshrInPlace(ShAmt);
41860 Known.One.lshrInPlace(ShAmt);
41862 // High bits known zero.
41863 Known.Zero.setHighBits(ShAmt);
41864 return false;
41866 case X86ISD::VSRAI: {
41867 SDValue Op0 = Op.getOperand(0);
41868 SDValue Op1 = Op.getOperand(1);
41870 unsigned ShAmt = Op1->getAsZExtVal();
41871 if (ShAmt >= BitWidth)
41872 break;
41874 APInt DemandedMask = OriginalDemandedBits << ShAmt;
41876 // If we just want the sign bit then we don't need to shift it.
41877 if (OriginalDemandedBits.isSignMask())
41878 return TLO.CombineTo(Op, Op0);
41880 // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
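// (Shifting left by C1 and then arithmetic-shifting right by C1 just
// sign-extends from bit (BitWidth-C1-1); when X already has more than C1
// sign bits that extension reproduces X, so X can be used directly.)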
41881 if (Op0.getOpcode() == X86ISD::VSHLI &&
41882 Op.getOperand(1) == Op0.getOperand(1)) {
41883 SDValue Op00 = Op0.getOperand(0);
41884 unsigned NumSignBits =
41885 TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts);
41886 if (ShAmt < NumSignBits)
41887 return TLO.CombineTo(Op, Op00);
41890 // If any of the demanded bits are produced by the sign extension, we also
41891 // demand the input sign bit.
41892 if (OriginalDemandedBits.countl_zero() < ShAmt)
41893 DemandedMask.setSignBit();
41895 if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
41896 TLO, Depth + 1))
41897 return true;
41899 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
41900 Known.Zero.lshrInPlace(ShAmt);
41901 Known.One.lshrInPlace(ShAmt);
41903 // If the input sign bit is known to be zero, or if none of the top bits
41904 // are demanded, turn this into an unsigned shift right.
41905 if (Known.Zero[BitWidth - ShAmt - 1] ||
41906 OriginalDemandedBits.countl_zero() >= ShAmt)
41907 return TLO.CombineTo(
41908 Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1));
41910 // High bits are known one.
41911 if (Known.One[BitWidth - ShAmt - 1])
41912 Known.One.setHighBits(ShAmt);
41913 return false;
41915 case X86ISD::BLENDV: {
41916 SDValue Sel = Op.getOperand(0);
41917 SDValue LHS = Op.getOperand(1);
41918 SDValue RHS = Op.getOperand(2);
41920 APInt SignMask = APInt::getSignMask(BitWidth);
41921 SDValue NewSel = SimplifyMultipleUseDemandedBits(
41922 Sel, SignMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
41923 SDValue NewLHS = SimplifyMultipleUseDemandedBits(
41924 LHS, OriginalDemandedBits, OriginalDemandedElts, TLO.DAG, Depth + 1);
41925 SDValue NewRHS = SimplifyMultipleUseDemandedBits(
41926 RHS, OriginalDemandedBits, OriginalDemandedElts, TLO.DAG, Depth + 1);
41928 if (NewSel || NewLHS || NewRHS) {
41929 NewSel = NewSel ? NewSel : Sel;
41930 NewLHS = NewLHS ? NewLHS : LHS;
41931 NewRHS = NewRHS ? NewRHS : RHS;
41932 return TLO.CombineTo(Op, TLO.DAG.getNode(X86ISD::BLENDV, SDLoc(Op), VT,
41933 NewSel, NewLHS, NewRHS));
41935 break;
41937 case X86ISD::PEXTRB:
41938 case X86ISD::PEXTRW: {
41939 SDValue Vec = Op.getOperand(0);
41940 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
41941 MVT VecVT = Vec.getSimpleValueType();
41942 unsigned NumVecElts = VecVT.getVectorNumElements();
41944 if (CIdx && CIdx->getAPIntValue().ult(NumVecElts)) {
41945 unsigned Idx = CIdx->getZExtValue();
41946 unsigned VecBitWidth = VecVT.getScalarSizeInBits();
41948 // If we demand no bits from the vector then we must have demanded
41949 // bits from the implicit zext - simplify to zero.
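// (PEXTRB/PEXTRW zero-extend the extracted element into the wider scalar
// result, so every bit above the element width is known to be zero.)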
41950 APInt DemandedVecBits = OriginalDemandedBits.trunc(VecBitWidth);
41951 if (DemandedVecBits == 0)
41952 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
41954 APInt KnownUndef, KnownZero;
41955 APInt DemandedVecElts = APInt::getOneBitSet(NumVecElts, Idx);
41956 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
41957 KnownZero, TLO, Depth + 1))
41958 return true;
41960 KnownBits KnownVec;
41961 if (SimplifyDemandedBits(Vec, DemandedVecBits, DemandedVecElts,
41962 KnownVec, TLO, Depth + 1))
41963 return true;
41965 if (SDValue V = SimplifyMultipleUseDemandedBits(
41966 Vec, DemandedVecBits, DemandedVecElts, TLO.DAG, Depth + 1))
41967 return TLO.CombineTo(
41968 Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, V, Op.getOperand(1)));
41970 Known = KnownVec.zext(BitWidth);
41971 return false;
41973 break;
41975 case X86ISD::PINSRB:
41976 case X86ISD::PINSRW: {
41977 SDValue Vec = Op.getOperand(0);
41978 SDValue Scl = Op.getOperand(1);
41979 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
41980 MVT VecVT = Vec.getSimpleValueType();
41982 if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
41983 unsigned Idx = CIdx->getZExtValue();
41984 if (!OriginalDemandedElts[Idx])
41985 return TLO.CombineTo(Op, Vec);
41987 KnownBits KnownVec;
41988 APInt DemandedVecElts(OriginalDemandedElts);
41989 DemandedVecElts.clearBit(Idx);
41990 if (SimplifyDemandedBits(Vec, OriginalDemandedBits, DemandedVecElts,
41991 KnownVec, TLO, Depth + 1))
41992 return true;
41994 KnownBits KnownScl;
41995 unsigned NumSclBits = Scl.getScalarValueSizeInBits();
41996 APInt DemandedSclBits = OriginalDemandedBits.zext(NumSclBits);
41997 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
41998 return true;
42000 KnownScl = KnownScl.trunc(VecVT.getScalarSizeInBits());
42001 Known = KnownVec.intersectWith(KnownScl);
42002 return false;
42004 break;
42006 case X86ISD::PACKSS:
42007 // PACKSS saturates to MIN/MAX integer values. So if we just want the
42008 // sign bit then we can just ask for the source operand's sign bit.
42009 // TODO - add known bits handling.
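// (Signed saturation clamps the magnitude but never changes the sign, so
// each packed element keeps the sign bit of its source element.)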
42010 if (OriginalDemandedBits.isSignMask()) {
42011 APInt DemandedLHS, DemandedRHS;
42012 getPackDemandedElts(VT, OriginalDemandedElts, DemandedLHS, DemandedRHS);
42014 KnownBits KnownLHS, KnownRHS;
42015 APInt SignMask = APInt::getSignMask(BitWidth * 2);
42016 if (SimplifyDemandedBits(Op.getOperand(0), SignMask, DemandedLHS,
42017 KnownLHS, TLO, Depth + 1))
42018 return true;
42019 if (SimplifyDemandedBits(Op.getOperand(1), SignMask, DemandedRHS,
42020 KnownRHS, TLO, Depth + 1))
42021 return true;
42023 // Attempt to avoid multi-use ops if we don't need anything from them.
42024 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
42025 Op.getOperand(0), SignMask, DemandedLHS, TLO.DAG, Depth + 1);
42026 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
42027 Op.getOperand(1), SignMask, DemandedRHS, TLO.DAG, Depth + 1);
42028 if (DemandedOp0 || DemandedOp1) {
42029 SDValue Op0 = DemandedOp0 ? DemandedOp0 : Op.getOperand(0);
42030 SDValue Op1 = DemandedOp1 ? DemandedOp1 : Op.getOperand(1);
42031 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, Op0, Op1));
42034 // TODO - add general PACKSS/PACKUS SimplifyDemandedBits support.
42035 break;
42036 case X86ISD::VBROADCAST: {
42037 SDValue Src = Op.getOperand(0);
42038 MVT SrcVT = Src.getSimpleValueType();
42039 APInt DemandedElts = APInt::getOneBitSet(
42040 SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1, 0);
42041 if (SimplifyDemandedBits(Src, OriginalDemandedBits, DemandedElts, Known,
42042 TLO, Depth + 1))
42043 return true;
42044 // If we don't need the upper bits, attempt to narrow the broadcast source.
42045 // Don't attempt this on AVX512 as it might affect broadcast folding.
42046 // TODO: Should we attempt this for i32/i16 splats? They tend to be slower.
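// e.g. (v2i64 VBROADCAST i64:X) where only the low 32 bits of each lane are
// demanded can become bitcast(v4i32 VBROADCAST (trunc X to i32)).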
42047 if ((BitWidth == 64) && SrcVT.isScalarInteger() && !Subtarget.hasAVX512() &&
42048 OriginalDemandedBits.countl_zero() >= (BitWidth / 2) &&
42049 Src->hasOneUse()) {
42050 MVT NewSrcVT = MVT::getIntegerVT(BitWidth / 2);
42051 SDValue NewSrc =
42052 TLO.DAG.getNode(ISD::TRUNCATE, SDLoc(Src), NewSrcVT, Src);
42053 MVT NewVT = MVT::getVectorVT(NewSrcVT, VT.getVectorNumElements() * 2);
42054 SDValue NewBcst =
42055 TLO.DAG.getNode(X86ISD::VBROADCAST, SDLoc(Op), NewVT, NewSrc);
42056 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, NewBcst));
42058 break;
42060 case X86ISD::PCMPGT:
42061 // icmp sgt(0, R) == ashr(R, BitWidth-1).
42062 // iff we only need the sign bit then we can use R directly.
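// (PCMPGT(0, R) is all-ones exactly when R is negative, so the result's
// sign bit always equals R's sign bit.)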
42063 if (OriginalDemandedBits.isSignMask() &&
42064 ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
42065 return TLO.CombineTo(Op, Op.getOperand(1));
42066 break;
42067 case X86ISD::MOVMSK: {
42068 SDValue Src = Op.getOperand(0);
42069 MVT SrcVT = Src.getSimpleValueType();
42070 unsigned SrcBits = SrcVT.getScalarSizeInBits();
42071 unsigned NumElts = SrcVT.getVectorNumElements();
42073 // If we don't need the sign bits at all just return zero.
42074 if (OriginalDemandedBits.countr_zero() >= NumElts)
42075 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
42077 // See if we only demand bits from the lower 128-bit vector.
42078 if (SrcVT.is256BitVector() &&
42079 OriginalDemandedBits.getActiveBits() <= (NumElts / 2)) {
42080 SDValue NewSrc = extract128BitVector(Src, 0, TLO.DAG, SDLoc(Src));
42081 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
42084 // Only demand the vector elements of the sign bits we need.
42085 APInt KnownUndef, KnownZero;
42086 APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts);
42087 if (SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
42088 TLO, Depth + 1))
42089 return true;
42091 Known.Zero = KnownZero.zext(BitWidth);
42092 Known.Zero.setHighBits(BitWidth - NumElts);
42094 // MOVMSK only uses the MSB from each vector element.
42095 KnownBits KnownSrc;
42096 APInt DemandedSrcBits = APInt::getSignMask(SrcBits);
42097 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, KnownSrc, TLO,
42098 Depth + 1))
42099 return true;
42101 if (KnownSrc.One[SrcBits - 1])
42102 Known.One.setLowBits(NumElts);
42103 else if (KnownSrc.Zero[SrcBits - 1])
42104 Known.Zero.setLowBits(NumElts);
42106 // Attempt to avoid multi-use ops if we don't need anything from them.
42107 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
42108 Src, DemandedSrcBits, DemandedElts, TLO.DAG, Depth + 1))
42109 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
42110 return false;
42112 case X86ISD::TESTP: {
42113 SDValue Op0 = Op.getOperand(0);
42114 SDValue Op1 = Op.getOperand(1);
42115 MVT OpVT = Op0.getSimpleValueType();
42116 assert((OpVT.getVectorElementType() == MVT::f32 ||
42117 OpVT.getVectorElementType() == MVT::f64) &&
42118 "Illegal vector type for X86ISD::TESTP");
42120 // TESTPS/TESTPD only demand the sign bits of ALL the elements.
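// (TESTPS/TESTPD set ZF and CF from the sign bits of the packed AND and
// ANDN of the operands, so no other bit of either operand can affect the
// outcome.)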
42121 KnownBits KnownSrc;
42122 APInt SignMask = APInt::getSignMask(OpVT.getScalarSizeInBits());
42123 bool AssumeSingleUse = (Op0 == Op1) && Op->isOnlyUserOf(Op0.getNode());
42124 return SimplifyDemandedBits(Op0, SignMask, KnownSrc, TLO, Depth + 1,
42125 AssumeSingleUse) ||
42126 SimplifyDemandedBits(Op1, SignMask, KnownSrc, TLO, Depth + 1,
42127 AssumeSingleUse);
42129 case X86ISD::BEXTR:
42130 case X86ISD::BEXTRI: {
42131 SDValue Op0 = Op.getOperand(0);
42132 SDValue Op1 = Op.getOperand(1);
42134 // Only the bottom 16 bits of the control operand are required.
42135 if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
42136 // NOTE: SimplifyDemandedBits won't do this for constants.
42137 uint64_t Val1 = Cst1->getZExtValue();
42138 uint64_t MaskedVal1 = Val1 & 0xFFFF;
42139 if (Opc == X86ISD::BEXTR && MaskedVal1 != Val1) {
42140 SDLoc DL(Op);
42141 return TLO.CombineTo(
42142 Op, TLO.DAG.getNode(X86ISD::BEXTR, DL, VT, Op0,
42143 TLO.DAG.getConstant(MaskedVal1, DL, VT)));
42146 unsigned Shift = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 0);
42147 unsigned Length = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 8);
42149 // If the length is 0, the result is 0.
42150 if (Length == 0) {
42151 Known.setAllZero();
42152 return false;
42155 if ((Shift + Length) <= BitWidth) {
42156 APInt DemandedMask = APInt::getBitsSet(BitWidth, Shift, Shift + Length);
42157 if (SimplifyDemandedBits(Op0, DemandedMask, Known, TLO, Depth + 1))
42158 return true;
42160 Known = Known.extractBits(Length, Shift);
42161 Known = Known.zextOrTrunc(BitWidth);
42162 return false;
42164 } else {
42165 assert(Opc == X86ISD::BEXTR && "Unexpected opcode!");
42166 KnownBits Known1;
42167 APInt DemandedMask(APInt::getLowBitsSet(BitWidth, 16));
42168 if (SimplifyDemandedBits(Op1, DemandedMask, Known1, TLO, Depth + 1))
42169 return true;
42171 // If the length is 0, replace with 0.
42172 KnownBits LengthBits = Known1.extractBits(8, 8);
42173 if (LengthBits.isZero())
42174 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
42177 break;
42179 case X86ISD::PDEP: {
42180 SDValue Op0 = Op.getOperand(0);
42181 SDValue Op1 = Op.getOperand(1);
42183 unsigned DemandedBitsLZ = OriginalDemandedBits.countl_zero();
42184 APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ);
42186 // If the demanded bits have leading zeroes, we don't demand those from the
42187 // mask.
42188 if (SimplifyDemandedBits(Op1, LoMask, Known, TLO, Depth + 1))
42189 return true;
42191 // The number of possible 1s in the mask determines the number of LSBs of
42192 // operand 0 used. Undemanded bits from the mask don't matter so filter
42193 // them before counting.
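// e.g. if the mask can have at most 3 set bits, PDEP can deposit at most
// the 3 lowest bits of operand 0, so only those bits are demanded from it.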
42194 KnownBits Known2;
42195 uint64_t Count = (~Known.Zero & LoMask).popcount();
42196 APInt DemandedMask(APInt::getLowBitsSet(BitWidth, Count));
42197 if (SimplifyDemandedBits(Op0, DemandedMask, Known2, TLO, Depth + 1))
42198 return true;
42200 // Zeroes are retained from the mask, but not ones.
42201 Known.One.clearAllBits();
42202 // The result will have at least as many trailing zeros as the non-mask
42203 // operand since bits can only map to the same or higher bit position.
42204 Known.Zero.setLowBits(Known2.countMinTrailingZeros());
42205 return false;
42209 return TargetLowering::SimplifyDemandedBitsForTargetNode(
42210 Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
42213 SDValue X86TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
42214 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
42215 SelectionDAG &DAG, unsigned Depth) const {
42216 int NumElts = DemandedElts.getBitWidth();
42217 unsigned Opc = Op.getOpcode();
42218 EVT VT = Op.getValueType();
42220 switch (Opc) {
42221 case X86ISD::PINSRB:
42222 case X86ISD::PINSRW: {
42223 // If we don't demand the inserted element, return the base vector.
42224 SDValue Vec = Op.getOperand(0);
42225 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
42226 MVT VecVT = Vec.getSimpleValueType();
42227 if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
42228 !DemandedElts[CIdx->getZExtValue()])
42229 return Vec;
42230 break;
42232 case X86ISD::VSHLI: {
42233 // If we are only demanding sign bits then we can use the shift source
42234 // directly.
42235 SDValue Op0 = Op.getOperand(0);
42236 unsigned ShAmt = Op.getConstantOperandVal(1);
42237 unsigned BitWidth = DemandedBits.getBitWidth();
42238 unsigned NumSignBits = DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
42239 unsigned UpperDemandedBits = BitWidth - DemandedBits.countr_zero();
42240 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= UpperDemandedBits)
42241 return Op0;
42242 break;
42244 case X86ISD::VSRAI:
42245 // iff we only need the sign bit then we can use the source directly.
42246 // TODO: generalize where we only demand extended signbits.
42247 if (DemandedBits.isSignMask())
42248 return Op.getOperand(0);
42249 break;
42250 case X86ISD::PCMPGT:
42251 // icmp sgt(0, R) == ashr(R, BitWidth-1).
42252 // iff we only need the sign bit then we can use R directly.
42253 if (DemandedBits.isSignMask() &&
42254 ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
42255 return Op.getOperand(1);
42256 break;
42257 case X86ISD::BLENDV: {
42258 // BLENDV: Cond (MSB) ? LHS : RHS
42259 SDValue Cond = Op.getOperand(0);
42260 SDValue LHS = Op.getOperand(1);
42261 SDValue RHS = Op.getOperand(2);
42263 KnownBits CondKnown = DAG.computeKnownBits(Cond, DemandedElts, Depth + 1);
42264 if (CondKnown.isNegative())
42265 return LHS;
42266 if (CondKnown.isNonNegative())
42267 return RHS;
42268 break;
42270 case X86ISD::ANDNP: {
42271 // ANDNP = (~LHS & RHS);
42272 SDValue LHS = Op.getOperand(0);
42273 SDValue RHS = Op.getOperand(1);
42275 KnownBits LHSKnown = DAG.computeKnownBits(LHS, DemandedElts, Depth + 1);
42276 KnownBits RHSKnown = DAG.computeKnownBits(RHS, DemandedElts, Depth + 1);
42278 // If every demanded bit is known to be 0 on the LHS or 0 on the RHS, then
42279 // the (inverted) LHS bits cannot contribute to the result of the 'andn' in
42280 // this context, so return RHS.
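// (Per demanded bit: if RHS is known zero the result is zero and matches
// RHS; if LHS is known zero then ~LHS is one and the result is again just
// the RHS bit.)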
42281 if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.Zero))
42282 return RHS;
42283 break;
42287 APInt ShuffleUndef, ShuffleZero;
42288 SmallVector<int, 16> ShuffleMask;
42289 SmallVector<SDValue, 2> ShuffleOps;
42290 if (getTargetShuffleInputs(Op, DemandedElts, ShuffleOps, ShuffleMask,
42291 ShuffleUndef, ShuffleZero, DAG, Depth, false)) {
42292 // If all the demanded elts are from one operand and are inline,
42293 // then we can use the operand directly.
42294 int NumOps = ShuffleOps.size();
42295 if (ShuffleMask.size() == (unsigned)NumElts &&
42296 llvm::all_of(ShuffleOps, [VT](SDValue V) {
42297 return VT.getSizeInBits() == V.getValueSizeInBits();
42298 })) {
42300 if (DemandedElts.isSubsetOf(ShuffleUndef))
42301 return DAG.getUNDEF(VT);
42302 if (DemandedElts.isSubsetOf(ShuffleUndef | ShuffleZero))
42303 return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(Op));
42305 // Bitmask that indicates which ops have only been accessed 'inline'.
42306 APInt IdentityOp = APInt::getAllOnes(NumOps);
42307 for (int i = 0; i != NumElts; ++i) {
42308 int M = ShuffleMask[i];
42309 if (!DemandedElts[i] || ShuffleUndef[i])
42310 continue;
42311 int OpIdx = M / NumElts;
42312 int EltIdx = M % NumElts;
42313 if (M < 0 || EltIdx != i) {
42314 IdentityOp.clearAllBits();
42315 break;
42317 IdentityOp &= APInt::getOneBitSet(NumOps, OpIdx);
42318 if (IdentityOp == 0)
42319 break;
42321 assert((IdentityOp == 0 || IdentityOp.popcount() == 1) &&
42322 "Multiple identity shuffles detected");
42324 if (IdentityOp != 0)
42325 return DAG.getBitcast(VT, ShuffleOps[IdentityOp.countr_zero()]);
42329 return TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
42330 Op, DemandedBits, DemandedElts, DAG, Depth);
42333 bool X86TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode(
42334 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
42335 bool PoisonOnly, unsigned Depth) const {
42336 unsigned EltsBits = Op.getScalarValueSizeInBits();
42337 unsigned NumElts = DemandedElts.getBitWidth();
42339 // TODO: Add more target shuffles.
42340 switch (Op.getOpcode()) {
42341 case X86ISD::PSHUFD:
42342 case X86ISD::VPERMILPI: {
42343 SmallVector<int, 8> Mask;
42344 DecodePSHUFMask(NumElts, EltsBits, Op.getConstantOperandVal(1), Mask);
42346 APInt DemandedSrcElts = APInt::getZero(NumElts);
42347 for (unsigned I = 0; I != NumElts; ++I)
42348 if (DemandedElts[I])
42349 DemandedSrcElts.setBit(Mask[I]);
42351 return DAG.isGuaranteedNotToBeUndefOrPoison(
42352 Op.getOperand(0), DemandedSrcElts, PoisonOnly, Depth + 1);
42355 return TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode(
42356 Op, DemandedElts, DAG, PoisonOnly, Depth);
42359 bool X86TargetLowering::canCreateUndefOrPoisonForTargetNode(
42360 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
42361 bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const {
42363 // TODO: Add more target shuffles.
42364 switch (Op.getOpcode()) {
42365 case X86ISD::PSHUFD:
42366 case X86ISD::VPERMILPI:
42367 return false;
42369 return TargetLowering::canCreateUndefOrPoisonForTargetNode(
42370 Op, DemandedElts, DAG, PoisonOnly, ConsiderFlags, Depth);
42373 bool X86TargetLowering::isSplatValueForTargetNode(SDValue Op,
42374 const APInt &DemandedElts,
42375 APInt &UndefElts,
42376 const SelectionDAG &DAG,
42377 unsigned Depth) const {
42378 unsigned NumElts = DemandedElts.getBitWidth();
42379 unsigned Opc = Op.getOpcode();
42381 switch (Opc) {
42382 case X86ISD::VBROADCAST:
42383 case X86ISD::VBROADCAST_LOAD:
42384 UndefElts = APInt::getZero(NumElts);
42385 return true;
42388 return TargetLowering::isSplatValueForTargetNode(Op, DemandedElts, UndefElts,
42389 DAG, Depth);
42392 // Helper to peek through bitops/trunc/setcc to determine size of source vector.
42393 // Allows combineBitcastvxi1 to determine what size vector generated a <X x i1>.
42394 static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size,
42395 bool AllowTruncate) {
42396 switch (Src.getOpcode()) {
42397 case ISD::TRUNCATE:
42398 if (!AllowTruncate)
42399 return false;
42400 [[fallthrough]];
42401 case ISD::SETCC:
42402 return Src.getOperand(0).getValueSizeInBits() == Size;
42403 case ISD::AND:
42404 case ISD::XOR:
42405 case ISD::OR:
42406 return checkBitcastSrcVectorSize(Src.getOperand(0), Size, AllowTruncate) &&
42407 checkBitcastSrcVectorSize(Src.getOperand(1), Size, AllowTruncate);
42408 case ISD::SELECT:
42409 case ISD::VSELECT:
42410 return Src.getOperand(0).getScalarValueSizeInBits() == 1 &&
42411 checkBitcastSrcVectorSize(Src.getOperand(1), Size, AllowTruncate) &&
42412 checkBitcastSrcVectorSize(Src.getOperand(2), Size, AllowTruncate);
42413 case ISD::BUILD_VECTOR:
42414 return ISD::isBuildVectorAllZeros(Src.getNode()) ||
42415 ISD::isBuildVectorAllOnes(Src.getNode());
42417 return false;
42420 // Helper to flip between AND/OR/XOR opcodes and their X86ISD FP equivalents.
42421 static unsigned getAltBitOpcode(unsigned Opcode) {
42422 switch(Opcode) {
42423 case ISD::AND: return X86ISD::FAND;
42424 case ISD::OR: return X86ISD::FOR;
42425 case ISD::XOR: return X86ISD::FXOR;
42426 case X86ISD::ANDNP: return X86ISD::FANDN;
42428 llvm_unreachable("Unknown bitwise opcode");
42431 // Helper to adjust v4i32 MOVMSK expansion to work with SSE1-only targets.
42432 static SDValue adjustBitcastSrcVectorSSE1(SelectionDAG &DAG, SDValue Src,
42433 const SDLoc &DL) {
42434 EVT SrcVT = Src.getValueType();
42435 if (SrcVT != MVT::v4i1)
42436 return SDValue();
42438 switch (Src.getOpcode()) {
42439 case ISD::SETCC:
42440 if (Src.getOperand(0).getValueType() == MVT::v4i32 &&
42441 ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode()) &&
42442 cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETLT) {
42443 SDValue Op0 = Src.getOperand(0);
42444 if (ISD::isNormalLoad(Op0.getNode()))
42445 return DAG.getBitcast(MVT::v4f32, Op0);
42446 if (Op0.getOpcode() == ISD::BITCAST &&
42447 Op0.getOperand(0).getValueType() == MVT::v4f32)
42448 return Op0.getOperand(0);
42450 break;
42451 case ISD::AND:
42452 case ISD::XOR:
42453 case ISD::OR: {
42454 SDValue Op0 = adjustBitcastSrcVectorSSE1(DAG, Src.getOperand(0), DL);
42455 SDValue Op1 = adjustBitcastSrcVectorSSE1(DAG, Src.getOperand(1), DL);
42456 if (Op0 && Op1)
42457 return DAG.getNode(getAltBitOpcode(Src.getOpcode()), DL, MVT::v4f32, Op0,
42458 Op1);
42459 break;
42462 return SDValue();
42465 // Helper to push sign extension of vXi1 SETCC result through bitops.
42466 static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT,
42467 SDValue Src, const SDLoc &DL) {
42468 switch (Src.getOpcode()) {
42469 case ISD::SETCC:
42470 case ISD::TRUNCATE:
42471 case ISD::BUILD_VECTOR:
42472 return DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
42473 case ISD::AND:
42474 case ISD::XOR:
42475 case ISD::OR:
42476 return DAG.getNode(
42477 Src.getOpcode(), DL, SExtVT,
42478 signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(0), DL),
42479 signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL));
42480 case ISD::SELECT:
42481 case ISD::VSELECT:
42482 return DAG.getSelect(
42483 DL, SExtVT, Src.getOperand(0),
42484 signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL),
42485 signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(2), DL));
42487 llvm_unreachable("Unexpected node type for vXi1 sign extension");
42490 // Try to match patterns such as
42491 // (i16 bitcast (v16i1 x))
42492 // ->
42493 // (i16 movmsk (16i8 sext (v16i1 x)))
42494 // before the illegal vector is scalarized on subtargets that don't have legal
42495 // vxi1 types.
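// Sign-extending each i1 lane to a full byte puts its value in that byte's
// MSB, and PMOVMSKB then collects those MSBs into the low bits of a GPR -
// the same scalar the direct bitcast would produce, with lane 0 in bit 0.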
42496 static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
42497 const SDLoc &DL,
42498 const X86Subtarget &Subtarget) {
42499 EVT SrcVT = Src.getValueType();
42500 if (!SrcVT.isSimple() || SrcVT.getScalarType() != MVT::i1)
42501 return SDValue();
42503 // Recognize the IR pattern for the movmsk intrinsic under SSE1 before type
42504 // legalization destroys the v4i32 type.
42505 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2()) {
42506 if (SDValue V = adjustBitcastSrcVectorSSE1(DAG, Src, DL)) {
42507 V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32,
42508 DAG.getBitcast(MVT::v4f32, V));
42509 return DAG.getZExtOrTrunc(V, DL, VT);
42513 // If the input is a truncate from v16i8, v32i8 or v64i8, go ahead and use a
42514 // movmskb even with avx512. This will be better than truncating to vXi1 and
42515 // using a kmov. This can especially help KNL if the input is a v16i8/v32i8
42516 // vpcmpeqb/vpcmpgtb.
42517 bool PreferMovMsk = Src.getOpcode() == ISD::TRUNCATE && Src.hasOneUse() &&
42518 (Src.getOperand(0).getValueType() == MVT::v16i8 ||
42519 Src.getOperand(0).getValueType() == MVT::v32i8 ||
42520 Src.getOperand(0).getValueType() == MVT::v64i8);
42522 // Prefer movmsk for AVX512 for (bitcast (setlt X, 0)) which can be handled
42523 // directly with vpmovmskb/vmovmskps/vmovmskpd.
42524 if (Src.getOpcode() == ISD::SETCC && Src.hasOneUse() &&
42525 cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETLT &&
42526 ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode())) {
42527 EVT CmpVT = Src.getOperand(0).getValueType();
42528 EVT EltVT = CmpVT.getVectorElementType();
42529 if (CmpVT.getSizeInBits() <= 256 &&
42530 (EltVT == MVT::i8 || EltVT == MVT::i32 || EltVT == MVT::i64))
42531 PreferMovMsk = true;
42534 // With AVX512 vxi1 types are legal and we prefer using k-regs.
42535 // MOVMSK is supported in SSE2 or later.
42536 if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !PreferMovMsk))
42537 return SDValue();
42539 // If the upper ops of a concatenation are undef, then try to bitcast the
42540 // lower op and extend.
42541 SmallVector<SDValue, 4> SubSrcOps;
42542 if (collectConcatOps(Src.getNode(), SubSrcOps, DAG) &&
42543 SubSrcOps.size() >= 2) {
42544 SDValue LowerOp = SubSrcOps[0];
42545 ArrayRef<SDValue> UpperOps(std::next(SubSrcOps.begin()), SubSrcOps.end());
42546 if (LowerOp.getOpcode() == ISD::SETCC &&
42547 all_of(UpperOps, [](SDValue Op) { return Op.isUndef(); })) {
42548 EVT SubVT = VT.getIntegerVT(
42549 *DAG.getContext(), LowerOp.getValueType().getVectorMinNumElements());
42550 if (SDValue V = combineBitcastvxi1(DAG, SubVT, LowerOp, DL, Subtarget)) {
42551 EVT IntVT = VT.getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
42552 return DAG.getBitcast(VT, DAG.getNode(ISD::ANY_EXTEND, DL, IntVT, V));
42557 // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v2f64 and
42558 // v4f64. So all legal 128-bit and 256-bit vectors are covered except for
42559 // v8i16 and v16i16.
42560 // For these two cases, we can shuffle the upper element bytes to a
42561 // consecutive sequence at the start of the vector and treat the results as
42562 // v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
42563 // for v16i16 this is not the case, because the shuffle is expensive, so we
42564 // avoid sign-extending to this type entirely.
42565 // For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
42566 // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
42567 MVT SExtVT;
42568 bool PropagateSExt = false;
42569 switch (SrcVT.getSimpleVT().SimpleTy) {
42570 default:
42571 return SDValue();
42572 case MVT::v2i1:
42573 SExtVT = MVT::v2i64;
42574 break;
42575 case MVT::v4i1:
42576 SExtVT = MVT::v4i32;
42577 // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
42578 // sign-extend to a 256-bit operation to avoid truncation.
42579 if (Subtarget.hasAVX() &&
42580 checkBitcastSrcVectorSize(Src, 256, Subtarget.hasAVX2())) {
42581 SExtVT = MVT::v4i64;
42582 PropagateSExt = true;
42584 break;
42585 case MVT::v8i1:
42586 SExtVT = MVT::v8i16;
42587 // For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
42588 // sign-extend to a 256-bit operation to match the compare.
42589 // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
42590 // 256-bit because the shuffle is cheaper than sign extending the result of
42591 // the compare.
42592 if (Subtarget.hasAVX() && (checkBitcastSrcVectorSize(Src, 256, true) ||
42593 checkBitcastSrcVectorSize(Src, 512, true))) {
42594 SExtVT = MVT::v8i32;
42595 PropagateSExt = true;
42597 break;
42598 case MVT::v16i1:
42599 SExtVT = MVT::v16i8;
42600 // For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
42601 // it is not profitable to sign-extend to 256-bit because this will
42602 // require an extra cross-lane shuffle which is more expensive than
42603 // truncating the result of the compare to 128-bits.
42604 break;
42605 case MVT::v32i1:
42606 SExtVT = MVT::v32i8;
42607 break;
42608 case MVT::v64i1:
42609 // If we have AVX512F but not AVX512BW, and the input is a truncate from
42610 // v64i8 (checked earlier), then split the input and make two pmovmskbs.
42611 if (Subtarget.hasAVX512()) {
42612 if (Subtarget.hasBWI())
42613 return SDValue();
42614 SExtVT = MVT::v64i8;
42615 break;
42617 // Split if this is a <64 x i8> comparison result.
42618 if (checkBitcastSrcVectorSize(Src, 512, false)) {
42619 SExtVT = MVT::v64i8;
42620 break;
42622 return SDValue();
42625 SDValue V = PropagateSExt ? signExtendBitcastSrcVector(DAG, SExtVT, Src, DL)
42626 : DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
42628 if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8 || SExtVT == MVT::v64i8) {
42629 V = getPMOVMSKB(DL, V, DAG, Subtarget);
42630 } else {
42631 if (SExtVT == MVT::v8i16) {
42632 V = widenSubVector(V, false, Subtarget, DAG, DL, 256);
42633 V = DAG.getNode(ISD::TRUNCATE, DL, MVT::v16i8, V);
42635 V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
42638 EVT IntVT =
42639 EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements());
42640 V = DAG.getZExtOrTrunc(V, DL, IntVT);
42641 return DAG.getBitcast(VT, V);
42644 // Convert a vXi1 constant build vector to the same width scalar integer.
42645 static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
42646 EVT SrcVT = Op.getValueType();
42647 assert(SrcVT.getVectorElementType() == MVT::i1 &&
42648 "Expected a vXi1 vector");
42649 assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
42650 "Expected a constant build vector");
42652 APInt Imm(SrcVT.getVectorNumElements(), 0);
42653 for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
42654 SDValue In = Op.getOperand(Idx);
42655 if (!In.isUndef() && (In->getAsZExtVal() & 0x1))
42656 Imm.setBit(Idx);
42658 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
42659 return DAG.getConstant(Imm, SDLoc(Op), IntVT);
42662 static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG,
42663 TargetLowering::DAGCombinerInfo &DCI,
42664 const X86Subtarget &Subtarget) {
42665 assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast");
42667 if (!DCI.isBeforeLegalizeOps())
42668 return SDValue();
42670 // Only do this if we have k-registers.
42671 if (!Subtarget.hasAVX512())
42672 return SDValue();
42674 EVT DstVT = N->getValueType(0);
42675 SDValue Op = N->getOperand(0);
42676 EVT SrcVT = Op.getValueType();
42678 if (!Op.hasOneUse())
42679 return SDValue();
42681 // Look for logic ops.
42682 if (Op.getOpcode() != ISD::AND &&
42683 Op.getOpcode() != ISD::OR &&
42684 Op.getOpcode() != ISD::XOR)
42685 return SDValue();
42687 // Make sure we have a bitcast between mask registers and a scalar type.
42688 if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
42689 DstVT.isScalarInteger()) &&
42690 !(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 &&
42691 SrcVT.isScalarInteger()))
42692 return SDValue();
42694 SDValue LHS = Op.getOperand(0);
42695 SDValue RHS = Op.getOperand(1);
42697 if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST &&
42698 LHS.getOperand(0).getValueType() == DstVT)
42699 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0),
42700 DAG.getBitcast(DstVT, RHS));
42702 if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST &&
42703 RHS.getOperand(0).getValueType() == DstVT)
42704 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
42705 DAG.getBitcast(DstVT, LHS), RHS.getOperand(0));
42707 // If the RHS is a vXi1 build vector, this is a good reason to flip too.
42708 // Most of these have to move a constant from the scalar domain anyway.
42709 if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) {
42710 RHS = combinevXi1ConstantToInteger(RHS, DAG);
42711 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
42712 DAG.getBitcast(DstVT, LHS), RHS);
42715 return SDValue();
42718 static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
42719 const X86Subtarget &Subtarget) {
42720 SDLoc DL(BV);
42721 unsigned NumElts = BV->getNumOperands();
42722 SDValue Splat = BV->getSplatValue();
42724 // Build MMX element from integer GPR or SSE float values.
42725 auto CreateMMXElement = [&](SDValue V) {
42726 if (V.isUndef())
42727 return DAG.getUNDEF(MVT::x86mmx);
42728 if (V.getValueType().isFloatingPoint()) {
42729 if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
42730 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V);
42731 V = DAG.getBitcast(MVT::v2i64, V);
42732 return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V);
42734 V = DAG.getBitcast(MVT::i32, V);
42735 } else {
42736 V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32);
42738 return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V);
42741 // Convert build vector ops to MMX data in the bottom elements.
42742 SmallVector<SDValue, 8> Ops;
42744 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42746 // Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
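// (PUNPCKLBW first duplicates an i8 splat into the low 16-bit word; PSHUFW
// with an immediate of 0 (or 0x44 for 32-bit elements) then replicates that
// word/dword across the whole MMX register.)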
42747 if (Splat) {
42748 if (Splat.isUndef())
42749 return DAG.getUNDEF(MVT::x86mmx);
42751 Splat = CreateMMXElement(Splat);
42753 if (Subtarget.hasSSE1()) {
42754 // Unpack v8i8 to splat i8 elements to lowest 16-bits.
42755 if (NumElts == 8)
42756 Splat = DAG.getNode(
42757 ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
42758 DAG.getTargetConstant(Intrinsic::x86_mmx_punpcklbw, DL,
42759 TLI.getPointerTy(DAG.getDataLayout())),
42760 Splat, Splat);
42762 // Use PSHUFW to repeat 16-bit elements.
42763 unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
42764 return DAG.getNode(
42765 ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
42766 DAG.getTargetConstant(Intrinsic::x86_sse_pshuf_w, DL,
42767 TLI.getPointerTy(DAG.getDataLayout())),
42768 Splat, DAG.getTargetConstant(ShufMask, DL, MVT::i8));
42770 Ops.append(NumElts, Splat);
42771 } else {
42772 for (unsigned i = 0; i != NumElts; ++i)
42773 Ops.push_back(CreateMMXElement(BV->getOperand(i)));
42776 // Use tree of PUNPCKLs to build up general MMX vector.
42777 while (Ops.size() > 1) {
42778 unsigned NumOps = Ops.size();
42779 unsigned IntrinOp =
42780 (NumOps == 2 ? Intrinsic::x86_mmx_punpckldq
42781 : (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd
42782 : Intrinsic::x86_mmx_punpcklbw));
42783 SDValue Intrin = DAG.getTargetConstant(
42784 IntrinOp, DL, TLI.getPointerTy(DAG.getDataLayout()));
42785 for (unsigned i = 0; i != NumOps; i += 2)
42786 Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin,
42787 Ops[i], Ops[i + 1]);
42788 Ops.resize(NumOps / 2);
42791 return Ops[0];
42794 // Recursive function that attempts to find if a bool vector node was originally
42795 // a vector/float/double that got truncated/extended/bitcast to/from a scalar
42796 // integer. If so, replace the scalar ops with bool vector equivalents back down
42797 // the chain.
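// e.g. bitcasting an i8 that is (trunc (i16 bitcast (v16i1 X))) to v8i1 can
// become (extract_subvector (v16i1 X), 0), staying in the mask domain.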
42798 static SDValue combineBitcastToBoolVector(EVT VT, SDValue V, const SDLoc &DL,
42799 SelectionDAG &DAG,
42800 const X86Subtarget &Subtarget) {
42801 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42802 unsigned Opc = V.getOpcode();
42803 switch (Opc) {
42804 case ISD::BITCAST: {
42805 // Bitcast from a vector/float/double, we can cheaply bitcast to VT.
42806 SDValue Src = V.getOperand(0);
42807 EVT SrcVT = Src.getValueType();
42808 if (SrcVT.isVector() || SrcVT.isFloatingPoint())
42809 return DAG.getBitcast(VT, Src);
42810 break;
42812 case ISD::TRUNCATE: {
42813 // If we find a suitable source, a truncated scalar becomes a subvector.
42814 SDValue Src = V.getOperand(0);
42815 EVT NewSrcVT =
42816 EVT::getVectorVT(*DAG.getContext(), MVT::i1, Src.getValueSizeInBits());
42817 if (TLI.isTypeLegal(NewSrcVT))
42818 if (SDValue N0 =
42819 combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
42820 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, N0,
42821 DAG.getIntPtrConstant(0, DL));
42822 break;
42824 case ISD::ANY_EXTEND:
42825 case ISD::ZERO_EXTEND: {
42826 // If we find a suitable source, an extended scalar becomes a subvector.
42827 SDValue Src = V.getOperand(0);
42828 EVT NewSrcVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
42829 Src.getScalarValueSizeInBits());
42830 if (TLI.isTypeLegal(NewSrcVT))
42831 if (SDValue N0 =
42832 combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
42833 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
42834 Opc == ISD::ANY_EXTEND ? DAG.getUNDEF(VT)
42835 : DAG.getConstant(0, DL, VT),
42836 N0, DAG.getIntPtrConstant(0, DL));
42837 break;
42839 case ISD::OR: {
42840 // If we find suitable sources, we can just move an OR to the vector domain.
42841 SDValue Src0 = V.getOperand(0);
42842 SDValue Src1 = V.getOperand(1);
42843 if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
42844 if (SDValue N1 = combineBitcastToBoolVector(VT, Src1, DL, DAG, Subtarget))
42845 return DAG.getNode(Opc, DL, VT, N0, N1);
42846 break;
42848 case ISD::SHL: {
42849 // If we find a suitable source, a SHL becomes a KSHIFTL.
42850 SDValue Src0 = V.getOperand(0);
42851 if ((VT == MVT::v8i1 && !Subtarget.hasDQI()) ||
42852 ((VT == MVT::v32i1 || VT == MVT::v64i1) && !Subtarget.hasBWI()))
42853 break;
42855 if (auto *Amt = dyn_cast<ConstantSDNode>(V.getOperand(1)))
42856 if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
42857 return DAG.getNode(
42858 X86ISD::KSHIFTL, DL, VT, N0,
42859 DAG.getTargetConstant(Amt->getZExtValue(), DL, MVT::i8));
42860 break;
42863 return SDValue();
42866 static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
42867 TargetLowering::DAGCombinerInfo &DCI,
42868 const X86Subtarget &Subtarget) {
42869 SDValue N0 = N->getOperand(0);
42870 EVT VT = N->getValueType(0);
42871 EVT SrcVT = N0.getValueType();
42872 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42874 // Try to match patterns such as
42875 // (i16 bitcast (v16i1 x))
42876 // ->
42877 // (i16 movmsk (16i8 sext (v16i1 x)))
42878 // before the setcc result is scalarized on subtargets that don't have legal
42879 // vxi1 types.
42880 if (DCI.isBeforeLegalize()) {
42881 SDLoc dl(N);
42882 if (SDValue V = combineBitcastvxi1(DAG, VT, N0, dl, Subtarget))
42883 return V;
42885 // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
42886 // type, widen both sides to avoid a trip through memory.
42887 if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() &&
42888 Subtarget.hasAVX512()) {
42889 N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0);
42890 N0 = DAG.getBitcast(MVT::v8i1, N0);
42891 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0,
42892 DAG.getIntPtrConstant(0, dl));
42895 // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
42896 // type, widen both sides to avoid a trip through memory.
42897 if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() &&
42898 Subtarget.hasAVX512()) {
42899 // Use zeros for the widening if we already have some zeroes. This can
42900 // allow SimplifyDemandedBits to remove scalar ANDs that may be
42901 // downstream of this.
42902 // FIXME: It might make sense to detect a concat_vectors with a mix of
42903 // zeroes and undef and turn it into insert_subvector for i1 vectors as
42904 // a separate combine. What we can't do is canonicalize the operands of
42905 // such a concat or we'll get into a loop with SimplifyDemandedBits.
42906 if (N0.getOpcode() == ISD::CONCAT_VECTORS) {
42907 SDValue LastOp = N0.getOperand(N0.getNumOperands() - 1);
42908 if (ISD::isBuildVectorAllZeros(LastOp.getNode())) {
42909 SrcVT = LastOp.getValueType();
42910 unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
42911 SmallVector<SDValue, 4> Ops(N0->op_begin(), N0->op_end());
42912 Ops.resize(NumConcats, DAG.getConstant(0, dl, SrcVT));
42913 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
42914 N0 = DAG.getBitcast(MVT::i8, N0);
42915 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
42919 unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
42920 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
42921 Ops[0] = N0;
42922 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
42923 N0 = DAG.getBitcast(MVT::i8, N0);
42924 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
42926 } else {
42927 // If we're bitcasting from iX to vXi1, see if the integer originally
42928 // began as a vXi1 and whether we can remove the bitcast entirely.
42929 if (VT.isVector() && VT.getScalarType() == MVT::i1 &&
42930 SrcVT.isScalarInteger() && TLI.isTypeLegal(VT)) {
42931 if (SDValue V =
42932 combineBitcastToBoolVector(VT, N0, SDLoc(N), DAG, Subtarget))
42933 return V;
42937 // Look for (i8 (bitcast (v8i1 (extract_subvector (v16i1 X), 0)))) and
42938 // replace with (i8 (trunc (i16 (bitcast (v16i1 X))))). This can occur
42939 // due to insert_subvector legalization on KNL. By promoting the copy to i16
42940 // we can help with known bits propagation from the vXi1 domain to the
42941 // scalar domain.
42942 if (VT == MVT::i8 && SrcVT == MVT::v8i1 && Subtarget.hasAVX512() &&
42943 !Subtarget.hasDQI() && N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
42944 N0.getOperand(0).getValueType() == MVT::v16i1 &&
42945 isNullConstant(N0.getOperand(1)))
42946 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT,
42947 DAG.getBitcast(MVT::i16, N0.getOperand(0)));
42949 // Canonicalize (bitcast (vbroadcast_load)) so that the output of the bitcast
42950 // and the vbroadcast_load are both integer or both fp. In some cases this
42951 // will remove the bitcast entirely.
42952 if (N0.getOpcode() == X86ISD::VBROADCAST_LOAD && N0.hasOneUse() &&
42953 VT.isFloatingPoint() != SrcVT.isFloatingPoint() && VT.isVector()) {
42954 auto *BCast = cast<MemIntrinsicSDNode>(N0);
42955 unsigned SrcVTSize = SrcVT.getScalarSizeInBits();
42956 unsigned MemSize = BCast->getMemoryVT().getScalarSizeInBits();
42957 // Don't swap i8/i16 since we don't have fp types of that size.
42958 if (MemSize >= 32) {
42959 MVT MemVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(MemSize)
42960 : MVT::getIntegerVT(MemSize);
42961 MVT LoadVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(SrcVTSize)
42962 : MVT::getIntegerVT(SrcVTSize);
42963 LoadVT = MVT::getVectorVT(LoadVT, SrcVT.getVectorNumElements());
42965 SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
42966 SDValue Ops[] = { BCast->getChain(), BCast->getBasePtr() };
42967 SDValue ResNode =
42968 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
42969 MemVT, BCast->getMemOperand());
42970 DAG.ReplaceAllUsesOfValueWith(SDValue(BCast, 1), ResNode.getValue(1));
42971 return DAG.getBitcast(VT, ResNode);
42975 // Since MMX types are special and don't usually play with other vector types,
42976 // it's better to handle them early to be sure we emit efficient code by
42977 // avoiding store-load conversions.
42978 if (VT == MVT::x86mmx) {
42979 // Detect MMX constant vectors.
42980 APInt UndefElts;
42981 SmallVector<APInt, 1> EltBits;
42982 if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
42983 SDLoc DL(N0);
42984 // Handle zero-extension of i32 with MOVD.
42985 if (EltBits[0].countl_zero() >= 32)
42986 return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT,
42987 DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32));
42988 // Else, bitcast to a double.
42989 // TODO - investigate supporting sext 32-bit immediates on x86_64.
42990 APFloat F64(APFloat::IEEEdouble(), EltBits[0]);
42991 return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64));
42994 // Detect bitcasts to x86mmx low word.
42995 if (N0.getOpcode() == ISD::BUILD_VECTOR &&
42996 (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) &&
42997 N0.getOperand(0).getValueType() == SrcVT.getScalarType()) {
42998 bool LowUndef = true, AllUndefOrZero = true;
42999 for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
43000 SDValue Op = N0.getOperand(i);
43001 LowUndef &= Op.isUndef() || (i >= e/2);
43002 AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op));
43004 if (AllUndefOrZero) {
43005 SDValue N00 = N0.getOperand(0);
43006 SDLoc dl(N00);
43007 N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32)
43008 : DAG.getZExtOrTrunc(N00, dl, MVT::i32);
43009 return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00);
43013 // Detect bitcasts of 64-bit build vectors and convert to a
43014 // MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the
43015 // lowest element.
43016 if (N0.getOpcode() == ISD::BUILD_VECTOR &&
43017 (SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 ||
43018 SrcVT == MVT::v8i8))
43019 return createMMXBuildVector(cast<BuildVectorSDNode>(N0), DAG, Subtarget);
43021 // Detect bitcasts between element or subvector extraction to x86mmx.
43022 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
43023 N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
43024 isNullConstant(N0.getOperand(1))) {
43025 SDValue N00 = N0.getOperand(0);
43026 if (N00.getValueType().is128BitVector())
43027 return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
43028 DAG.getBitcast(MVT::v2i64, N00));
43031 // Detect bitcasts from FP_TO_SINT to x86mmx.
43032 if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) {
43033 SDLoc DL(N0);
43034 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
43035 DAG.getUNDEF(MVT::v2i32));
43036 return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
43037 DAG.getBitcast(MVT::v2i64, Res));
43041 // Try to remove a bitcast of constant vXi1 vector. We have to legalize
43042 // most of these to scalar anyway.
43043 if (Subtarget.hasAVX512() && VT.isScalarInteger() &&
43044 SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
43045 ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
43046 return combinevXi1ConstantToInteger(N0, DAG);
43049 if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
43050 VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
43051 isa<ConstantSDNode>(N0)) {
43052 auto *C = cast<ConstantSDNode>(N0);
43053 if (C->isAllOnes())
43054 return DAG.getConstant(1, SDLoc(N0), VT);
43055 if (C->isZero())
43056 return DAG.getConstant(0, SDLoc(N0), VT);
43059 // Look for MOVMSK that is maybe truncated and then bitcasted to vXi1.
43060 // Turn it into a sign bit compare that produces a k-register. This avoids
43061 // a trip through a GPR.
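// i.e. (vNi1 bitcast (trunc (movmsk V))) becomes a SETLT-against-zero on V,
// which tests the same per-element sign bits but keeps the result in a mask
// register.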
43062 if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
43063 VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
43064 isPowerOf2_32(VT.getVectorNumElements())) {
43065 unsigned NumElts = VT.getVectorNumElements();
43066 SDValue Src = N0;
43068 // Peek through truncate.
43069 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse())
43070 Src = N0.getOperand(0);
43072 if (Src.getOpcode() == X86ISD::MOVMSK && Src.hasOneUse()) {
43073 SDValue MovmskIn = Src.getOperand(0);
43074 MVT MovmskVT = MovmskIn.getSimpleValueType();
43075 unsigned MovMskElts = MovmskVT.getVectorNumElements();
43077 // We allow extra bits of the movmsk to be used since they are known zero.
43078 // We can't convert a VPMOVMSKB without avx512bw.
43079 if (MovMskElts <= NumElts &&
43080 (Subtarget.hasBWI() || MovmskVT.getVectorElementType() != MVT::i8)) {
43081 EVT IntVT = EVT(MovmskVT).changeVectorElementTypeToInteger();
43082 MovmskIn = DAG.getBitcast(IntVT, MovmskIn);
43083 SDLoc dl(N);
43084 MVT CmpVT = MVT::getVectorVT(MVT::i1, MovMskElts);
43085 SDValue Cmp = DAG.getSetCC(dl, CmpVT, MovmskIn,
43086 DAG.getConstant(0, dl, IntVT), ISD::SETLT);
43087 if (EVT(CmpVT) == VT)
43088 return Cmp;
43090 // Pad with zeroes up to original VT to replace the zeroes that were
43091 // being used from the MOVMSK.
43092 unsigned NumConcats = NumElts / MovMskElts;
43093 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, dl, CmpVT));
43094 Ops[0] = Cmp;
43095 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Ops);
43100 // Try to remove bitcasts from input and output of mask arithmetic to
43101 // remove GPR<->K-register crossings.
43102 if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget))
43103 return V;
43105 // Convert a bitcasted integer logic operation that has one bitcasted
43106 // floating-point operand into a floating-point logic operation. This may
43107 // create a load of a constant, but that is cheaper than materializing the
43108 // constant in an integer register and transferring it to an SSE register or
43109 // transferring the SSE operand to integer register and back.
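// E.g. (f32 (bitcast (and (i32 (bitcast (f32 X))), (i32 C)))) becomes
// (FAND X, (f32 (bitcast C))), keeping the value in an SSE register.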
43110 unsigned FPOpcode;
43111 switch (N0.getOpcode()) {
43112 case ISD::AND: FPOpcode = X86ISD::FAND; break;
43113 case ISD::OR: FPOpcode = X86ISD::FOR; break;
43114 case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
43115 default: return SDValue();
43118 // Check if we have a bitcast from another integer type as well.
43119 if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
43120 (Subtarget.hasSSE2() && VT == MVT::f64) ||
43121 (Subtarget.hasFP16() && VT == MVT::f16) ||
43122 (Subtarget.hasSSE2() && VT.isInteger() && VT.isVector() &&
43123 TLI.isTypeLegal(VT))))
43124 return SDValue();
43126 SDValue LogicOp0 = N0.getOperand(0);
43127 SDValue LogicOp1 = N0.getOperand(1);
43128 SDLoc DL0(N0);
43130 // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
43131 if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
43132 LogicOp0.hasOneUse() && LogicOp0.getOperand(0).hasOneUse() &&
43133 LogicOp0.getOperand(0).getValueType() == VT &&
43134 !isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
43135 SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
43136 unsigned Opcode = VT.isFloatingPoint() ? FPOpcode : N0.getOpcode();
43137 return DAG.getNode(Opcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
43139 // bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
43140 if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
43141 LogicOp1.hasOneUse() && LogicOp1.getOperand(0).hasOneUse() &&
43142 LogicOp1.getOperand(0).getValueType() == VT &&
43143 !isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
43144 SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
43145 unsigned Opcode = VT.isFloatingPoint() ? FPOpcode : N0.getOpcode();
43146 return DAG.getNode(Opcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
43149 return SDValue();
43152 // Match (mul (zext a), (sext b)) patterns suitable for VPDPBUSD.
43153 static bool detectExtMul(SelectionDAG &DAG, const SDValue &Mul, SDValue &Op0,
43154 SDValue &Op1) {
43155 Op0 = Mul.getOperand(0);
43156 Op1 = Mul.getOperand(1);
43158 // Operand 1 should be the sign-extended value.
43159 if (Op0.getOpcode() == ISD::SIGN_EXTEND)
43160 std::swap(Op0, Op1);
43162 auto IsFreeTruncation = [](SDValue &Op) -> bool {
43163 if ((Op.getOpcode() == ISD::ZERO_EXTEND ||
43164 Op.getOpcode() == ISD::SIGN_EXTEND) &&
43165 Op.getOperand(0).getScalarValueSizeInBits() <= 8)
43166 return true;
43168 auto *BV = dyn_cast<BuildVectorSDNode>(Op);
43169 return (BV && BV->isConstant());
43172 // (dpbusd (zext a), (sext b)). Since the first operand should be an unsigned
43173 // value, we check that Op0 is a zero-extended value. Op1 should be a signed
43174 // value, so we just check its significant bits.
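// E.g. (mul (zext (v16i8 A) to v16i32), (sext (v16i8 B) to v16i32)) qualifies:
// A is known to fit in 8 unsigned bits and B in 8 signed bits, matching the
// u8 x s8 inputs of VPDPBUSD.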
43175 if ((IsFreeTruncation(Op0) &&
43176 DAG.computeKnownBits(Op0).countMaxActiveBits() <= 8) &&
43177 (IsFreeTruncation(Op1) && DAG.ComputeMaxSignificantBits(Op1) <= 8))
43178 return true;
43180 return false;
43183 // Given an ABS node, detect the following pattern:
43184 // (ABS (SUB (ZERO_EXTEND a), (ZERO_EXTEND b))).
43185 // This is useful as it is the input into a SAD pattern.
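// For example, (abs (sub (zext <16 x i8> A to <16 x i32>),
//                        (zext <16 x i8> B to <16 x i32>)))
// is the per-element absolute difference that PSADBW sums in groups of 8 bytes.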
43186 static bool detectZextAbsDiff(const SDValue &Abs, SDValue &Op0, SDValue &Op1) {
43187 SDValue AbsOp1 = Abs->getOperand(0);
43188 if (AbsOp1.getOpcode() != ISD::SUB)
43189 return false;
43191 Op0 = AbsOp1.getOperand(0);
43192 Op1 = AbsOp1.getOperand(1);
43194 // Check if the operands of the sub are zero-extended from vectors of i8.
43195 if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
43196 Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
43197 Op1.getOpcode() != ISD::ZERO_EXTEND ||
43198 Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
43199 return false;
43201 return true;
43204 static SDValue createVPDPBUSD(SelectionDAG &DAG, SDValue LHS, SDValue RHS,
43205 unsigned &LogBias, const SDLoc &DL,
43206 const X86Subtarget &Subtarget) {
43207 // Extend or truncate to MVT::i8 first.
43208 MVT Vi8VT =
43209 MVT::getVectorVT(MVT::i8, LHS.getValueType().getVectorElementCount());
43210 LHS = DAG.getZExtOrTrunc(LHS, DL, Vi8VT);
43211 RHS = DAG.getSExtOrTrunc(RHS, DL, Vi8VT);
43213 // VPDPBUSD(<16 x i32> C, <16 x i8> A, <16 x i8> B). For each dst element:
43214 // C[0] = C[0] + A[0]*B[0] + A[1]*B[1] + A[2]*B[2] + A[3]*B[3].
43215 // The src A, B element type is i8, but the dst C element type is i32.
43216 // When we count the reduction stages we use the src vector type vXi8, so we
43217 // need a log-bias of 2 to avoid counting 2 extra stages.
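// E.g. a v16i8 source needs Log2(16) = 4 reduction stages in total, but each
// VPDPBUSD already sums groups of 4 products into an i32 lane, covering
// Log2(4) = 2 of those stages.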
43218 LogBias = 2;
43220 unsigned RegSize = std::max(128u, (unsigned)Vi8VT.getSizeInBits());
43221 if (Subtarget.hasVNNI() && !Subtarget.hasVLX())
43222 RegSize = std::max(512u, RegSize);
43224 // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
43225 // fill in the missing vector elements with 0.
43226 unsigned NumConcat = RegSize / Vi8VT.getSizeInBits();
43227 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, Vi8VT));
43228 Ops[0] = LHS;
43229 MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
43230 SDValue DpOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
43231 Ops[0] = RHS;
43232 SDValue DpOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
43234 // Actually build the DotProduct, split as 256/512 bits for
43235 // AVXVNNI/AVX512VNNI.
43236 auto DpBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43237 ArrayRef<SDValue> Ops) {
43238 MVT VT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
43239 return DAG.getNode(X86ISD::VPDPBUSD, DL, VT, Ops);
43241 MVT DpVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
43242 SDValue Zero = DAG.getConstant(0, DL, DpVT);
43244 return SplitOpsAndApply(DAG, Subtarget, DL, DpVT, {Zero, DpOp0, DpOp1},
43245 DpBuilder, false);
43248 // Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs
43249 // to these zexts.
43250 static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
43251 const SDValue &Zext1, const SDLoc &DL,
43252 const X86Subtarget &Subtarget) {
43253 // Find the appropriate width for the PSADBW.
43254 EVT InVT = Zext0.getOperand(0).getValueType();
43255 unsigned RegSize = std::max(128u, (unsigned)InVT.getSizeInBits());
43257 // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
43258 // fill in the missing vector elements with 0.
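// E.g. a v8i8 input is widened to v16i8 by concatenating a zero vector; the
// extra zero bytes add nothing to the PSADBW sums.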
43259 unsigned NumConcat = RegSize / InVT.getSizeInBits();
43260 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
43261 Ops[0] = Zext0.getOperand(0);
43262 MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
43263 SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
43264 Ops[0] = Zext1.getOperand(0);
43265 SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
43267 // Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
43268 auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43269 ArrayRef<SDValue> Ops) {
43270 MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
43271 return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops);
43273 MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
43274 return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 },
43275 PSADBWBuilder);
43278 // Attempt to replace a min/max v8i16/v16i8 horizontal reduction with
43279 // PHMINPOSUW.
43280 static SDValue combineMinMaxReduction(SDNode *Extract, SelectionDAG &DAG,
43281 const X86Subtarget &Subtarget) {
43282 // Bail without SSE41.
43283 if (!Subtarget.hasSSE41())
43284 return SDValue();
43286 EVT ExtractVT = Extract->getValueType(0);
43287 if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8)
43288 return SDValue();
43290 // Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns.
43291 ISD::NodeType BinOp;
43292 SDValue Src = DAG.matchBinOpReduction(
43293 Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN}, true);
43294 if (!Src)
43295 return SDValue();
43297 EVT SrcVT = Src.getValueType();
43298 EVT SrcSVT = SrcVT.getScalarType();
43299 if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0)
43300 return SDValue();
43302 SDLoc DL(Extract);
43303 SDValue MinPos = Src;
43305 // First, reduce the source down to 128-bit, applying BinOp to lo/hi.
43306 while (SrcVT.getSizeInBits() > 128) {
43307 SDValue Lo, Hi;
43308 std::tie(Lo, Hi) = splitVector(MinPos, DAG, DL);
43309 SrcVT = Lo.getValueType();
43310 MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi);
43312 assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) ||
43313 (SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) &&
43314 "Unexpected value type");
43316 // PHMINPOSUW applies to UMIN(v8i16); for SMIN/SMAX/UMAX we must apply a mask
43317 // to flip the value accordingly.
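// E.g. for SMAX(v8i16) the mask is 0x7fff per element: x ^ 0x7fff maps the
// signed-max ordering onto the unsigned-min ordering, and XOR'ing the
// PHMINPOSUW result with the same mask recovers the original value.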
43318 SDValue Mask;
43319 unsigned MaskEltsBits = ExtractVT.getSizeInBits();
43320 if (BinOp == ISD::SMAX)
43321 Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT);
43322 else if (BinOp == ISD::SMIN)
43323 Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
43324 else if (BinOp == ISD::UMAX)
43325 Mask = DAG.getAllOnesConstant(DL, SrcVT);
43327 if (Mask)
43328 MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
43330 // For v16i8 cases we need to perform UMIN on pairs of byte elements,
43331 // shuffling each upper element down and inserting zeros. This means that the
43332 // v16i8 UMIN will leave the upper byte of each pair as zero, performing the
43333 // zero-extension ready for the PHMINPOS.
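// E.g. with mask {1,16,3,16,...} each odd byte is paired with a zero byte, so
// after the UMIN every odd byte is 0 and each even byte holds the pair's
// minimum, i.e. a zero-extended i8 value in every i16 lane.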
43334 if (ExtractVT == MVT::i8) {
43335 SDValue Upper = DAG.getVectorShuffle(
43336 SrcVT, DL, MinPos, DAG.getConstant(0, DL, MVT::v16i8),
43337 {1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16});
43338 MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper);
43341 // Perform the PHMINPOS on a v8i16 vector.
43342 MinPos = DAG.getBitcast(MVT::v8i16, MinPos);
43343 MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos);
43344 MinPos = DAG.getBitcast(SrcVT, MinPos);
43346 if (Mask)
43347 MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
43349 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos,
43350 DAG.getIntPtrConstant(0, DL));
43353 // Attempt to replace an all_of/any_of/parity style horizontal reduction with a MOVMSK.
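// E.g. an all_of reduction of a v16i8 sign-bit mask becomes
// (PMOVMSKB X) == 0xffff, an any_of becomes (PMOVMSKB X) != 0, and a parity
// reduction becomes PARITY(PMOVMSKB X).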
43354 static SDValue combinePredicateReduction(SDNode *Extract, SelectionDAG &DAG,
43355 const X86Subtarget &Subtarget) {
43356 // Bail without SSE2.
43357 if (!Subtarget.hasSSE2())
43358 return SDValue();
43360 EVT ExtractVT = Extract->getValueType(0);
43361 unsigned BitWidth = ExtractVT.getSizeInBits();
43362 if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
43363 ExtractVT != MVT::i8 && ExtractVT != MVT::i1)
43364 return SDValue();
43366 // Check for OR(any_of)/AND(all_of)/XOR(parity) horizontal reduction patterns.
43367 ISD::NodeType BinOp;
43368 SDValue Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND});
43369 if (!Match && ExtractVT == MVT::i1)
43370 Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::XOR});
43371 if (!Match)
43372 return SDValue();
43374 // EXTRACT_VECTOR_ELT can require implicit extension of the vector element
43375 // which we can't support here for now.
43376 if (Match.getScalarValueSizeInBits() != BitWidth)
43377 return SDValue();
43379 SDValue Movmsk;
43380 SDLoc DL(Extract);
43381 EVT MatchVT = Match.getValueType();
43382 unsigned NumElts = MatchVT.getVectorNumElements();
43383 unsigned MaxElts = Subtarget.hasInt256() ? 32 : 16;
43384 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43385 LLVMContext &Ctx = *DAG.getContext();
43387 if (ExtractVT == MVT::i1) {
43388 // Special case for (pre-legalization) vXi1 reductions.
43389 if (NumElts > 64 || !isPowerOf2_32(NumElts))
43390 return SDValue();
43391 if (Match.getOpcode() == ISD::SETCC) {
43392 ISD::CondCode CC = cast<CondCodeSDNode>(Match.getOperand(2))->get();
43393 if ((BinOp == ISD::AND && CC == ISD::CondCode::SETEQ) ||
43394 (BinOp == ISD::OR && CC == ISD::CondCode::SETNE)) {
43395 // For all_of(setcc(x,y,eq)) - use (iX)x == (iX)y.
43396 // For any_of(setcc(x,y,ne)) - use (iX)x != (iX)y.
43397 X86::CondCode X86CC;
43398 SDValue LHS = DAG.getFreeze(Match.getOperand(0));
43399 SDValue RHS = DAG.getFreeze(Match.getOperand(1));
43400 APInt Mask = APInt::getAllOnes(LHS.getScalarValueSizeInBits());
43401 if (SDValue V = LowerVectorAllEqual(DL, LHS, RHS, CC, Mask, Subtarget,
43402 DAG, X86CC))
43403 return DAG.getNode(ISD::TRUNCATE, DL, ExtractVT,
43404 getSETCC(X86CC, V, DL, DAG));
43407 if (TLI.isTypeLegal(MatchVT)) {
43408 // If this is a legal AVX512 predicate type then we can just bitcast.
43409 EVT MovmskVT = EVT::getIntegerVT(Ctx, NumElts);
43410 Movmsk = DAG.getBitcast(MovmskVT, Match);
43411 } else {
43412 // Use combineBitcastvxi1 to create the MOVMSK.
43413 while (NumElts > MaxElts) {
43414 SDValue Lo, Hi;
43415 std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
43416 Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
43417 NumElts /= 2;
43419 EVT MovmskVT = EVT::getIntegerVT(Ctx, NumElts);
43420 Movmsk = combineBitcastvxi1(DAG, MovmskVT, Match, DL, Subtarget);
43422 if (!Movmsk)
43423 return SDValue();
43424 Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, NumElts > 32 ? MVT::i64 : MVT::i32);
43425 } else {
43426 // FIXME: Better handling of k-registers or 512-bit vectors?
43427 unsigned MatchSizeInBits = Match.getValueSizeInBits();
43428 if (!(MatchSizeInBits == 128 ||
43429 (MatchSizeInBits == 256 && Subtarget.hasAVX())))
43430 return SDValue();
43432 // Make sure this isn't a vector of 1 element. The perf win from using
43433 // MOVMSK diminishes with fewer elements in the reduction, but it is
43434 // generally better to get the comparison over to the GPRs as soon as
43435 // possible to reduce the number of vector ops.
43436 if (Match.getValueType().getVectorNumElements() < 2)
43437 return SDValue();
43439 // Check that we are extracting a reduction of all sign bits.
43440 if (DAG.ComputeNumSignBits(Match) != BitWidth)
43441 return SDValue();
43443 if (MatchSizeInBits == 256 && BitWidth < 32 && !Subtarget.hasInt256()) {
43444 SDValue Lo, Hi;
43445 std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
43446 Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
43447 MatchSizeInBits = Match.getValueSizeInBits();
43450 // For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
43451 MVT MaskSrcVT;
43452 if (64 == BitWidth || 32 == BitWidth)
43453 MaskSrcVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
43454 MatchSizeInBits / BitWidth);
43455 else
43456 MaskSrcVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);
43458 SDValue BitcastLogicOp = DAG.getBitcast(MaskSrcVT, Match);
43459 Movmsk = getPMOVMSKB(DL, BitcastLogicOp, DAG, Subtarget);
43460 NumElts = MaskSrcVT.getVectorNumElements();
43462 assert((NumElts <= 32 || NumElts == 64) &&
43463 "Not expecting more than 64 elements");
43465 MVT CmpVT = NumElts == 64 ? MVT::i64 : MVT::i32;
43466 if (BinOp == ISD::XOR) {
43467 // parity -> (PARITY(MOVMSK X))
43468 SDValue Result = DAG.getNode(ISD::PARITY, DL, CmpVT, Movmsk);
43469 return DAG.getZExtOrTrunc(Result, DL, ExtractVT);
43472 SDValue CmpC;
43473 ISD::CondCode CondCode;
43474 if (BinOp == ISD::OR) {
43475 // any_of -> MOVMSK != 0
43476 CmpC = DAG.getConstant(0, DL, CmpVT);
43477 CondCode = ISD::CondCode::SETNE;
43478 } else {
43479 // all_of -> MOVMSK == ((1 << NumElts) - 1)
43480 CmpC = DAG.getConstant(APInt::getLowBitsSet(CmpVT.getSizeInBits(), NumElts),
43481 DL, CmpVT);
43482 CondCode = ISD::CondCode::SETEQ;
43485 // The setcc produces an i8 of 0/1, so extend that to the result width and
43486 // negate to get the final 0/-1 mask value.
43487 EVT SetccVT = TLI.getSetCCResultType(DAG.getDataLayout(), Ctx, CmpVT);
43488 SDValue Setcc = DAG.getSetCC(DL, SetccVT, Movmsk, CmpC, CondCode);
43489 SDValue Zext = DAG.getZExtOrTrunc(Setcc, DL, ExtractVT);
43490 SDValue Zero = DAG.getConstant(0, DL, ExtractVT);
43491 return DAG.getNode(ISD::SUB, DL, ExtractVT, Zero, Zext);
43494 static SDValue combineVPDPBUSDPattern(SDNode *Extract, SelectionDAG &DAG,
43495 const X86Subtarget &Subtarget) {
43496 if (!Subtarget.hasVNNI() && !Subtarget.hasAVXVNNI())
43497 return SDValue();
43499 EVT ExtractVT = Extract->getValueType(0);
43500 // Verify the type we're extracting is i32, as the output element type of
43501 // vpdpbusd is i32.
43502 if (ExtractVT != MVT::i32)
43503 return SDValue();
43505 EVT VT = Extract->getOperand(0).getValueType();
43506 if (!isPowerOf2_32(VT.getVectorNumElements()))
43507 return SDValue();
43509 // Match shuffle + add pyramid.
43510 ISD::NodeType BinOp;
43511 SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
43513 // We can't combine to vpdpbusd for zext, because each of the 4 multiplies
43514 // done by vpdpbusd computes a signed 16-bit product that will be sign extended
43515 // before adding into the accumulator.
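// E.g. each VPDPBUSD lane computes C[i] += A[4i+0]*B[4i+0] + ... +
// A[4i+3]*B[4i+3], where A is unsigned i8, B is signed i8, and each product is
// a signed 16-bit value that is sign extended to i32 before the additions.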
43516 // TODO:
43517 // We also need to verify that the multiply has at least 2x the number of bits
43518 // of the input. We shouldn't match
43519 // (sign_extend (mul (vXi9 (zext (vXi8 X))), (vXi9 (zext (vXi8 Y)))).
43520 // if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND))
43521 // Root = Root.getOperand(0);
43523 // If there was a match, we want Root to be a mul.
43524 if (!Root || Root.getOpcode() != ISD::MUL)
43525 return SDValue();
43527 // Check whether we have an extend-and-mul pattern.
43528 SDValue LHS, RHS;
43529 if (!detectExtMul(DAG, Root, LHS, RHS))
43530 return SDValue();
43532 // Create the dot product instruction.
43533 SDLoc DL(Extract);
43534 unsigned StageBias;
43535 SDValue DP = createVPDPBUSD(DAG, LHS, RHS, StageBias, DL, Subtarget);
43537 // If the original vector was wider than 4 elements, sum over the results
43538 // in the DP vector.
43539 unsigned Stages = Log2_32(VT.getVectorNumElements());
43540 EVT DpVT = DP.getValueType();
43542 if (Stages > StageBias) {
43543 unsigned DpElems = DpVT.getVectorNumElements();
43545 for (unsigned i = Stages - StageBias; i > 0; --i) {
43546 SmallVector<int, 16> Mask(DpElems, -1);
43547 for (unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
43548 Mask[j] = MaskEnd + j;
43550 SDValue Shuffle =
43551 DAG.getVectorShuffle(DpVT, DL, DP, DAG.getUNDEF(DpVT), Mask);
43552 DP = DAG.getNode(ISD::ADD, DL, DpVT, DP, Shuffle);
43556 // Return the lowest ExtractSizeInBits bits.
43557 EVT ResVT =
43558 EVT::getVectorVT(*DAG.getContext(), ExtractVT,
43559 DpVT.getSizeInBits() / ExtractVT.getSizeInBits());
43560 DP = DAG.getBitcast(ResVT, DP);
43561 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, DP,
43562 Extract->getOperand(1));
43565 static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
43566 const X86Subtarget &Subtarget) {
43567 // PSADBW is only supported on SSE2 and up.
43568 if (!Subtarget.hasSSE2())
43569 return SDValue();
43571 EVT ExtractVT = Extract->getValueType(0);
43572 // Verify the type we're extracting is either i32 or i64.
43573 // FIXME: Could support other types, but this is what we have coverage for.
43574 if (ExtractVT != MVT::i32 && ExtractVT != MVT::i64)
43575 return SDValue();
43577 EVT VT = Extract->getOperand(0).getValueType();
43578 if (!isPowerOf2_32(VT.getVectorNumElements()))
43579 return SDValue();
43581 // Match shuffle + add pyramid.
43582 ISD::NodeType BinOp;
43583 SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
43585 // The operand is expected to be zero extended from i8
43586 // (verified in detectZextAbsDiff).
43587 // In order to convert to i64 and above, additional any/zero/sign
43588 // extend is expected.
43589 // The zero extend from 32 bit has no mathematical effect on the result.
43590 // Also, the sign extend is effectively a zero extend
43591 // (it extends the sign bit, which is zero).
43592 // So it is correct to skip the sign/zero extend instruction.
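// E.g. a v64i8 SAD reduction is at most 64 * 255 = 16320, so an any, zero or
// sign extend of the i32 result to i64 all yield the same value.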
43593 if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
43594 Root.getOpcode() == ISD::ZERO_EXTEND ||
43595 Root.getOpcode() == ISD::ANY_EXTEND))
43596 Root = Root.getOperand(0);
43598 // If there was a match, we want Root to be an ABS node that is the root of an
43599 // abs-diff pattern.
43600 if (!Root || Root.getOpcode() != ISD::ABS)
43601 return SDValue();
43603 // Check whether we have an abs-diff pattern feeding into the ABS.
43604 SDValue Zext0, Zext1;
43605 if (!detectZextAbsDiff(Root, Zext0, Zext1))
43606 return SDValue();
43608 // Create the SAD instruction.
43609 SDLoc DL(Extract);
43610 SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget);
43612 // If the original vector was wider than 8 elements, sum over the results
43613 // in the SAD vector.
43614 unsigned Stages = Log2_32(VT.getVectorNumElements());
43615 EVT SadVT = SAD.getValueType();
43616 if (Stages > 3) {
43617 unsigned SadElems = SadVT.getVectorNumElements();
43619 for(unsigned i = Stages - 3; i > 0; --i) {
43620 SmallVector<int, 16> Mask(SadElems, -1);
43621 for(unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
43622 Mask[j] = MaskEnd + j;
43624 SDValue Shuffle =
43625 DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
43626 SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
43630 unsigned ExtractSizeInBits = ExtractVT.getSizeInBits();
43631 // Return the lowest ExtractSizeInBits bits.
43632 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), ExtractVT,
43633 SadVT.getSizeInBits() / ExtractSizeInBits);
43634 SAD = DAG.getBitcast(ResVT, SAD);
43635 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, SAD,
43636 Extract->getOperand(1));
43639 // Attempt to peek through a target shuffle and extract the scalar from the
43640 // source.
43641 static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
43642 TargetLowering::DAGCombinerInfo &DCI,
43643 const X86Subtarget &Subtarget) {
43644 if (DCI.isBeforeLegalizeOps())
43645 return SDValue();
43647 SDLoc dl(N);
43648 SDValue Src = N->getOperand(0);
43649 SDValue Idx = N->getOperand(1);
43651 EVT VT = N->getValueType(0);
43652 EVT SrcVT = Src.getValueType();
43653 EVT SrcSVT = SrcVT.getVectorElementType();
43654 unsigned SrcEltBits = SrcSVT.getSizeInBits();
43655 unsigned NumSrcElts = SrcVT.getVectorNumElements();
43657 // Don't attempt this for boolean mask vectors or unknown extraction indices.
43658 if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
43659 return SDValue();
43661 const APInt &IdxC = N->getConstantOperandAPInt(1);
43662 if (IdxC.uge(NumSrcElts))
43663 return SDValue();
43665 SDValue SrcBC = peekThroughBitcasts(Src);
43667 // Handle extract(bitcast(broadcast(scalar_value))).
43668 if (X86ISD::VBROADCAST == SrcBC.getOpcode()) {
43669 SDValue SrcOp = SrcBC.getOperand(0);
43670 EVT SrcOpVT = SrcOp.getValueType();
43671 if (SrcOpVT.isScalarInteger() && VT.isInteger() &&
43672 (SrcOpVT.getSizeInBits() % SrcEltBits) == 0) {
43673 unsigned Scale = SrcOpVT.getSizeInBits() / SrcEltBits;
43674 unsigned Offset = IdxC.urem(Scale) * SrcEltBits;
43675 // TODO support non-zero offsets.
43676 if (Offset == 0) {
43677 SrcOp = DAG.getZExtOrTrunc(SrcOp, dl, SrcVT.getScalarType());
43678 SrcOp = DAG.getZExtOrTrunc(SrcOp, dl, VT);
43679 return SrcOp;
43684 // If we're extracting a single element from a broadcast load and there are
43685 // no other users, just create a single load.
43686 if (SrcBC.getOpcode() == X86ISD::VBROADCAST_LOAD && SrcBC.hasOneUse()) {
43687 auto *MemIntr = cast<MemIntrinsicSDNode>(SrcBC);
43688 unsigned SrcBCWidth = SrcBC.getScalarValueSizeInBits();
43689 if (MemIntr->getMemoryVT().getSizeInBits() == SrcBCWidth &&
43690 VT.getSizeInBits() == SrcBCWidth && SrcEltBits == SrcBCWidth) {
43691 SDValue Load = DAG.getLoad(VT, dl, MemIntr->getChain(),
43692 MemIntr->getBasePtr(),
43693 MemIntr->getPointerInfo(),
43694 MemIntr->getOriginalAlign(),
43695 MemIntr->getMemOperand()->getFlags());
43696 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
43697 return Load;
43701 // Handle extract(bitcast(scalar_to_vector(scalar_value))) for integers.
43702 // TODO: Move to DAGCombine?
43703 if (SrcBC.getOpcode() == ISD::SCALAR_TO_VECTOR && VT.isInteger() &&
43704 SrcBC.getValueType().isInteger() &&
43705 (SrcBC.getScalarValueSizeInBits() % SrcEltBits) == 0 &&
43706 SrcBC.getScalarValueSizeInBits() ==
43707 SrcBC.getOperand(0).getValueSizeInBits()) {
43708 unsigned Scale = SrcBC.getScalarValueSizeInBits() / SrcEltBits;
43709 if (IdxC.ult(Scale)) {
43710 unsigned Offset = IdxC.getZExtValue() * SrcVT.getScalarSizeInBits();
43711 SDValue Scl = SrcBC.getOperand(0);
43712 EVT SclVT = Scl.getValueType();
43713 if (Offset) {
43714 Scl = DAG.getNode(ISD::SRL, dl, SclVT, Scl,
43715 DAG.getShiftAmountConstant(Offset, SclVT, dl));
43717 Scl = DAG.getZExtOrTrunc(Scl, dl, SrcVT.getScalarType());
43718 Scl = DAG.getZExtOrTrunc(Scl, dl, VT);
43719 return Scl;
43723 // Handle extract(truncate(x)) for 0'th index.
43724 // TODO: Treat this as a faux shuffle?
43725 // TODO: When can we use this for general indices?
43726 if (ISD::TRUNCATE == Src.getOpcode() && IdxC == 0 &&
43727 (SrcVT.getSizeInBits() % 128) == 0) {
43728 Src = extract128BitVector(Src.getOperand(0), 0, DAG, dl);
43729 MVT ExtractVT = MVT::getVectorVT(SrcSVT.getSimpleVT(), 128 / SrcEltBits);
43730 return DAG.getNode(N->getOpcode(), dl, VT, DAG.getBitcast(ExtractVT, Src),
43731 Idx);
43734 // We can only legally extract other elements from 128-bit vectors and in
43735 // certain circumstances, depending on SSE-level.
43736 // TODO: Investigate float/double extraction if it will be just stored.
43737 auto GetLegalExtract = [&Subtarget, &DAG, &dl](SDValue Vec, EVT VecVT,
43738 unsigned Idx) {
43739 EVT VecSVT = VecVT.getScalarType();
43740 if ((VecVT.is256BitVector() || VecVT.is512BitVector()) &&
43741 (VecSVT == MVT::i8 || VecSVT == MVT::i16 || VecSVT == MVT::i32 ||
43742 VecSVT == MVT::i64)) {
43743 unsigned EltSizeInBits = VecSVT.getSizeInBits();
43744 unsigned NumEltsPerLane = 128 / EltSizeInBits;
43745 unsigned LaneOffset = (Idx & ~(NumEltsPerLane - 1)) * EltSizeInBits;
43746 unsigned LaneIdx = LaneOffset / Vec.getScalarValueSizeInBits();
43747 VecVT = EVT::getVectorVT(*DAG.getContext(), VecSVT, NumEltsPerLane);
43748 Vec = extract128BitVector(Vec, LaneIdx, DAG, dl);
43749 Idx &= (NumEltsPerLane - 1);
43751 if ((VecVT == MVT::v4i32 || VecVT == MVT::v2i64) &&
43752 ((Idx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
43753 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VecVT.getScalarType(),
43754 DAG.getBitcast(VecVT, Vec),
43755 DAG.getIntPtrConstant(Idx, dl));
43757 if ((VecVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
43758 (VecVT == MVT::v16i8 && Subtarget.hasSSE41())) {
43759 unsigned OpCode = (VecVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
43760 return DAG.getNode(OpCode, dl, MVT::i32, DAG.getBitcast(VecVT, Vec),
43761 DAG.getTargetConstant(Idx, dl, MVT::i8));
43763 return SDValue();
43766 // Resolve the target shuffle inputs and mask.
43767 SmallVector<int, 16> Mask;
43768 SmallVector<SDValue, 2> Ops;
43769 if (!getTargetShuffleInputs(SrcBC, Ops, Mask, DAG))
43770 return SDValue();
43772 // Shuffle inputs must be the same size as the result.
43773 if (llvm::any_of(Ops, [SrcVT](SDValue Op) {
43774 return SrcVT.getSizeInBits() != Op.getValueSizeInBits();
43776 return SDValue();
43778 // Attempt to narrow/widen the shuffle mask to the correct size.
43779 if (Mask.size() != NumSrcElts) {
43780 if ((NumSrcElts % Mask.size()) == 0) {
43781 SmallVector<int, 16> ScaledMask;
43782 int Scale = NumSrcElts / Mask.size();
43783 narrowShuffleMaskElts(Scale, Mask, ScaledMask);
43784 Mask = std::move(ScaledMask);
43785 } else if ((Mask.size() % NumSrcElts) == 0) {
43786 // Simplify Mask based on demanded element.
43787 int ExtractIdx = (int)IdxC.getZExtValue();
43788 int Scale = Mask.size() / NumSrcElts;
43789 int Lo = Scale * ExtractIdx;
43790 int Hi = Scale * (ExtractIdx + 1);
43791 for (int i = 0, e = (int)Mask.size(); i != e; ++i)
43792 if (i < Lo || Hi <= i)
43793 Mask[i] = SM_SentinelUndef;
43795 SmallVector<int, 16> WidenedMask;
43796 while (Mask.size() > NumSrcElts &&
43797 canWidenShuffleElements(Mask, WidenedMask))
43798 Mask = std::move(WidenedMask);
43802 // If narrowing/widening failed, see if we can extract+zero-extend.
43803 int ExtractIdx;
43804 EVT ExtractVT;
43805 if (Mask.size() == NumSrcElts) {
43806 ExtractIdx = Mask[IdxC.getZExtValue()];
43807 ExtractVT = SrcVT;
43808 } else {
43809 unsigned Scale = Mask.size() / NumSrcElts;
43810 if ((Mask.size() % NumSrcElts) != 0 || SrcVT.isFloatingPoint())
43811 return SDValue();
43812 unsigned ScaledIdx = Scale * IdxC.getZExtValue();
43813 if (!isUndefOrZeroInRange(Mask, ScaledIdx + 1, Scale - 1))
43814 return SDValue();
43815 ExtractIdx = Mask[ScaledIdx];
43816 EVT ExtractSVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltBits / Scale);
43817 ExtractVT = EVT::getVectorVT(*DAG.getContext(), ExtractSVT, Mask.size());
43818 assert(SrcVT.getSizeInBits() == ExtractVT.getSizeInBits() &&
43819 "Failed to widen vector type");
43822 // If the shuffle source element is undef/zero then we can just accept it.
43823 if (ExtractIdx == SM_SentinelUndef)
43824 return DAG.getUNDEF(VT);
43826 if (ExtractIdx == SM_SentinelZero)
43827 return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
43828 : DAG.getConstant(0, dl, VT);
43830 SDValue SrcOp = Ops[ExtractIdx / Mask.size()];
43831 ExtractIdx = ExtractIdx % Mask.size();
43832 if (SDValue V = GetLegalExtract(SrcOp, ExtractVT, ExtractIdx))
43833 return DAG.getZExtOrTrunc(V, dl, VT);
43835 return SDValue();
43838 /// Extracting a scalar FP value from vector element 0 is free, so extract each
43839 /// operand first, then perform the math as a scalar op.
43840 static SDValue scalarizeExtEltFP(SDNode *ExtElt, SelectionDAG &DAG,
43841 const X86Subtarget &Subtarget) {
43842 assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Expected extract");
43843 SDValue Vec = ExtElt->getOperand(0);
43844 SDValue Index = ExtElt->getOperand(1);
43845 EVT VT = ExtElt->getValueType(0);
43846 EVT VecVT = Vec.getValueType();
43848 // TODO: If this is a unary/expensive/expand op, allow extraction from a
43849 // non-zero element because the shuffle+scalar op will be cheaper?
43850 if (!Vec.hasOneUse() || !isNullConstant(Index) || VecVT.getScalarType() != VT)
43851 return SDValue();
43853 // Vector FP compares don't fit the pattern of FP math ops (propagate, not
43854 // extract, the condition code), so deal with those as a special-case.
43855 if (Vec.getOpcode() == ISD::SETCC && VT == MVT::i1) {
43856 EVT OpVT = Vec.getOperand(0).getValueType().getScalarType();
43857 if (OpVT != MVT::f32 && OpVT != MVT::f64)
43858 return SDValue();
43860 // extract (setcc X, Y, CC), 0 --> setcc (extract X, 0), (extract Y, 0), CC
43861 SDLoc DL(ExtElt);
43862 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
43863 Vec.getOperand(0), Index);
43864 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
43865 Vec.getOperand(1), Index);
43866 return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1, Vec.getOperand(2));
43869 if (!(VT == MVT::f16 && Subtarget.hasFP16()) && VT != MVT::f32 &&
43870 VT != MVT::f64)
43871 return SDValue();
43873 // Vector FP selects don't fit the pattern of FP math ops (because the
43874 // condition has a different type and we have to change the opcode), so deal
43875 // with those here.
43876 // FIXME: This is restricted to pre type legalization by ensuring the setcc
43877 // has i1 elements. If we loosen this we need to convert vector bool to a
43878 // scalar bool.
43879 if (Vec.getOpcode() == ISD::VSELECT &&
43880 Vec.getOperand(0).getOpcode() == ISD::SETCC &&
43881 Vec.getOperand(0).getValueType().getScalarType() == MVT::i1 &&
43882 Vec.getOperand(0).getOperand(0).getValueType() == VecVT) {
43883 // ext (sel Cond, X, Y), 0 --> sel (ext Cond, 0), (ext X, 0), (ext Y, 0)
43884 SDLoc DL(ExtElt);
43885 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
43886 Vec.getOperand(0).getValueType().getScalarType(),
43887 Vec.getOperand(0), Index);
43888 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
43889 Vec.getOperand(1), Index);
43890 SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
43891 Vec.getOperand(2), Index);
43892 return DAG.getNode(ISD::SELECT, DL, VT, Ext0, Ext1, Ext2);
43895 // TODO: This switch could include FNEG and the x86-specific FP logic ops
43896 // (FAND, FANDN, FOR, FXOR). But that may require enhancements to avoid
43897 // missed load folding and fma+fneg combining.
43898 switch (Vec.getOpcode()) {
43899 case ISD::FMA: // Begin 3 operands
43900 case ISD::FMAD:
43901 case ISD::FADD: // Begin 2 operands
43902 case ISD::FSUB:
43903 case ISD::FMUL:
43904 case ISD::FDIV:
43905 case ISD::FREM:
43906 case ISD::FCOPYSIGN:
43907 case ISD::FMINNUM:
43908 case ISD::FMAXNUM:
43909 case ISD::FMINNUM_IEEE:
43910 case ISD::FMAXNUM_IEEE:
43911 case ISD::FMAXIMUM:
43912 case ISD::FMINIMUM:
43913 case X86ISD::FMAX:
43914 case X86ISD::FMIN:
43915 case ISD::FABS: // Begin 1 operand
43916 case ISD::FSQRT:
43917 case ISD::FRINT:
43918 case ISD::FCEIL:
43919 case ISD::FTRUNC:
43920 case ISD::FNEARBYINT:
43921 case ISD::FROUNDEVEN:
43922 case ISD::FROUND:
43923 case ISD::FFLOOR:
43924 case X86ISD::FRCP:
43925 case X86ISD::FRSQRT: {
43926 // extract (fp X, Y, ...), 0 --> fp (extract X, 0), (extract Y, 0), ...
43927 SDLoc DL(ExtElt);
43928 SmallVector<SDValue, 4> ExtOps;
43929 for (SDValue Op : Vec->ops())
43930 ExtOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, Index));
43931 return DAG.getNode(Vec.getOpcode(), DL, VT, ExtOps);
43933 default:
43934 return SDValue();
43936 llvm_unreachable("All opcodes should return within switch");
43939 /// Try to convert a vector reduction sequence composed of binops and shuffles
43940 /// into horizontal ops.
43941 static SDValue combineArithReduction(SDNode *ExtElt, SelectionDAG &DAG,
43942 const X86Subtarget &Subtarget) {
43943 assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unexpected caller");
43945 // We need at least SSE2 to do anything here.
43946 if (!Subtarget.hasSSE2())
43947 return SDValue();
43949 ISD::NodeType Opc;
43950 SDValue Rdx = DAG.matchBinOpReduction(ExtElt, Opc,
43951 {ISD::ADD, ISD::MUL, ISD::FADD}, true);
43952 if (!Rdx)
43953 return SDValue();
43955 SDValue Index = ExtElt->getOperand(1);
43956 assert(isNullConstant(Index) &&
43957 "Reduction doesn't end in an extract from index 0");
43959 EVT VT = ExtElt->getValueType(0);
43960 EVT VecVT = Rdx.getValueType();
43961 if (VecVT.getScalarType() != VT)
43962 return SDValue();
43964 SDLoc DL(ExtElt);
43965 unsigned NumElts = VecVT.getVectorNumElements();
43966 unsigned EltSizeInBits = VecVT.getScalarSizeInBits();
43968 // Extend v4i8/v8i8 vector to v16i8, with zero or undef upper 64-bits.
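// The padding is zeroed when the caller needs the extra lanes to be inert
// (e.g. feeding PSADBW) and left undef otherwise.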
43969 auto WidenToV16I8 = [&](SDValue V, bool ZeroExtend) {
43970 if (V.getValueType() == MVT::v4i8) {
43971 if (ZeroExtend && Subtarget.hasSSE41()) {
43972 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4i32,
43973 DAG.getConstant(0, DL, MVT::v4i32),
43974 DAG.getBitcast(MVT::i32, V),
43975 DAG.getIntPtrConstant(0, DL));
43976 return DAG.getBitcast(MVT::v16i8, V);
43978 V = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i8, V,
43979 ZeroExtend ? DAG.getConstant(0, DL, MVT::v4i8)
43980 : DAG.getUNDEF(MVT::v4i8));
43982 return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V,
43983 DAG.getUNDEF(MVT::v8i8));
43986 // vXi8 mul reduction - promote to vXi16 mul reduction.
43987 if (Opc == ISD::MUL) {
43988 if (VT != MVT::i8 || NumElts < 4 || !isPowerOf2_32(NumElts))
43989 return SDValue();
43990 if (VecVT.getSizeInBits() >= 128) {
43991 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts / 2);
43992 SDValue Lo = getUnpackl(DAG, DL, VecVT, Rdx, DAG.getUNDEF(VecVT));
43993 SDValue Hi = getUnpackh(DAG, DL, VecVT, Rdx, DAG.getUNDEF(VecVT));
43994 Lo = DAG.getBitcast(WideVT, Lo);
43995 Hi = DAG.getBitcast(WideVT, Hi);
43996 Rdx = DAG.getNode(Opc, DL, WideVT, Lo, Hi);
43997 while (Rdx.getValueSizeInBits() > 128) {
43998 std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
43999 Rdx = DAG.getNode(Opc, DL, Lo.getValueType(), Lo, Hi);
44001 } else {
44002 Rdx = WidenToV16I8(Rdx, false);
44003 Rdx = getUnpackl(DAG, DL, MVT::v16i8, Rdx, DAG.getUNDEF(MVT::v16i8));
44004 Rdx = DAG.getBitcast(MVT::v8i16, Rdx);
44006 if (NumElts >= 8)
44007 Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
44008 DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
44009 {4, 5, 6, 7, -1, -1, -1, -1}));
44010 Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
44011 DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
44012 {2, 3, -1, -1, -1, -1, -1, -1}));
44013 Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
44014 DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
44015 {1, -1, -1, -1, -1, -1, -1, -1}));
44016 Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
44017 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
44020 // vXi8 add reduction - sub-128-bit vector.
44021 if (VecVT == MVT::v4i8 || VecVT == MVT::v8i8) {
44022 Rdx = WidenToV16I8(Rdx, true);
44023 Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
44024 DAG.getConstant(0, DL, MVT::v16i8));
44025 Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
44026 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
44029 // Must be a >=128-bit vector with pow2 elements.
44030 if ((VecVT.getSizeInBits() % 128) != 0 || !isPowerOf2_32(NumElts))
44031 return SDValue();
44033 // vXi8 add reduction - sum lo/hi halves then use PSADBW.
44034 if (VT == MVT::i8) {
44035 while (Rdx.getValueSizeInBits() > 128) {
44036 SDValue Lo, Hi;
44037 std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
44038 VecVT = Lo.getValueType();
44039 Rdx = DAG.getNode(ISD::ADD, DL, VecVT, Lo, Hi);
44041 assert(VecVT == MVT::v16i8 && "v16i8 reduction expected");
44043 SDValue Hi = DAG.getVectorShuffle(
44044 MVT::v16i8, DL, Rdx, Rdx,
44045 {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
44046 Rdx = DAG.getNode(ISD::ADD, DL, MVT::v16i8, Rdx, Hi);
44047 Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
44048 getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
44049 Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
44050 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
44053 // See if we can use vXi8 PSADBW add reduction for larger zext types.
44054 // If the source vector values are 0-255, then we can use PSADBW to
44055 // sum+zext v8i8 subvectors to vXi64, then perform the reduction.
44056 // TODO: See if it's worth avoiding vXi16/i32 truncations?
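// E.g. a v16i16 reduction whose elements are known to be <= 255 can be
// truncated to v16i8 and summed with a single PSADBW against zero, since
// |x - 0| == x for those values.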
44057 if (Opc == ISD::ADD && NumElts >= 4 && EltSizeInBits >= 16 &&
44058 DAG.computeKnownBits(Rdx).getMaxValue().ule(255) &&
44059 (EltSizeInBits == 16 || Rdx.getOpcode() == ISD::ZERO_EXTEND ||
44060 Subtarget.hasAVX512())) {
44061 if (Rdx.getValueType() == MVT::v8i16) {
44062 Rdx = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Rdx,
44063 DAG.getUNDEF(MVT::v8i16));
44064 } else {
44065 EVT ByteVT = VecVT.changeVectorElementType(MVT::i8);
44066 Rdx = DAG.getNode(ISD::TRUNCATE, DL, ByteVT, Rdx);
44067 if (ByteVT.getSizeInBits() < 128)
44068 Rdx = WidenToV16I8(Rdx, true);
44071 // Build the PSADBW, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
44072 auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
44073 ArrayRef<SDValue> Ops) {
44074 MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
44075 SDValue Zero = DAG.getConstant(0, DL, Ops[0].getValueType());
44076 return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops[0], Zero);
44078 MVT SadVT = MVT::getVectorVT(MVT::i64, Rdx.getValueSizeInBits() / 64);
44079 Rdx = SplitOpsAndApply(DAG, Subtarget, DL, SadVT, {Rdx}, PSADBWBuilder);
44081 // TODO: We could truncate to vXi16/vXi32 before performing the reduction.
44082 while (Rdx.getValueSizeInBits() > 128) {
44083 SDValue Lo, Hi;
44084 std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
44085 VecVT = Lo.getValueType();
44086 Rdx = DAG.getNode(ISD::ADD, DL, VecVT, Lo, Hi);
44088 assert(Rdx.getValueType() == MVT::v2i64 && "v2i64 reduction expected");
44090 if (NumElts > 8) {
44091 SDValue RdxHi = DAG.getVectorShuffle(MVT::v2i64, DL, Rdx, Rdx, {1, -1});
44092 Rdx = DAG.getNode(ISD::ADD, DL, MVT::v2i64, Rdx, RdxHi);
44095 VecVT = MVT::getVectorVT(VT.getSimpleVT(), 128 / VT.getSizeInBits());
44096 Rdx = DAG.getBitcast(VecVT, Rdx);
44097 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
44100 // Only use (F)HADD opcodes if they aren't microcoded or if we're minimizing codesize.
44101 if (!shouldUseHorizontalOp(true, DAG, Subtarget))
44102 return SDValue();
44104 unsigned HorizOpcode = Opc == ISD::ADD ? X86ISD::HADD : X86ISD::FHADD;
44106 // 256-bit horizontal instructions operate on 128-bit chunks rather than
44107 // across the whole vector, so we need an extract + hop preliminary stage.
44108 // This is the only step where the operands of the hop are not the same value.
44109 // TODO: We could extend this to handle 512-bit or even longer vectors.
44110 if (((VecVT == MVT::v16i16 || VecVT == MVT::v8i32) && Subtarget.hasSSSE3()) ||
44111 ((VecVT == MVT::v8f32 || VecVT == MVT::v4f64) && Subtarget.hasSSE3())) {
44112 unsigned NumElts = VecVT.getVectorNumElements();
44113 SDValue Hi = extract128BitVector(Rdx, NumElts / 2, DAG, DL);
44114 SDValue Lo = extract128BitVector(Rdx, 0, DAG, DL);
44115 Rdx = DAG.getNode(HorizOpcode, DL, Lo.getValueType(), Hi, Lo);
44116 VecVT = Rdx.getValueType();
44118 if (!((VecVT == MVT::v8i16 || VecVT == MVT::v4i32) && Subtarget.hasSSSE3()) &&
44119 !((VecVT == MVT::v4f32 || VecVT == MVT::v2f64) && Subtarget.hasSSE3()))
44120 return SDValue();
44122 // extract (add (shuf X), X), 0 --> extract (hadd X, X), 0
44123 unsigned ReductionSteps = Log2_32(VecVT.getVectorNumElements());
44124 for (unsigned i = 0; i != ReductionSteps; ++i)
44125 Rdx = DAG.getNode(HorizOpcode, DL, VecVT, Rdx, Rdx);
44127 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
44130 /// Detect vector gather/scatter index generation and convert it from being a
44131 /// bunch of shuffles and extracts into a somewhat faster sequence.
44132 /// For i686, the best sequence is apparently storing the value and loading
44133 /// scalars back, while for x64 we should use 64-bit extracts and shifts.
44134 static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
44135 TargetLowering::DAGCombinerInfo &DCI,
44136 const X86Subtarget &Subtarget) {
44137 if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
44138 return NewOp;
44140 SDValue InputVector = N->getOperand(0);
44141 SDValue EltIdx = N->getOperand(1);
44142 auto *CIdx = dyn_cast<ConstantSDNode>(EltIdx);
44144 EVT SrcVT = InputVector.getValueType();
44145 EVT VT = N->getValueType(0);
44146 SDLoc dl(InputVector);
44147 bool IsPextr = N->getOpcode() != ISD::EXTRACT_VECTOR_ELT;
44148 unsigned NumSrcElts = SrcVT.getVectorNumElements();
44149 unsigned NumEltBits = VT.getScalarSizeInBits();
44150 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44152 if (CIdx && CIdx->getAPIntValue().uge(NumSrcElts))
44153 return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
44155 // Integer Constant Folding.
44156 if (CIdx && VT.isInteger()) {
44157 APInt UndefVecElts;
44158 SmallVector<APInt, 16> EltBits;
44159 unsigned VecEltBitWidth = SrcVT.getScalarSizeInBits();
44160 if (getTargetConstantBitsFromNode(InputVector, VecEltBitWidth, UndefVecElts,
44161 EltBits, true, false)) {
44162 uint64_t Idx = CIdx->getZExtValue();
44163 if (UndefVecElts[Idx])
44164 return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
44165 return DAG.getConstant(EltBits[Idx].zext(NumEltBits), dl, VT);
44168 // Convert extract_element(bitcast(<X x i1>) -> bitcast(extract_subvector()).
44169 // Improves lowering of bool masks on Rust, which splits them into a byte array.
44170 if (InputVector.getOpcode() == ISD::BITCAST && (NumEltBits % 8) == 0) {
44171 SDValue Src = peekThroughBitcasts(InputVector);
44172 if (Src.getValueType().getScalarType() == MVT::i1 &&
44173 TLI.isTypeLegal(Src.getValueType())) {
44174 MVT SubVT = MVT::getVectorVT(MVT::i1, NumEltBits);
44175 SDValue Sub = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, Src,
44176 DAG.getIntPtrConstant(CIdx->getZExtValue() * NumEltBits, dl));
44177 return DAG.getBitcast(VT, Sub);
44182 if (IsPextr) {
44183 if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumEltBits),
44184 DCI))
44185 return SDValue(N, 0);
44187 // PEXTR*(PINSR*(v, s, c), c) -> s (with implicit zext handling).
44188 if ((InputVector.getOpcode() == X86ISD::PINSRB ||
44189 InputVector.getOpcode() == X86ISD::PINSRW) &&
44190 InputVector.getOperand(2) == EltIdx) {
44191 assert(SrcVT == InputVector.getOperand(0).getValueType() &&
44192 "Vector type mismatch");
44193 SDValue Scl = InputVector.getOperand(1);
44194 Scl = DAG.getNode(ISD::TRUNCATE, dl, SrcVT.getScalarType(), Scl);
44195 return DAG.getZExtOrTrunc(Scl, dl, VT);
44198 // TODO - Remove this once we can handle the implicit zero-extension of
44199 // X86ISD::PEXTRW/X86ISD::PEXTRB in combinePredicateReduction and
44200 // combineBasicSADPattern.
44201 return SDValue();
44204 // Detect mmx extraction of all bits as an i64. It works better as a bitcast.
44205 if (VT == MVT::i64 && SrcVT == MVT::v1i64 &&
44206 InputVector.getOpcode() == ISD::BITCAST &&
44207 InputVector.getOperand(0).getValueType() == MVT::x86mmx &&
44208 isNullConstant(EltIdx) && InputVector.hasOneUse())
44209 return DAG.getBitcast(VT, InputVector);
44211 // Detect mmx to i32 conversion through a v2i32 elt extract.
44212 if (VT == MVT::i32 && SrcVT == MVT::v2i32 &&
44213 InputVector.getOpcode() == ISD::BITCAST &&
44214 InputVector.getOperand(0).getValueType() == MVT::x86mmx &&
44215 isNullConstant(EltIdx) && InputVector.hasOneUse())
44216 return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32,
44217 InputVector.getOperand(0));
44219 // Check whether this extract is the root of a sum of absolute differences
44220 // pattern. This has to be done here because we really want it to happen
44221 // pre-legalization.
44222 if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
44223 return SAD;
44225 if (SDValue VPDPBUSD = combineVPDPBUSDPattern(N, DAG, Subtarget))
44226 return VPDPBUSD;
44228 // Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
44229 if (SDValue Cmp = combinePredicateReduction(N, DAG, Subtarget))
44230 return Cmp;
44232 // Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW.
44233 if (SDValue MinMax = combineMinMaxReduction(N, DAG, Subtarget))
44234 return MinMax;
44236 // Attempt to optimize ADD/FADD/MUL reductions with HADD, promotion etc..
44237 if (SDValue V = combineArithReduction(N, DAG, Subtarget))
44238 return V;
44240 if (SDValue V = scalarizeExtEltFP(N, DAG, Subtarget))
44241 return V;
44243 // Attempt to extract an i1 element by using MOVMSK to extract the sign bits
44244 // and then testing the relevant element.
44246 // Note that we only combine extracts on the *same* result number, i.e.
44247 // t0 = merge_values a0, a1, a2, a3
44248 // i1 = extract_vector_elt t0, Constant:i64<2>
44249 // i1 = extract_vector_elt t0, Constant:i64<3>
44250 // but not
44251 // i1 = extract_vector_elt t0:1, Constant:i64<2>
44252 // since the latter would need its own MOVMSK.
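// E.g. two i1 extracts from one v16i1 compare result share a single MOVMSK,
// with extract index i becoming ((movmsk & (1 << i)) == (1 << i)).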
44253 if (SrcVT.getScalarType() == MVT::i1) {
44254 bool IsVar = !CIdx;
44255 SmallVector<SDNode *, 16> BoolExtracts;
44256 unsigned ResNo = InputVector.getResNo();
44257 auto IsBoolExtract = [&BoolExtracts, &ResNo, &IsVar](SDNode *Use) {
44258 if (Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
44259 Use->getOperand(0).getResNo() == ResNo &&
44260 Use->getValueType(0) == MVT::i1) {
44261 BoolExtracts.push_back(Use);
44262 IsVar |= !isa<ConstantSDNode>(Use->getOperand(1));
44263 return true;
44265 return false;
44267 // TODO: Can we drop the oneuse check for constant extracts?
44268 if (all_of(InputVector->uses(), IsBoolExtract) &&
44269 (IsVar || BoolExtracts.size() > 1)) {
44270 EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcElts);
44271 if (SDValue BC =
44272 combineBitcastvxi1(DAG, BCVT, InputVector, dl, Subtarget)) {
44273 for (SDNode *Use : BoolExtracts) {
44274 // extractelement vXi1 X, MaskIdx --> ((movmsk X) & Mask) == Mask
44275 // Mask = 1 << MaskIdx
44276 SDValue MaskIdx = DAG.getZExtOrTrunc(Use->getOperand(1), dl, MVT::i8);
44277 SDValue MaskBit = DAG.getConstant(1, dl, BCVT);
44278 SDValue Mask = DAG.getNode(ISD::SHL, dl, BCVT, MaskBit, MaskIdx);
44279 SDValue Res = DAG.getNode(ISD::AND, dl, BCVT, BC, Mask);
44280 Res = DAG.getSetCC(dl, MVT::i1, Res, Mask, ISD::SETEQ);
44281 DCI.CombineTo(Use, Res);
44283 return SDValue(N, 0);
44288 // If this extract is from a loaded vector value and will be used as an
44289 // integer, that requires a potentially expensive XMM -> GPR transfer.
44290 // Additionally, if we can convert to a scalar integer load, that will likely
44291 // be folded into a subsequent integer op.
44292 // Note: Unlike the related fold for this in DAGCombiner, this is not limited
44293 // to a single-use of the loaded vector. For the reasons above, we
44294 // expect this to be profitable even if it creates an extra load.
44295 bool LikelyUsedAsVector = any_of(N->uses(), [](SDNode *Use) {
44296 return Use->getOpcode() == ISD::STORE ||
44297 Use->getOpcode() == ISD::INSERT_VECTOR_ELT ||
44298 Use->getOpcode() == ISD::SCALAR_TO_VECTOR;
44300 auto *LoadVec = dyn_cast<LoadSDNode>(InputVector);
44301 if (LoadVec && CIdx && ISD::isNormalLoad(LoadVec) && VT.isInteger() &&
44302 SrcVT.getVectorElementType() == VT && DCI.isAfterLegalizeDAG() &&
44303 !LikelyUsedAsVector && LoadVec->isSimple()) {
44304 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44305 SDValue NewPtr =
44306 TLI.getVectorElementPointer(DAG, LoadVec->getBasePtr(), SrcVT, EltIdx);
44307 unsigned PtrOff = VT.getSizeInBits() * CIdx->getZExtValue() / 8;
44308 MachinePointerInfo MPI = LoadVec->getPointerInfo().getWithOffset(PtrOff);
44309 Align Alignment = commonAlignment(LoadVec->getAlign(), PtrOff);
44310 SDValue Load =
44311 DAG.getLoad(VT, dl, LoadVec->getChain(), NewPtr, MPI, Alignment,
44312 LoadVec->getMemOperand()->getFlags(), LoadVec->getAAInfo());
44313 DAG.makeEquivalentMemoryOrdering(LoadVec, Load);
44314 return Load;
44317 return SDValue();
44320 // Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
44321 // This is more or less the reverse of combineBitcastvxi1.
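// E.g. (v8i16 (zero_extend (v8i1 (bitcast (i8 X))))) is lowered by
// broadcasting X to all lanes, AND'ing lane i with (1 << i), comparing the
// result against that bit mask and shifting the 0/-1 setcc value down to 0/1.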
44322 static SDValue combineToExtendBoolVectorInReg(
44323 unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N0, SelectionDAG &DAG,
44324 TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {
44325 if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
44326 Opcode != ISD::ANY_EXTEND)
44327 return SDValue();
44328 if (!DCI.isBeforeLegalizeOps())
44329 return SDValue();
44330 if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
44331 return SDValue();
44333 EVT SVT = VT.getScalarType();
44334 EVT InSVT = N0.getValueType().getScalarType();
44335 unsigned EltSizeInBits = SVT.getSizeInBits();
44337 // Input type must be extending a bool vector (bit-casted from a scalar
44338 // integer) to legal integer types.
44339 if (!VT.isVector())
44340 return SDValue();
44341 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
44342 return SDValue();
44343 if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
44344 return SDValue();
44346 SDValue N00 = N0.getOperand(0);
44347 EVT SclVT = N00.getValueType();
44348 if (!SclVT.isScalarInteger())
44349 return SDValue();
44351 SDValue Vec;
44352 SmallVector<int> ShuffleMask;
44353 unsigned NumElts = VT.getVectorNumElements();
44354 assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
44356 // Broadcast the scalar integer to the vector elements.
44357 if (NumElts > EltSizeInBits) {
44358 // If the scalar integer is greater than the vector element size, then we
44359 // must split it down into sub-sections for broadcasting. For example:
44360 // i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
44361 // i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
44362 assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
44363 unsigned Scale = NumElts / EltSizeInBits;
44364 EVT BroadcastVT = EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
44365 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
44366 Vec = DAG.getBitcast(VT, Vec);
44368 for (unsigned i = 0; i != Scale; ++i)
44369 ShuffleMask.append(EltSizeInBits, i);
44370 Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
44371 } else if (Subtarget.hasAVX2() && NumElts < EltSizeInBits &&
44372 (SclVT == MVT::i8 || SclVT == MVT::i16 || SclVT == MVT::i32)) {
44373 // If we have register broadcast instructions, use the scalar size as the
44374 // element type for the shuffle. Then cast to the wider element type. The
44375 // widened bits won't be used, and this might allow the use of a broadcast
44376 // load.
44377 assert((EltSizeInBits % NumElts) == 0 && "Unexpected integer scale");
44378 unsigned Scale = EltSizeInBits / NumElts;
44379 EVT BroadcastVT =
44380 EVT::getVectorVT(*DAG.getContext(), SclVT, NumElts * Scale);
44381 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
44382 ShuffleMask.append(NumElts * Scale, 0);
44383 Vec = DAG.getVectorShuffle(BroadcastVT, DL, Vec, Vec, ShuffleMask);
44384 Vec = DAG.getBitcast(VT, Vec);
44385 } else {
44386 // For a smaller scalar integer, we can simply any-extend it to the vector
44387 // element size (we don't care about the upper bits) and broadcast it to all
44388 // elements.
44389 SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
44390 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
44391 ShuffleMask.append(NumElts, 0);
44392 Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
44395 // Now, mask the relevant bit in each element.
44396 SmallVector<SDValue, 32> Bits;
44397 for (unsigned i = 0; i != NumElts; ++i) {
44398 int BitIdx = (i % EltSizeInBits);
44399 APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
44400 Bits.push_back(DAG.getConstant(Bit, DL, SVT));
44402 SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
44403 Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
44405 // Compare against the bitmask and extend the result.
44406 EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
44407 Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
44408 Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
44410 // For SEXT this is now done; otherwise shift the result down for
44411 // zero-extension.
44412 if (Opcode == ISD::SIGN_EXTEND)
44413 return Vec;
44414 return DAG.getNode(ISD::SRL, DL, VT, Vec,
44415 DAG.getConstant(EltSizeInBits - 1, DL, VT));
44418 /// If a vector select has an operand that is -1 or 0, try to simplify the
44419 /// select to a bitwise logic operation.
44420 /// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
44421 static SDValue
44422 combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
44423 TargetLowering::DAGCombinerInfo &DCI,
44424 const X86Subtarget &Subtarget) {
44425 SDValue Cond = N->getOperand(0);
44426 SDValue LHS = N->getOperand(1);
44427 SDValue RHS = N->getOperand(2);
44428 EVT VT = LHS.getValueType();
44429 EVT CondVT = Cond.getValueType();
44430 SDLoc DL(N);
44431 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44433 if (N->getOpcode() != ISD::VSELECT)
44434 return SDValue();
44436 assert(CondVT.isVector() && "Vector select expects a vector selector!");
44438 // TODO: Use isNullOrNullSplat() to distinguish constants with undefs?
44439 // TODO: Can we assert that both operands are not zeros (because that should
44440 // get simplified at node creation time)?
44441 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
44442 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
44444 // If both inputs are 0/undef, create a complete zero vector.
44445 // FIXME: As noted above this should be handled by DAGCombiner/getNode.
44446 if (TValIsAllZeros && FValIsAllZeros) {
44447 if (VT.isFloatingPoint())
44448 return DAG.getConstantFP(0.0, DL, VT);
44449 return DAG.getConstant(0, DL, VT);
44452 // To use the condition operand as a bitwise mask, it must have elements that
44453 // are the same size as the select elements, i.e. the condition operand must
44454 // have already been promoted from the IR select condition type <N x i1>.
44455 // Don't check if the types themselves are equal because that excludes
44456 // vector floating-point selects.
44457 if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
44458 return SDValue();
44460 // Try to invert the condition if true value is not all 1s and false value is
44461 // not all 0s. Only do this if the condition has one use.
44462 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
44463 if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
44464 // Check if the selector will be produced by CMPP*/PCMP*.
44465 Cond.getOpcode() == ISD::SETCC &&
44466 // Check if SETCC has already been promoted.
44467 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
44468 CondVT) {
44469 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
44471 if (TValIsAllZeros || FValIsAllOnes) {
44472 SDValue CC = Cond.getOperand(2);
44473 ISD::CondCode NewCC = ISD::getSetCCInverse(
44474 cast<CondCodeSDNode>(CC)->get(), Cond.getOperand(0).getValueType());
44475 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
44476 NewCC);
44477 std::swap(LHS, RHS);
44478 TValIsAllOnes = FValIsAllOnes;
44479 FValIsAllZeros = TValIsAllZeros;
44483 // Cond value must be 'sign splat' to be converted to a logical op.
44484 if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
44485 return SDValue();
44487 // vselect Cond, 111..., 000... -> Cond
44488 if (TValIsAllOnes && FValIsAllZeros)
44489 return DAG.getBitcast(VT, Cond);
44491 if (!TLI.isTypeLegal(CondVT))
44492 return SDValue();
44494 // vselect Cond, 111..., X -> or Cond, X
44495 if (TValIsAllOnes) {
44496 SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
44497 SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
44498 return DAG.getBitcast(VT, Or);
44501 // vselect Cond, X, 000... -> and Cond, X
44502 if (FValIsAllZeros) {
44503 SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
44504 SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
44505 return DAG.getBitcast(VT, And);
44508 // vselect Cond, 000..., X -> andn Cond, X
44509 if (TValIsAllZeros) {
44510 SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
44511 SDValue AndN;
44512 // The canonical form differs for i1 vectors: X86ISD::ANDNP is not used.
44513 if (CondVT.getScalarType() == MVT::i1)
44514 AndN = DAG.getNode(ISD::AND, DL, CondVT, DAG.getNOT(DL, Cond, CondVT),
44515 CastRHS);
44516 else
44517 AndN = DAG.getNode(X86ISD::ANDNP, DL, CondVT, Cond, CastRHS);
44518 return DAG.getBitcast(VT, AndN);
44521 return SDValue();
44524 /// If both arms of a vector select are concatenated vectors, split the select,
44525 /// and concatenate the result to eliminate a wide (256-bit) vector instruction:
44526 /// vselect Cond, (concat T0, T1), (concat F0, F1) -->
44527 /// concat (vselect (split Cond), T0, F0), (vselect (split Cond), T1, F1)
44528 static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG,
44529 const X86Subtarget &Subtarget) {
44530 unsigned Opcode = N->getOpcode();
44531 if (Opcode != X86ISD::BLENDV && Opcode != ISD::VSELECT)
44532 return SDValue();
44534 // TODO: Split 512-bit vectors too?
44535 EVT VT = N->getValueType(0);
44536 if (!VT.is256BitVector())
44537 return SDValue();
44539 // TODO: Split as long as any 2 of the 3 operands are concatenated?
44540 SDValue Cond = N->getOperand(0);
44541 SDValue TVal = N->getOperand(1);
44542 SDValue FVal = N->getOperand(2);
44543 if (!TVal.hasOneUse() || !FVal.hasOneUse() ||
44544 !isFreeToSplitVector(TVal.getNode(), DAG) ||
44545 !isFreeToSplitVector(FVal.getNode(), DAG))
44546 return SDValue();
44548 auto makeBlend = [Opcode](SelectionDAG &DAG, const SDLoc &DL,
44549 ArrayRef<SDValue> Ops) {
44550 return DAG.getNode(Opcode, DL, Ops[1].getValueType(), Ops);
44552 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { Cond, TVal, FVal },
44553 makeBlend, /*CheckBWI*/ false);
44556 static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
44557 SDValue Cond = N->getOperand(0);
44558 SDValue LHS = N->getOperand(1);
44559 SDValue RHS = N->getOperand(2);
44560 SDLoc DL(N);
44562 auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
44563 auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
44564 if (!TrueC || !FalseC)
44565 return SDValue();
44567 // Don't do this for crazy integer types.
44568 EVT VT = N->getValueType(0);
44569 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
44570 return SDValue();
44572 // We're going to use the condition bit in math or logic ops. We could allow
44573 // this with a wider condition value (post-legalization it becomes an i8),
44574 // but if nothing is creating selects that late, it doesn't matter.
44575 if (Cond.getValueType() != MVT::i1)
44576 return SDValue();
44578 // A power-of-2 multiply is just a shift. LEA also cheaply handles multiply by
44579 // 3, 5, or 9 with i32/i64, so those get transformed too.
44580 // TODO: For constants that overflow or do not differ by power-of-2 or small
44581 // multiplier, convert to 'and' + 'add'.
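// Worked example (illustrative): select Cond, 7, 3 has a difference of 4, so
// it becomes (zext(Cond) * 4) + 3, where the multiply lowers to a shift;
// select Cond, 8, 3 differs by 5, which an i32/i64 LEA can scale directly.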
44582 const APInt &TrueVal = TrueC->getAPIntValue();
44583 const APInt &FalseVal = FalseC->getAPIntValue();
44585 // We have a more efficient lowering for "(X == 0) ? Y : -1" using SBB.
44586 if ((TrueVal.isAllOnes() || FalseVal.isAllOnes()) &&
44587 Cond.getOpcode() == ISD::SETCC && isNullConstant(Cond.getOperand(1))) {
44588 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
44589 if (CC == ISD::SETEQ || CC == ISD::SETNE)
44590 return SDValue();
44593 bool OV;
44594 APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
44595 if (OV)
44596 return SDValue();
44598 APInt AbsDiff = Diff.abs();
44599 if (AbsDiff.isPowerOf2() ||
44600 ((VT == MVT::i32 || VT == MVT::i64) &&
44601 (AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) {
44603 // We need a positive multiplier constant for shift/LEA codegen. The 'not'
44604 // of the condition can usually be folded into a compare predicate, but even
44605 // without that, the sequence should be cheaper than a CMOV alternative.
44606 if (TrueVal.slt(FalseVal)) {
44607 Cond = DAG.getNOT(DL, Cond, MVT::i1);
44608 std::swap(TrueC, FalseC);
44611 // select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
44612 SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
44614 // Multiply condition by the difference if non-one.
44615 if (!AbsDiff.isOne())
44616 R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));
44618 // Add the base if non-zero.
44619 if (!FalseC->isZero())
44620 R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0));
44622 return R;
44625 return SDValue();
44628 /// If this is a *dynamic* select (non-constant condition) and we can match
44629 /// this node with one of the variable blend instructions, restructure the
44630 /// condition so that blends can use the high (sign) bit of each element.
44631 /// This function will also call SimplifyDemandedBits on already created
44632 /// BLENDV to perform additional simplifications.
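/// The variable blend instructions (PBLENDVB/BLENDVPS/BLENDVPD) only read the
/// sign bit of each condition element, so only the sign bit is demanded from
/// Cond below; SimplifyDemandedBits can then strip work that merely computes
/// the lower bits of an all-bits mask.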
44633 static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
44634 TargetLowering::DAGCombinerInfo &DCI,
44635 const X86Subtarget &Subtarget) {
44636 SDValue Cond = N->getOperand(0);
44637 if ((N->getOpcode() != ISD::VSELECT &&
44638 N->getOpcode() != X86ISD::BLENDV) ||
44639 ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
44640 return SDValue();
44642 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44643 unsigned BitWidth = Cond.getScalarValueSizeInBits();
44644 EVT VT = N->getValueType(0);
44646 // We can only handle the cases where VSELECT is directly legal on the
44647 // subtarget. We custom lower VSELECT nodes with constant conditions and
44648 // this makes it hard to see whether a dynamic VSELECT will correctly
44649 // lower, so we both check the operation's status and explicitly handle the
44650 // cases where a *dynamic* blend will fail even though a constant-condition
44651 // blend could be custom lowered.
44652 // FIXME: We should find a better way to handle this class of problems.
44653 // Potentially, we should combine constant-condition vselect nodes
44654 // pre-legalization into shuffles and not mark as many types as custom
44655 // lowered.
44656 if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
44657 return SDValue();
44658 // FIXME: We don't support i16-element blends currently. We could and
44659 // should support them by making *all* the bits in the condition be set
44660 // rather than just the high bit and using an i8-element blend.
44661 if (VT.getVectorElementType() == MVT::i16)
44662 return SDValue();
44663 // Dynamic blending was only available from SSE4.1 onward.
44664 if (VT.is128BitVector() && !Subtarget.hasSSE41())
44665 return SDValue();
44666 // Byte blends are only available with AVX2.
44667 if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
44668 return SDValue();
44669 // There are no 512-bit blend instructions that use sign bits.
44670 if (VT.is512BitVector())
44671 return SDValue();
44673 // Don't optimize before the condition has been transformed to a legal type
44674 // and don't ever optimize vector selects that map to AVX512 mask-registers.
44675 if (BitWidth < 8 || BitWidth > 64)
44676 return SDValue();
44678 auto OnlyUsedAsSelectCond = [](SDValue Cond) {
44679 for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
44680 UI != UE; ++UI)
44681 if ((UI->getOpcode() != ISD::VSELECT &&
44682 UI->getOpcode() != X86ISD::BLENDV) ||
44683 UI.getOperandNo() != 0)
44684 return false;
44686 return true;
44689 APInt DemandedBits(APInt::getSignMask(BitWidth));
44691 if (OnlyUsedAsSelectCond(Cond)) {
44692 KnownBits Known;
44693 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
44694 !DCI.isBeforeLegalizeOps());
44695 if (!TLI.SimplifyDemandedBits(Cond, DemandedBits, Known, TLO, 0, true))
44696 return SDValue();
44698 // If we changed the computation somewhere in the DAG, this change will
44699 // affect all users of Cond. Update all the nodes so that we do not use
44700 // the generic VSELECT anymore. Otherwise, we may perform incorrect
44701 // optimizations, since we have changed the expected contents of the vector
44702 // boolean values.
44703 for (SDNode *U : Cond->uses()) {
44704 if (U->getOpcode() == X86ISD::BLENDV)
44705 continue;
44707 SDValue SB = DAG.getNode(X86ISD::BLENDV, SDLoc(U), U->getValueType(0),
44708 Cond, U->getOperand(1), U->getOperand(2));
44709 DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
44710 DCI.AddToWorklist(U);
44712 DCI.CommitTargetLoweringOpt(TLO);
44713 return SDValue(N, 0);
44716 // Otherwise we can still at least try to simplify multiple use bits.
44717 if (SDValue V = TLI.SimplifyMultipleUseDemandedBits(Cond, DemandedBits, DAG))
44718 return DAG.getNode(X86ISD::BLENDV, SDLoc(N), N->getValueType(0), V,
44719 N->getOperand(1), N->getOperand(2));
44721 return SDValue();
44724 // Try to match:
44725 // (or (and (M, (sub 0, X)), (pandn M, X)))
44726 // which is a special case of:
44727 // (select M, (sub 0, X), X)
44728 // Per:
44729 // http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
44730 // We know that, if fNegate is 0 or 1:
44731 // (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
44733 // Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
44734 // ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
44735 // ( M ? -X : X) == ((X ^ M ) + (M & 1))
44736 // This lets us transform our vselect to:
44737 // (add (xor X, M), (and M, 1))
44738 // And further to:
44739 // (sub (xor X, M), M)
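// Quick check of the two mask values: with M == all-ones, (X ^ M) - M ==
// (~X) - (-1) == ~X + 1 == -X; with M == 0, (X ^ 0) - 0 == X.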
44740 static SDValue combineLogicBlendIntoConditionalNegate(
44741 EVT VT, SDValue Mask, SDValue X, SDValue Y, const SDLoc &DL,
44742 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
44743 EVT MaskVT = Mask.getValueType();
44744 assert(MaskVT.isInteger() &&
44745 DAG.ComputeNumSignBits(Mask) == MaskVT.getScalarSizeInBits() &&
44746 "Mask must be zero/all-bits");
44748 if (X.getValueType() != MaskVT || Y.getValueType() != MaskVT)
44749 return SDValue();
44750 if (!DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT))
44751 return SDValue();
44753 auto IsNegV = [](SDNode *N, SDValue V) {
44754 return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
44755 ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
44758 SDValue V;
44759 if (IsNegV(Y.getNode(), X))
44760 V = X;
44761 else if (IsNegV(X.getNode(), Y))
44762 V = Y;
44763 else
44764 return SDValue();
44766 SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
44767 SDValue SubOp2 = Mask;
44769 // If the negate was on the false side of the select, then
44770 // the operands of the SUB need to be swapped. PR 27251.
44771 // This is because the pattern being matched above is
44772 // (vselect M, (sub 0, X), X) -> (sub (xor X, M), M)
44773 // but if the pattern matched was
44774 // (vselect M, X, (sub 0, X)), which is really the negation of the pattern
44775 // above, -(vselect M, (sub 0, X), X), and therefore the replacement
44776 // pattern also needs to be a negation of the replacement pattern above.
44777 // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
44778 // sub accomplishes the negation of the replacement pattern.
44779 if (V == Y)
44780 std::swap(SubOp1, SubOp2);
44782 SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
44783 return DAG.getBitcast(VT, Res);
44786 static SDValue commuteSelect(SDNode *N, SelectionDAG &DAG,
44787 const X86Subtarget &Subtarget) {
44788 if (!Subtarget.hasAVX512())
44789 return SDValue();
44790 if (N->getOpcode() != ISD::VSELECT)
44791 return SDValue();
44793 SDLoc DL(N);
44794 SDValue Cond = N->getOperand(0);
44795 SDValue LHS = N->getOperand(1);
44796 SDValue RHS = N->getOperand(2);
44798 if (canCombineAsMaskOperation(LHS, Subtarget))
44799 return SDValue();
44801 if (!canCombineAsMaskOperation(RHS, Subtarget))
44802 return SDValue();
44804 if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse())
44805 return SDValue();
44807 // Commute LHS and RHS to create opportunity to select mask instruction.
44808 // (vselect M, L, R) -> (vselect ~M, R, L)
44809 ISD::CondCode NewCC =
44810 ISD::getSetCCInverse(cast<CondCodeSDNode>(Cond.getOperand(2))->get(),
44811 Cond.getOperand(0).getValueType());
44812 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(), Cond.getOperand(0),
44813 Cond.getOperand(1), NewCC);
44814 return DAG.getSelect(DL, LHS.getValueType(), Cond, RHS, LHS);
44817 /// Do target-specific dag combines on SELECT and VSELECT nodes.
44818 static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
44819 TargetLowering::DAGCombinerInfo &DCI,
44820 const X86Subtarget &Subtarget) {
44821 SDLoc DL(N);
44822 SDValue Cond = N->getOperand(0);
44823 SDValue LHS = N->getOperand(1);
44824 SDValue RHS = N->getOperand(2);
44826 // Try simplification again because we use this function to optimize
44827 // BLENDV nodes that are not handled by the generic combiner.
44828 if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS))
44829 return V;
44831 // When AVX512 is available, the LHS operand of a select instruction can be
44832 // folded into a masked instruction, while the RHS operand can't. Commute the
44833 // LHS and RHS of the select instruction to create the opportunity for
44834 // folding.
44835 if (SDValue V = commuteSelect(N, DAG, Subtarget))
44836 return V;
44838 EVT VT = LHS.getValueType();
44839 EVT CondVT = Cond.getValueType();
44840 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44841 bool CondConstantVector = ISD::isBuildVectorOfConstantSDNodes(Cond.getNode());
44843 // Attempt to combine (select M, (sub 0, X), X) -> (sub (xor X, M), M).
44844 // Limit this to cases of non-constant masks that createShuffleMaskFromVSELECT
44845 // can't catch, plus vXi8 cases where we'd likely end up with BLENDV.
44846 if (CondVT.isVector() && CondVT.isInteger() &&
44847 CondVT.getScalarSizeInBits() == VT.getScalarSizeInBits() &&
44848 (!CondConstantVector || CondVT.getScalarType() == MVT::i8) &&
44849 DAG.ComputeNumSignBits(Cond) == CondVT.getScalarSizeInBits())
44850 if (SDValue V = combineLogicBlendIntoConditionalNegate(VT, Cond, RHS, LHS,
44851 DL, DAG, Subtarget))
44852 return V;
44854 // Convert vselects with constant condition into shuffles.
44855 if (CondConstantVector && DCI.isBeforeLegalizeOps() &&
44856 (N->getOpcode() == ISD::VSELECT || N->getOpcode() == X86ISD::BLENDV)) {
44857 SmallVector<int, 64> Mask;
44858 if (createShuffleMaskFromVSELECT(Mask, Cond,
44859 N->getOpcode() == X86ISD::BLENDV))
44860 return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
44863 // fold vselect(cond, pshufb(x), pshufb(y)) -> or (pshufb(x), pshufb(y))
44864 // by forcing the unselected elements to zero.
44865 // TODO: Can we handle more shuffles with this?
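// Roughly: each lane is taken from exactly one side, so the other side's
// PSHUFB index is forced to 0x80 (PSHUFB writes zero when the index byte has
// its high bit set), and OR-ing the two zero-filled results reproduces the
// blend.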
44866 if (N->getOpcode() == ISD::VSELECT && CondVT.isVector() &&
44867 LHS.getOpcode() == X86ISD::PSHUFB && RHS.getOpcode() == X86ISD::PSHUFB &&
44868 LHS.hasOneUse() && RHS.hasOneUse()) {
44869 MVT SimpleVT = VT.getSimpleVT();
44870 SmallVector<SDValue, 1> LHSOps, RHSOps;
44871 SmallVector<int, 64> LHSMask, RHSMask, CondMask;
44872 if (createShuffleMaskFromVSELECT(CondMask, Cond) &&
44873 getTargetShuffleMask(LHS.getNode(), SimpleVT, true, LHSOps, LHSMask) &&
44874 getTargetShuffleMask(RHS.getNode(), SimpleVT, true, RHSOps, RHSMask)) {
44875 int NumElts = VT.getVectorNumElements();
44876 for (int i = 0; i != NumElts; ++i) {
44877 // getConstVector sets negative shuffle mask values as undef, so ensure
44878 // we hardcode SM_SentinelZero values to zero (0x80).
44879 if (CondMask[i] < NumElts) {
44880 LHSMask[i] = isUndefOrZero(LHSMask[i]) ? 0x80 : LHSMask[i];
44881 RHSMask[i] = 0x80;
44882 } else {
44883 LHSMask[i] = 0x80;
44884 RHSMask[i] = isUndefOrZero(RHSMask[i]) ? 0x80 : RHSMask[i];
44887 LHS = DAG.getNode(X86ISD::PSHUFB, DL, VT, LHS.getOperand(0),
44888 getConstVector(LHSMask, SimpleVT, DAG, DL, true));
44889 RHS = DAG.getNode(X86ISD::PSHUFB, DL, VT, RHS.getOperand(0),
44890 getConstVector(RHSMask, SimpleVT, DAG, DL, true));
44891 return DAG.getNode(ISD::OR, DL, VT, LHS, RHS);
44895 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
44896 // instructions match the semantics of the common C idiom x<y?x:y but not
44897 // x<=y?x:y, because of how they handle negative zero (which can be
44898 // ignored in unsafe-math mode).
44899 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
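// As a reminder of the SSE semantics relied on here: MINSS(x, y) returns y
// when either operand is NaN and returns the second operand for a -0.0/+0.0
// tie, i.e. it computes exactly "x < y ? x : y"; that is why only certain
// condition codes below map to FMIN/FMAX without extra operand checks.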
44900 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
44901 VT != MVT::f80 && VT != MVT::f128 && !isSoftF16(VT, Subtarget) &&
44902 (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
44903 (Subtarget.hasSSE2() ||
44904 (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
44905 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
44907 unsigned Opcode = 0;
44908 // Check for x CC y ? x : y.
44909 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
44910 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
44911 switch (CC) {
44912 default: break;
44913 case ISD::SETULT:
44914 // Converting this to a min would handle NaNs incorrectly, and swapping
44915 // the operands would cause it to handle comparisons between positive
44916 // and negative zero incorrectly.
44917 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
44918 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
44919 !(DAG.isKnownNeverZeroFloat(LHS) ||
44920 DAG.isKnownNeverZeroFloat(RHS)))
44921 break;
44922 std::swap(LHS, RHS);
44924 Opcode = X86ISD::FMIN;
44925 break;
44926 case ISD::SETOLE:
44927 // Converting this to a min would handle comparisons between positive
44928 // and negative zero incorrectly.
44929 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
44930 !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
44931 break;
44932 Opcode = X86ISD::FMIN;
44933 break;
44934 case ISD::SETULE:
44935 // Converting this to a min would handle both negative zeros and NaNs
44936 // incorrectly, but we can swap the operands to fix both.
44937 std::swap(LHS, RHS);
44938 [[fallthrough]];
44939 case ISD::SETOLT:
44940 case ISD::SETLT:
44941 case ISD::SETLE:
44942 Opcode = X86ISD::FMIN;
44943 break;
44945 case ISD::SETOGE:
44946 // Converting this to a max would handle comparisons between positive
44947 // and negative zero incorrectly.
44948 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
44949 !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
44950 break;
44951 Opcode = X86ISD::FMAX;
44952 break;
44953 case ISD::SETUGT:
44954 // Converting this to a max would handle NaNs incorrectly, and swapping
44955 // the operands would cause it to handle comparisons between positive
44956 // and negative zero incorrectly.
44957 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
44958 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
44959 !(DAG.isKnownNeverZeroFloat(LHS) ||
44960 DAG.isKnownNeverZeroFloat(RHS)))
44961 break;
44962 std::swap(LHS, RHS);
44964 Opcode = X86ISD::FMAX;
44965 break;
44966 case ISD::SETUGE:
44967 // Converting this to a max would handle both negative zeros and NaNs
44968 // incorrectly, but we can swap the operands to fix both.
44969 std::swap(LHS, RHS);
44970 [[fallthrough]];
44971 case ISD::SETOGT:
44972 case ISD::SETGT:
44973 case ISD::SETGE:
44974 Opcode = X86ISD::FMAX;
44975 break;
44977 // Check for x CC y ? y : x -- a min/max with reversed arms.
44978 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
44979 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
44980 switch (CC) {
44981 default: break;
44982 case ISD::SETOGE:
44983 // Converting this to a min would handle comparisons between positive
44984 // and negative zero incorrectly, and swapping the operands would
44985 // cause it to handle NaNs incorrectly.
44986 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
44987 !(DAG.isKnownNeverZeroFloat(LHS) ||
44988 DAG.isKnownNeverZeroFloat(RHS))) {
44989 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
44990 break;
44991 std::swap(LHS, RHS);
44993 Opcode = X86ISD::FMIN;
44994 break;
44995 case ISD::SETUGT:
44996 // Converting this to a min would handle NaNs incorrectly.
44997 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
44998 break;
44999 Opcode = X86ISD::FMIN;
45000 break;
45001 case ISD::SETUGE:
45002 // Converting this to a min would handle both negative zeros and NaNs
45003 // incorrectly, but we can swap the operands to fix both.
45004 std::swap(LHS, RHS);
45005 [[fallthrough]];
45006 case ISD::SETOGT:
45007 case ISD::SETGT:
45008 case ISD::SETGE:
45009 Opcode = X86ISD::FMIN;
45010 break;
45012 case ISD::SETULT:
45013 // Converting this to a max would handle NaNs incorrectly.
45014 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
45015 break;
45016 Opcode = X86ISD::FMAX;
45017 break;
45018 case ISD::SETOLE:
45019 // Converting this to a max would handle comparisons between positive
45020 // and negative zero incorrectly, and swapping the operands would
45021 // cause it to handle NaNs incorrectly.
45022 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
45023 !DAG.isKnownNeverZeroFloat(LHS) &&
45024 !DAG.isKnownNeverZeroFloat(RHS)) {
45025 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
45026 break;
45027 std::swap(LHS, RHS);
45029 Opcode = X86ISD::FMAX;
45030 break;
45031 case ISD::SETULE:
45032 // Converting this to a max would handle both negative zeros and NaNs
45033 // incorrectly, but we can swap the operands to fix both.
45034 std::swap(LHS, RHS);
45035 [[fallthrough]];
45036 case ISD::SETOLT:
45037 case ISD::SETLT:
45038 case ISD::SETLE:
45039 Opcode = X86ISD::FMAX;
45040 break;
45044 if (Opcode)
45045 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
45048 // Some mask scalar intrinsics rely on checking if only one bit is set
45049 // and implement it in C code like this:
45050 // A[0] = (U & 1) ? A[0] : W[0];
45051 // This creates some redundant instructions that break pattern matching.
45052 // fold (select (setcc (and X, 1), 0, seteq), Y, Z) -> (select (and X, 1), Z, Y)
45053 if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT &&
45054 Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) {
45055 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
45056 SDValue AndNode = Cond.getOperand(0);
45057 if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ &&
45058 isNullConstant(Cond.getOperand(1)) &&
45059 isOneConstant(AndNode.getOperand(1))) {
45060 // LHS and RHS are swapped because the
45061 // setcc outputs 1 when the AND result is 0, and vice versa.
45062 AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8);
45063 return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS);
45067 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
45068 // lowering on KNL. In this case we convert it to
45069 // v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
45070 // The same applies to all vectors of i8 and i16 elements without BWI.
45071 // Make sure we extend these even before type legalization gets a chance to
45072 // split wide vectors.
45073 // Since SKX, these selects have a proper lowering.
45074 if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() &&
45075 CondVT.getVectorElementType() == MVT::i1 &&
45076 (VT.getVectorElementType() == MVT::i8 ||
45077 VT.getVectorElementType() == MVT::i16)) {
45078 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
45079 return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
45082 // AVX512 - Extend select with zero to merge with target shuffle.
45083 // select(mask, extract_subvector(shuffle(x)), zero) -->
45084 // extract_subvector(select(insert_subvector(mask), shuffle(x), zero))
45085 // TODO - support non target shuffles as well.
45086 if (Subtarget.hasAVX512() && CondVT.isVector() &&
45087 CondVT.getVectorElementType() == MVT::i1) {
45088 auto SelectableOp = [&TLI](SDValue Op) {
45089 return Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
45090 isTargetShuffle(Op.getOperand(0).getOpcode()) &&
45091 isNullConstant(Op.getOperand(1)) &&
45092 TLI.isTypeLegal(Op.getOperand(0).getValueType()) &&
45093 Op.hasOneUse() && Op.getOperand(0).hasOneUse();
45096 bool SelectableLHS = SelectableOp(LHS);
45097 bool SelectableRHS = SelectableOp(RHS);
45098 bool ZeroLHS = ISD::isBuildVectorAllZeros(LHS.getNode());
45099 bool ZeroRHS = ISD::isBuildVectorAllZeros(RHS.getNode());
45101 if ((SelectableLHS && ZeroRHS) || (SelectableRHS && ZeroLHS)) {
45102 EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
45103 : RHS.getOperand(0).getValueType();
45104 EVT SrcCondVT = SrcVT.changeVectorElementType(MVT::i1);
45105 LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
45106 VT.getSizeInBits());
45107 RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
45108 VT.getSizeInBits());
45109 Cond = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcCondVT,
45110 DAG.getUNDEF(SrcCondVT), Cond,
45111 DAG.getIntPtrConstant(0, DL));
45112 SDValue Res = DAG.getSelect(DL, SrcVT, Cond, LHS, RHS);
45113 return extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
45117 if (SDValue V = combineSelectOfTwoConstants(N, DAG))
45118 return V;
45120 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
45121 Cond.hasOneUse()) {
45122 EVT CondVT = Cond.getValueType();
45123 SDValue Cond0 = Cond.getOperand(0);
45124 SDValue Cond1 = Cond.getOperand(1);
45125 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
45127 // Canonicalize min/max:
45128 // (x > 0) ? x : 0 -> (x >= 0) ? x : 0
45129 // (x < -1) ? x : -1 -> (x <= -1) ? x : -1
45130 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
45131 // the need for an extra compare against zero. e.g.
45132 // (a - b) > 0 ? (a - b) : 0 -> (a - b) >= 0 ? (a - b) : 0
45133 // subl %esi, %edi
45134 // testl %edi, %edi
45135 // movl $0, %eax
45136 // cmovgl %edi, %eax
45137 // =>
45138 // xorl %eax, %eax
45139 // subl %esi, %edi
45140 // cmovsl %eax, %edi
45142 // We can also canonicalize
45143 // (x s> 1) ? x : 1 -> (x s>= 1) ? x : 1 -> (x s> 0) ? x : 1
45144 // (x u> 1) ? x : 1 -> (x u>= 1) ? x : 1 -> (x != 0) ? x : 1
45145 // This allows the use of a test instruction for the compare.
45146 if (LHS == Cond0 && RHS == Cond1) {
45147 if ((CC == ISD::SETGT && (isNullConstant(RHS) || isOneConstant(RHS))) ||
45148 (CC == ISD::SETLT && isAllOnesConstant(RHS))) {
45149 ISD::CondCode NewCC = CC == ISD::SETGT ? ISD::SETGE : ISD::SETLE;
45150 Cond = DAG.getSetCC(SDLoc(Cond), CondVT, Cond0, Cond1, NewCC);
45151 return DAG.getSelect(DL, VT, Cond, LHS, RHS);
45153 if (CC == ISD::SETUGT && isOneConstant(RHS)) {
45154 ISD::CondCode NewCC = ISD::SETUGE;
45155 Cond = DAG.getSetCC(SDLoc(Cond), CondVT, Cond0, Cond1, NewCC);
45156 return DAG.getSelect(DL, VT, Cond, LHS, RHS);
45160 // Similar to DAGCombine's select(or(CC0,CC1),X,Y) fold but for legal types.
45161 // fold eq + gt/lt nested selects into ge/le selects
45162 // select (cmpeq Cond0, Cond1), LHS, (select (cmpugt Cond0, Cond1), LHS, Y)
45163 // --> (select (cmpuge Cond0, Cond1), LHS, Y)
45164 // select (cmpslt Cond0, Cond1), LHS, (select (cmpeq Cond0, Cond1), LHS, Y)
45165 // --> (select (cmpsle Cond0, Cond1), LHS, Y)
45166 // .. etc ..
45167 if (RHS.getOpcode() == ISD::SELECT && RHS.getOperand(1) == LHS &&
45168 RHS.getOperand(0).getOpcode() == ISD::SETCC) {
45169 SDValue InnerSetCC = RHS.getOperand(0);
45170 ISD::CondCode InnerCC =
45171 cast<CondCodeSDNode>(InnerSetCC.getOperand(2))->get();
45172 if ((CC == ISD::SETEQ || InnerCC == ISD::SETEQ) &&
45173 Cond0 == InnerSetCC.getOperand(0) &&
45174 Cond1 == InnerSetCC.getOperand(1)) {
45175 ISD::CondCode NewCC;
45176 switch (CC == ISD::SETEQ ? InnerCC : CC) {
45177 case ISD::SETGT: NewCC = ISD::SETGE; break;
45178 case ISD::SETLT: NewCC = ISD::SETLE; break;
45179 case ISD::SETUGT: NewCC = ISD::SETUGE; break;
45180 case ISD::SETULT: NewCC = ISD::SETULE; break;
45181 default: NewCC = ISD::SETCC_INVALID; break;
45183 if (NewCC != ISD::SETCC_INVALID) {
45184 Cond = DAG.getSetCC(DL, CondVT, Cond0, Cond1, NewCC);
45185 return DAG.getSelect(DL, VT, Cond, LHS, RHS.getOperand(2));
45191 // Check if the first operand is all zeros and Cond type is vXi1.
45192 // If this is an AVX512 target, we can improve the use of zero masking by
45193 // swapping the operands and inverting the condition.
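// Illustrative effect: (vselect M, zero, X) becomes (vselect ~M, X, zero),
// which matches AVX512 zero-masking (a {z}-masked operation) instead of
// requiring a separate blend with a zero vector.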
45194 if (N->getOpcode() == ISD::VSELECT && Cond.hasOneUse() &&
45195 Subtarget.hasAVX512() && CondVT.getVectorElementType() == MVT::i1 &&
45196 ISD::isBuildVectorAllZeros(LHS.getNode()) &&
45197 !ISD::isBuildVectorAllZeros(RHS.getNode())) {
45198 // Invert the cond to not(cond) : xor(op,allones)=not(op)
45199 SDValue CondNew = DAG.getNOT(DL, Cond, CondVT);
45200 // Vselect cond, op1, op2 = Vselect not(cond), op2, op1
45201 return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
45204 // Attempt to convert a (vXi1 bitcast(iX Cond)) selection mask before it might
45205 // get split by legalization.
45206 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::BITCAST &&
45207 CondVT.getVectorElementType() == MVT::i1 &&
45208 TLI.isTypeLegal(VT.getScalarType())) {
45209 EVT ExtCondVT = VT.changeVectorElementTypeToInteger();
45210 if (SDValue ExtCond = combineToExtendBoolVectorInReg(
45211 ISD::SIGN_EXTEND, DL, ExtCondVT, Cond, DAG, DCI, Subtarget)) {
45212 ExtCond = DAG.getNode(ISD::TRUNCATE, DL, CondVT, ExtCond);
45213 return DAG.getSelect(DL, VT, ExtCond, LHS, RHS);
45217 // Early exit check
45218 if (!TLI.isTypeLegal(VT) || isSoftF16(VT, Subtarget))
45219 return SDValue();
45221 if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
45222 return V;
45224 if (SDValue V = combineVSelectToBLENDV(N, DAG, DCI, Subtarget))
45225 return V;
45227 if (SDValue V = narrowVectorSelect(N, DAG, Subtarget))
45228 return V;
45230 // select(~Cond, X, Y) -> select(Cond, Y, X)
45231 if (CondVT.getScalarType() != MVT::i1) {
45232 if (SDValue CondNot = IsNOT(Cond, DAG))
45233 return DAG.getNode(N->getOpcode(), DL, VT,
45234 DAG.getBitcast(CondVT, CondNot), RHS, LHS);
45236 // pcmpgt(X, -1) -> pcmpgt(0, X) to help select/blendv just use the
45237 // signbit.
45238 if (Cond.getOpcode() == X86ISD::PCMPGT &&
45239 ISD::isBuildVectorAllOnes(Cond.getOperand(1).getNode()) &&
45240 Cond.hasOneUse()) {
45241 Cond = DAG.getNode(X86ISD::PCMPGT, DL, CondVT,
45242 DAG.getConstant(0, DL, CondVT), Cond.getOperand(0));
45243 return DAG.getNode(N->getOpcode(), DL, VT, Cond, RHS, LHS);
45247 // Try to optimize vXi1 selects if both operands are either all constants or
45248 // bitcasts from scalar integer type. In that case we can convert the operands
45249 // to integer and use an integer select which will be converted to a CMOV.
45250 // We need to take a little bit of care to avoid creating an i64 type after
45251 // type legalization.
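// For example (illustrative): (select C, (v8i1 bitcast (i8 X)),
// (v8i1 bitcast (i8 Y))) -> (v8i1 bitcast (select C, X, Y)), where the inner
// i8 select lowers to a CMOV-style sequence.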
45252 if (N->getOpcode() == ISD::SELECT && VT.isVector() &&
45253 VT.getVectorElementType() == MVT::i1 &&
45254 (DCI.isBeforeLegalize() || (VT != MVT::v64i1 || Subtarget.is64Bit()))) {
45255 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
45256 if (DCI.isBeforeLegalize() || TLI.isTypeLegal(IntVT)) {
45257 bool LHSIsConst = ISD::isBuildVectorOfConstantSDNodes(LHS.getNode());
45258 bool RHSIsConst = ISD::isBuildVectorOfConstantSDNodes(RHS.getNode());
45260 if ((LHSIsConst || (LHS.getOpcode() == ISD::BITCAST &&
45261 LHS.getOperand(0).getValueType() == IntVT)) &&
45262 (RHSIsConst || (RHS.getOpcode() == ISD::BITCAST &&
45263 RHS.getOperand(0).getValueType() == IntVT))) {
45264 if (LHSIsConst)
45265 LHS = combinevXi1ConstantToInteger(LHS, DAG);
45266 else
45267 LHS = LHS.getOperand(0);
45269 if (RHSIsConst)
45270 RHS = combinevXi1ConstantToInteger(RHS, DAG);
45271 else
45272 RHS = RHS.getOperand(0);
45274 SDValue Select = DAG.getSelect(DL, IntVT, Cond, LHS, RHS);
45275 return DAG.getBitcast(VT, Select);
45280 // If this is "((X & C) == 0) ? Y : Z" and C is a constant mask vector of
45281 // single bits, then invert the predicate and swap the select operands.
45282 // This can lower using a vector shift bit-hack rather than mask and compare.
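// Sketch of the bit-hack (assuming one power-of-2 mask bit per element):
// shift each element left so its mask bit lands in the sign bit, then use a
// sign-bit compare/blend; e.g. for v4i32 with C = <1,2,4,8> the shift amounts
// are <31,30,29,28>.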
45283 if (DCI.isBeforeLegalize() && !Subtarget.hasAVX512() &&
45284 N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
45285 Cond.hasOneUse() && CondVT.getVectorElementType() == MVT::i1 &&
45286 Cond.getOperand(0).getOpcode() == ISD::AND &&
45287 isNullOrNullSplat(Cond.getOperand(1)) &&
45288 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
45289 Cond.getOperand(0).getValueType() == VT) {
45290 // The 'and' mask must be composed of power-of-2 constants.
45291 SDValue And = Cond.getOperand(0);
45292 auto *C = isConstOrConstSplat(And.getOperand(1));
45293 if (C && C->getAPIntValue().isPowerOf2()) {
45294 // vselect (X & C == 0), LHS, RHS --> vselect (X & C != 0), RHS, LHS
45295 SDValue NotCond =
45296 DAG.getSetCC(DL, CondVT, And, Cond.getOperand(1), ISD::SETNE);
45297 return DAG.getSelect(DL, VT, NotCond, RHS, LHS);
45300 // If we have a non-splat but still powers-of-2 mask, AVX1 can use pmulld
45301 // and AVX2 can use vpsllv{dq}. 8-bit lacks a proper shift or multiply.
45302 // 16-bit lacks a proper blendv.
45303 unsigned EltBitWidth = VT.getScalarSizeInBits();
45304 bool CanShiftBlend =
45305 TLI.isTypeLegal(VT) && ((Subtarget.hasAVX() && EltBitWidth == 32) ||
45306 (Subtarget.hasAVX2() && EltBitWidth == 64) ||
45307 (Subtarget.hasXOP()));
45308 if (CanShiftBlend &&
45309 ISD::matchUnaryPredicate(And.getOperand(1), [](ConstantSDNode *C) {
45310 return C->getAPIntValue().isPowerOf2();
45311 })) {
45312 // Create a left-shift constant to get the mask bits over to the sign-bit.
45313 SDValue Mask = And.getOperand(1);
45314 SmallVector<int, 32> ShlVals;
45315 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
45316 auto *MaskVal = cast<ConstantSDNode>(Mask.getOperand(i));
45317 ShlVals.push_back(EltBitWidth - 1 -
45318 MaskVal->getAPIntValue().exactLogBase2());
45320 // vsel ((X & C) == 0), LHS, RHS --> vsel ((shl X, C') < 0), RHS, LHS
45321 SDValue ShlAmt = getConstVector(ShlVals, VT.getSimpleVT(), DAG, DL);
45322 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And.getOperand(0), ShlAmt);
45323 SDValue NewCond =
45324 DAG.getSetCC(DL, CondVT, Shl, Cond.getOperand(1), ISD::SETLT);
45325 return DAG.getSelect(DL, VT, NewCond, RHS, LHS);
45329 return SDValue();
45332 /// Combine:
45333 /// (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
45334 /// to:
45335 /// (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
45336 /// i.e., reusing the EFLAGS produced by the LOCKed instruction.
45337 /// Note that this is only legal for some op/cc combinations.
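/// For instance (illustrative source pattern): "if (atomic_fetch_add(&v, 1) < 0)"
/// can branch on the flags of the LOCK ADD itself (COND_S becoming COND_LE via
/// the adjustment below) instead of re-testing the loaded value.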
45338 static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
45339 SelectionDAG &DAG,
45340 const X86Subtarget &Subtarget) {
45341 // This combine only operates on CMP-like nodes.
45342 if (!(Cmp.getOpcode() == X86ISD::CMP ||
45343 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
45344 return SDValue();
45346 // Can't replace the cmp if it has more uses than the one we're looking at.
45347 // FIXME: We would like to be able to handle this, but would need to make sure
45348 // all uses were updated.
45349 if (!Cmp.hasOneUse())
45350 return SDValue();
45352 // This only applies to variations of the common case:
45353 // (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
45354 // (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
45355 // (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
45356 // (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
45357 // Using the proper condcodes (see below), overflow is checked for.
45359 // FIXME: We can generalize both constraints:
45360 // - XOR/OR/AND (if they were made to survive AtomicExpand)
45361 // - LHS != 1
45362 // if the result is compared.
45364 SDValue CmpLHS = Cmp.getOperand(0);
45365 SDValue CmpRHS = Cmp.getOperand(1);
45366 EVT CmpVT = CmpLHS.getValueType();
45368 if (!CmpLHS.hasOneUse())
45369 return SDValue();
45371 unsigned Opc = CmpLHS.getOpcode();
45372 if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
45373 return SDValue();
45375 SDValue OpRHS = CmpLHS.getOperand(2);
45376 auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
45377 if (!OpRHSC)
45378 return SDValue();
45380 APInt Addend = OpRHSC->getAPIntValue();
45381 if (Opc == ISD::ATOMIC_LOAD_SUB)
45382 Addend = -Addend;
45384 auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
45385 if (!CmpRHSC)
45386 return SDValue();
45388 APInt Comparison = CmpRHSC->getAPIntValue();
45389 APInt NegAddend = -Addend;
45391 // See if we can adjust the CC to make the comparison match the negated
45392 // addend.
45393 if (Comparison != NegAddend) {
45394 APInt IncComparison = Comparison + 1;
45395 if (IncComparison == NegAddend) {
45396 if (CC == X86::COND_A && !Comparison.isMaxValue()) {
45397 Comparison = IncComparison;
45398 CC = X86::COND_AE;
45399 } else if (CC == X86::COND_LE && !Comparison.isMaxSignedValue()) {
45400 Comparison = IncComparison;
45401 CC = X86::COND_L;
45404 APInt DecComparison = Comparison - 1;
45405 if (DecComparison == NegAddend) {
45406 if (CC == X86::COND_AE && !Comparison.isMinValue()) {
45407 Comparison = DecComparison;
45408 CC = X86::COND_A;
45409 } else if (CC == X86::COND_L && !Comparison.isMinSignedValue()) {
45410 Comparison = DecComparison;
45411 CC = X86::COND_LE;
45416 // If the addend is the negation of the comparison value, then we can do
45417 // a full comparison by emitting the atomic arithmetic as a locked sub.
45418 if (Comparison == NegAddend) {
45419 // The CC is fine, but we need to rewrite the LHS of the comparison as an
45420 // atomic sub.
45421 auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
45422 auto AtomicSub = DAG.getAtomic(
45423 ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpVT,
45424 /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
45425 /*RHS*/ DAG.getConstant(NegAddend, SDLoc(CmpRHS), CmpVT),
45426 AN->getMemOperand());
45427 auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget);
45428 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0), DAG.getUNDEF(CmpVT));
45429 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
45430 return LockOp;
45433 // We can handle comparisons with zero in a number of cases by manipulating
45434 // the CC used.
45435 if (!Comparison.isZero())
45436 return SDValue();
45438 if (CC == X86::COND_S && Addend == 1)
45439 CC = X86::COND_LE;
45440 else if (CC == X86::COND_NS && Addend == 1)
45441 CC = X86::COND_G;
45442 else if (CC == X86::COND_G && Addend == -1)
45443 CC = X86::COND_GE;
45444 else if (CC == X86::COND_LE && Addend == -1)
45445 CC = X86::COND_L;
45446 else
45447 return SDValue();
45449 SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget);
45450 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0), DAG.getUNDEF(CmpVT));
45451 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
45452 return LockOp;
45455 // Check whether a boolean test is testing a boolean value generated by
45456 // X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
45457 // code.
45459 // Simplify the following patterns:
45460 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
45461 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
45462 // to (Op EFLAGS Cond)
45464 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
45465 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
45466 // to (Op EFLAGS !Cond)
45468 // where Op could be BRCOND or CMOV.
45470 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
45471 // This combine only operates on CMP-like nodes.
45472 if (!(Cmp.getOpcode() == X86ISD::CMP ||
45473 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
45474 return SDValue();
45476 // Quit if not used as a boolean value.
45477 if (CC != X86::COND_E && CC != X86::COND_NE)
45478 return SDValue();
45480 // Check CMP operands. One of them should be 0 or 1 and the other should be
45481 // a SETCC node or a value extended from it.
45482 SDValue Op1 = Cmp.getOperand(0);
45483 SDValue Op2 = Cmp.getOperand(1);
45485 SDValue SetCC;
45486 const ConstantSDNode* C = nullptr;
45487 bool needOppositeCond = (CC == X86::COND_E);
45488 bool checkAgainstTrue = false; // Is it a comparison against 1?
45490 if ((C = dyn_cast<ConstantSDNode>(Op1)))
45491 SetCC = Op2;
45492 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
45493 SetCC = Op1;
45494 else // Quit if neither operand is a constant.
45495 return SDValue();
45497 if (C->getZExtValue() == 1) {
45498 needOppositeCond = !needOppositeCond;
45499 checkAgainstTrue = true;
45500 } else if (C->getZExtValue() != 0)
45501 // Quit if the constant is neither 0 nor 1.
45502 return SDValue();
45504 bool truncatedToBoolWithAnd = false;
45505 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
45506 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
45507 SetCC.getOpcode() == ISD::TRUNCATE ||
45508 SetCC.getOpcode() == ISD::AND) {
45509 if (SetCC.getOpcode() == ISD::AND) {
45510 int OpIdx = -1;
45511 if (isOneConstant(SetCC.getOperand(0)))
45512 OpIdx = 1;
45513 if (isOneConstant(SetCC.getOperand(1)))
45514 OpIdx = 0;
45515 if (OpIdx < 0)
45516 break;
45517 SetCC = SetCC.getOperand(OpIdx);
45518 truncatedToBoolWithAnd = true;
45519 } else
45520 SetCC = SetCC.getOperand(0);
45523 switch (SetCC.getOpcode()) {
45524 case X86ISD::SETCC_CARRY:
45525 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
45526 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
45527 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
45528 // truncated to i1 using 'and'.
45529 if (checkAgainstTrue && !truncatedToBoolWithAnd)
45530 break;
45531 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
45532 "Invalid use of SETCC_CARRY!");
45533 [[fallthrough]];
45534 case X86ISD::SETCC:
45535 // Set the condition code or opposite one if necessary.
45536 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
45537 if (needOppositeCond)
45538 CC = X86::GetOppositeBranchCondition(CC);
45539 return SetCC.getOperand(1);
45540 case X86ISD::CMOV: {
45541 // Check whether the false/true values are canonical, i.e. 0 or 1.
45542 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
45543 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
45544 // Quit if true value is not a constant.
45545 if (!TVal)
45546 return SDValue();
45547 // Quit if false value is not a constant.
45548 if (!FVal) {
45549 SDValue Op = SetCC.getOperand(0);
45550 // Skip 'zext' or 'trunc' node.
45551 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
45552 Op.getOpcode() == ISD::TRUNCATE)
45553 Op = Op.getOperand(0);
45554 // A special case for RDRAND/RDSEED, where the result is 0 when the
45555 // false condition is reported.
45556 if ((Op.getOpcode() != X86ISD::RDRAND &&
45557 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
45558 return SDValue();
45560 // Quit if false value is not the constant 0 or 1.
45561 bool FValIsFalse = true;
45562 if (FVal && FVal->getZExtValue() != 0) {
45563 if (FVal->getZExtValue() != 1)
45564 return SDValue();
45565 // If FVal is 1, opposite cond is needed.
45566 needOppositeCond = !needOppositeCond;
45567 FValIsFalse = false;
45569 // Quit if TVal is not the constant opposite of FVal.
45570 if (FValIsFalse && TVal->getZExtValue() != 1)
45571 return SDValue();
45572 if (!FValIsFalse && TVal->getZExtValue() != 0)
45573 return SDValue();
45574 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
45575 if (needOppositeCond)
45576 CC = X86::GetOppositeBranchCondition(CC);
45577 return SetCC.getOperand(3);
45581 return SDValue();
45584 /// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
45585 /// Match:
45586 /// (X86or (X86setcc) (X86setcc))
45587 /// (X86cmp (and (X86setcc) (X86setcc)), 0)
45588 static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
45589 X86::CondCode &CC1, SDValue &Flags,
45590 bool &isAnd) {
45591 if (Cond->getOpcode() == X86ISD::CMP) {
45592 if (!isNullConstant(Cond->getOperand(1)))
45593 return false;
45595 Cond = Cond->getOperand(0);
45598 isAnd = false;
45600 SDValue SetCC0, SetCC1;
45601 switch (Cond->getOpcode()) {
45602 default: return false;
45603 case ISD::AND:
45604 case X86ISD::AND:
45605 isAnd = true;
45606 [[fallthrough]];
45607 case ISD::OR:
45608 case X86ISD::OR:
45609 SetCC0 = Cond->getOperand(0);
45610 SetCC1 = Cond->getOperand(1);
45611 break;
45614 // Make sure we have SETCC nodes, using the same flags value.
45615 if (SetCC0.getOpcode() != X86ISD::SETCC ||
45616 SetCC1.getOpcode() != X86ISD::SETCC ||
45617 SetCC0->getOperand(1) != SetCC1->getOperand(1))
45618 return false;
45620 CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
45621 CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
45622 Flags = SetCC0->getOperand(1);
45623 return true;
45626 // When legalizing carry, we create carries via add X, -1
45627 // If that comes from an actual carry, via setcc, we use the
45628 // carry directly.
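// Roughly: if X is known to be 0 or 1, (add X, -1) sets CF exactly when X is
// 1, so when X itself came from a COND_B setcc we can reuse that setcc's
// EFLAGS operand rather than materializing X and re-deriving the carry.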
45629 static SDValue combineCarryThroughADD(SDValue EFLAGS, SelectionDAG &DAG) {
45630 if (EFLAGS.getOpcode() == X86ISD::ADD) {
45631 if (isAllOnesConstant(EFLAGS.getOperand(1))) {
45632 bool FoundAndLSB = false;
45633 SDValue Carry = EFLAGS.getOperand(0);
45634 while (Carry.getOpcode() == ISD::TRUNCATE ||
45635 Carry.getOpcode() == ISD::ZERO_EXTEND ||
45636 (Carry.getOpcode() == ISD::AND &&
45637 isOneConstant(Carry.getOperand(1)))) {
45638 FoundAndLSB |= Carry.getOpcode() == ISD::AND;
45639 Carry = Carry.getOperand(0);
45641 if (Carry.getOpcode() == X86ISD::SETCC ||
45642 Carry.getOpcode() == X86ISD::SETCC_CARRY) {
45643 // TODO: Merge this code with equivalent in combineAddOrSubToADCOrSBB?
45644 uint64_t CarryCC = Carry.getConstantOperandVal(0);
45645 SDValue CarryOp1 = Carry.getOperand(1);
45646 if (CarryCC == X86::COND_B)
45647 return CarryOp1;
45648 if (CarryCC == X86::COND_A) {
45649 // Try to convert COND_A into COND_B in an attempt to facilitate
45650 // materializing "setb reg".
45652 // Do not flip "e > c", where "c" is a constant, because the CMP
45653 // instruction cannot take an immediate as its first operand.
45655 if (CarryOp1.getOpcode() == X86ISD::SUB &&
45656 CarryOp1.getNode()->hasOneUse() &&
45657 CarryOp1.getValueType().isInteger() &&
45658 !isa<ConstantSDNode>(CarryOp1.getOperand(1))) {
45659 SDValue SubCommute =
45660 DAG.getNode(X86ISD::SUB, SDLoc(CarryOp1), CarryOp1->getVTList(),
45661 CarryOp1.getOperand(1), CarryOp1.getOperand(0));
45662 return SDValue(SubCommute.getNode(), CarryOp1.getResNo());
45665 // If this is a check of the z flag of an add with 1, switch to the
45666 // C flag.
45667 if (CarryCC == X86::COND_E &&
45668 CarryOp1.getOpcode() == X86ISD::ADD &&
45669 isOneConstant(CarryOp1.getOperand(1)))
45670 return CarryOp1;
45671 } else if (FoundAndLSB) {
45672 SDLoc DL(Carry);
45673 SDValue BitNo = DAG.getConstant(0, DL, Carry.getValueType());
45674 if (Carry.getOpcode() == ISD::SRL) {
45675 BitNo = Carry.getOperand(1);
45676 Carry = Carry.getOperand(0);
45678 return getBT(Carry, BitNo, DL, DAG);
45683 return SDValue();
45686 /// If we are inverting an PTEST/TESTP operand, attempt to adjust the CC
45687 /// to avoid the inversion.
45688 static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
45689 SelectionDAG &DAG,
45690 const X86Subtarget &Subtarget) {
45691 // TODO: Handle X86ISD::KTEST/X86ISD::KORTEST.
45692 if (EFLAGS.getOpcode() != X86ISD::PTEST &&
45693 EFLAGS.getOpcode() != X86ISD::TESTP)
45694 return SDValue();
45696 // PTEST/TESTP sets EFLAGS as:
45697 // TESTZ: ZF = (Op0 & Op1) == 0
45698 // TESTC: CF = (~Op0 & Op1) == 0
45699 // TESTNZC: ZF == 0 && CF == 0
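// Example of the inversion below: TESTZ(~X, Y) sets ZF = ((~X & Y) == 0),
// which is exactly the TESTC condition of (X, Y), so COND_E on (~X, Y)
// becomes COND_B on (X, Y).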
45700 MVT VT = EFLAGS.getSimpleValueType();
45701 SDValue Op0 = EFLAGS.getOperand(0);
45702 SDValue Op1 = EFLAGS.getOperand(1);
45703 MVT OpVT = Op0.getSimpleValueType();
45705 // TEST*(~X,Y) == TEST*(X,Y)
45706 if (SDValue NotOp0 = IsNOT(Op0, DAG)) {
45707 X86::CondCode InvCC;
45708 switch (CC) {
45709 case X86::COND_B:
45710 // testc -> testz.
45711 InvCC = X86::COND_E;
45712 break;
45713 case X86::COND_AE:
45714 // !testc -> !testz.
45715 InvCC = X86::COND_NE;
45716 break;
45717 case X86::COND_E:
45718 // testz -> testc.
45719 InvCC = X86::COND_B;
45720 break;
45721 case X86::COND_NE:
45722 // !testz -> !testc.
45723 InvCC = X86::COND_AE;
45724 break;
45725 case X86::COND_A:
45726 case X86::COND_BE:
45727 // testnzc -> testnzc (no change).
45728 InvCC = CC;
45729 break;
45730 default:
45731 InvCC = X86::COND_INVALID;
45732 break;
45735 if (InvCC != X86::COND_INVALID) {
45736 CC = InvCC;
45737 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
45738 DAG.getBitcast(OpVT, NotOp0), Op1);
45742 if (CC == X86::COND_B || CC == X86::COND_AE) {
45743 // TESTC(X,~X) == TESTC(X,-1)
45744 if (SDValue NotOp1 = IsNOT(Op1, DAG)) {
45745 if (peekThroughBitcasts(NotOp1) == peekThroughBitcasts(Op0)) {
45746 SDLoc DL(EFLAGS);
45747 return DAG.getNode(
45748 EFLAGS.getOpcode(), DL, VT, DAG.getBitcast(OpVT, NotOp1),
45749 DAG.getBitcast(OpVT,
45750 DAG.getAllOnesConstant(DL, NotOp1.getValueType())));
45755 if (CC == X86::COND_E || CC == X86::COND_NE) {
45756 // TESTZ(X,~Y) == TESTC(Y,X)
45757 if (SDValue NotOp1 = IsNOT(Op1, DAG)) {
45758 CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
45759 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
45760 DAG.getBitcast(OpVT, NotOp1), Op0);
45763 if (Op0 == Op1) {
45764 SDValue BC = peekThroughBitcasts(Op0);
45765 EVT BCVT = BC.getValueType();
45767 // TESTZ(AND(X,Y),AND(X,Y)) == TESTZ(X,Y)
45768 if (BC.getOpcode() == ISD::AND || BC.getOpcode() == X86ISD::FAND) {
45769 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
45770 DAG.getBitcast(OpVT, BC.getOperand(0)),
45771 DAG.getBitcast(OpVT, BC.getOperand(1)));
45774 // TESTZ(AND(~X,Y),AND(~X,Y)) == TESTC(X,Y)
45775 if (BC.getOpcode() == X86ISD::ANDNP || BC.getOpcode() == X86ISD::FANDN) {
45776 CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
45777 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
45778 DAG.getBitcast(OpVT, BC.getOperand(0)),
45779 DAG.getBitcast(OpVT, BC.getOperand(1)));
45782 // If every element is an all-sign value, see if we can use TESTP/MOVMSK
45783 // to more efficiently extract the sign bits and compare that.
45784 // TODO: Handle TESTC with comparison inversion.
45785 // TODO: Can we remove SimplifyMultipleUseDemandedBits and rely on
45786 // TESTP/MOVMSK combines to make sure it's never worse than PTEST?
45787 if (BCVT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(BCVT)) {
45788 unsigned EltBits = BCVT.getScalarSizeInBits();
45789 if (DAG.ComputeNumSignBits(BC) == EltBits) {
45790 assert(VT == MVT::i32 && "Expected i32 EFLAGS comparison result");
45791 APInt SignMask = APInt::getSignMask(EltBits);
45792 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45793 if (SDValue Res =
45794 TLI.SimplifyMultipleUseDemandedBits(BC, SignMask, DAG)) {
45795 // For vXi16 cases we need to use pmovmskb and extract every other
45796 // sign bit.
45797 SDLoc DL(EFLAGS);
45798 if ((EltBits == 32 || EltBits == 64) && Subtarget.hasAVX()) {
45799 MVT FloatSVT = MVT::getFloatingPointVT(EltBits);
45800 MVT FloatVT =
45801 MVT::getVectorVT(FloatSVT, OpVT.getSizeInBits() / EltBits);
45802 Res = DAG.getBitcast(FloatVT, Res);
45803 return DAG.getNode(X86ISD::TESTP, SDLoc(EFLAGS), VT, Res, Res);
45804 } else if (EltBits == 16) {
45805 MVT MovmskVT = BCVT.is128BitVector() ? MVT::v16i8 : MVT::v32i8;
45806 Res = DAG.getBitcast(MovmskVT, Res);
45807 Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
45808 Res = DAG.getNode(ISD::AND, DL, MVT::i32, Res,
45809 DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
45810 } else {
45811 Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
45813 return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Res,
45814 DAG.getConstant(0, DL, MVT::i32));
45820 // TESTZ(-1,X) == TESTZ(X,X)
45821 if (ISD::isBuildVectorAllOnes(Op0.getNode()))
45822 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op1, Op1);
45824 // TESTZ(X,-1) == TESTZ(X,X)
45825 if (ISD::isBuildVectorAllOnes(Op1.getNode()))
45826 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op0, Op0);
45828 // TESTZ(OR(LO(X),HI(X)),OR(LO(Y),HI(Y))) -> TESTZ(X,Y)
45829 // TODO: Add COND_NE handling?
45830 if (CC == X86::COND_E && OpVT.is128BitVector() && Subtarget.hasAVX()) {
45831 SDValue Src0 = peekThroughBitcasts(Op0);
45832 SDValue Src1 = peekThroughBitcasts(Op1);
45833 if (Src0.getOpcode() == ISD::OR && Src1.getOpcode() == ISD::OR) {
45834 Src0 = getSplitVectorSrc(peekThroughBitcasts(Src0.getOperand(0)),
45835 peekThroughBitcasts(Src0.getOperand(1)), true);
45836 Src1 = getSplitVectorSrc(peekThroughBitcasts(Src1.getOperand(0)),
45837 peekThroughBitcasts(Src1.getOperand(1)), true);
45838 if (Src0 && Src1) {
45839 MVT OpVT2 = OpVT.getDoubleNumVectorElementsVT();
45840 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
45841 DAG.getBitcast(OpVT2, Src0),
45842 DAG.getBitcast(OpVT2, Src1));
45848 return SDValue();
45851 // Attempt to simplify the MOVMSK input based on the comparison type.
45852 static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
45853 SelectionDAG &DAG,
45854 const X86Subtarget &Subtarget) {
45855 // Handle eq/ne against zero (any_of).
45856 // Handle eq/ne against -1 (all_of).
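// E.g. (movmsk(pcmpeqb X, Y) == 0xFFFF) is an all_of(X == Y) test, while
// (movmsk V != 0) is an any_of sign-bit test; both shapes are simplified
// below.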
45857 if (!(CC == X86::COND_E || CC == X86::COND_NE))
45858 return SDValue();
45859 if (EFLAGS.getValueType() != MVT::i32)
45860 return SDValue();
45861 unsigned CmpOpcode = EFLAGS.getOpcode();
45862 if (CmpOpcode != X86ISD::CMP && CmpOpcode != X86ISD::SUB)
45863 return SDValue();
45864 auto *CmpConstant = dyn_cast<ConstantSDNode>(EFLAGS.getOperand(1));
45865 if (!CmpConstant)
45866 return SDValue();
45867 const APInt &CmpVal = CmpConstant->getAPIntValue();
45869 SDValue CmpOp = EFLAGS.getOperand(0);
45870 unsigned CmpBits = CmpOp.getValueSizeInBits();
45871 assert(CmpBits == CmpVal.getBitWidth() && "Value size mismatch");
45873 // Peek through any truncate.
45874 if (CmpOp.getOpcode() == ISD::TRUNCATE)
45875 CmpOp = CmpOp.getOperand(0);
45877 // Bail if we don't find a MOVMSK.
45878 if (CmpOp.getOpcode() != X86ISD::MOVMSK)
45879 return SDValue();
45881 SDValue Vec = CmpOp.getOperand(0);
45882 MVT VecVT = Vec.getSimpleValueType();
45883 assert((VecVT.is128BitVector() || VecVT.is256BitVector()) &&
45884 "Unexpected MOVMSK operand");
45885 unsigned NumElts = VecVT.getVectorNumElements();
45886 unsigned NumEltBits = VecVT.getScalarSizeInBits();
45888 bool IsAnyOf = CmpOpcode == X86ISD::CMP && CmpVal.isZero();
45889 bool IsAllOf = (CmpOpcode == X86ISD::SUB || CmpOpcode == X86ISD::CMP) &&
45890 NumElts <= CmpBits && CmpVal.isMask(NumElts);
45891 if (!IsAnyOf && !IsAllOf)
45892 return SDValue();
45894 // TODO: Check more combining cases.
45895 // Here we check the number of uses of the MOVMSK to decide whether to
45896 // combine. Currently only the "MOVMSK(CONCAT(..))" and "MOVMSK(PCMPEQ(..))"
45897 // combines are known to benefit from this one-use constraint.
45898 bool IsOneUse = CmpOp.getNode()->hasOneUse();
45900 // See if we can peek through to a vector with a wider element type, if the
45901 // signbits extend down to all the sub-elements as well.
45902 // Calling MOVMSK with the wider type, avoiding the bitcast, helps expose
45903 // potential SimplifyDemandedBits/Elts cases.
45904 // If we looked through a truncate that discards bits, we can't do this
45905 // transform.
45906 // FIXME: We could do this transform for truncates that discarded bits by
45907 // inserting an AND mask between the new MOVMSK and the CMP.
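// Illustrative example: a v16i8 MOVMSK of a bitcast from v4i32 whose elements
// are known sign-splats can instead MOVMSK the v4i32 source directly and
// compare against 0xF for all_of or 0 for any_of.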
45908 if (Vec.getOpcode() == ISD::BITCAST && NumElts <= CmpBits) {
45909 SDValue BC = peekThroughBitcasts(Vec);
45910 MVT BCVT = BC.getSimpleValueType();
45911 unsigned BCNumElts = BCVT.getVectorNumElements();
45912 unsigned BCNumEltBits = BCVT.getScalarSizeInBits();
45913 if ((BCNumEltBits == 32 || BCNumEltBits == 64) &&
45914 BCNumEltBits > NumEltBits &&
45915 DAG.ComputeNumSignBits(BC) > (BCNumEltBits - NumEltBits)) {
45916 SDLoc DL(EFLAGS);
45917 APInt CmpMask = APInt::getLowBitsSet(32, IsAnyOf ? 0 : BCNumElts);
45918 return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
45919 DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, BC),
45920 DAG.getConstant(CmpMask, DL, MVT::i32));
45924 // MOVMSK(CONCAT(X,Y)) == 0 -> MOVMSK(OR(X,Y)).
45925 // MOVMSK(CONCAT(X,Y)) != 0 -> MOVMSK(OR(X,Y)).
45926 // MOVMSK(CONCAT(X,Y)) == -1 -> MOVMSK(AND(X,Y)).
45927 // MOVMSK(CONCAT(X,Y)) != -1 -> MOVMSK(AND(X,Y)).
45928 if (VecVT.is256BitVector() && NumElts <= CmpBits && IsOneUse) {
45929 SmallVector<SDValue> Ops;
45930 if (collectConcatOps(peekThroughBitcasts(Vec).getNode(), Ops, DAG) &&
45931 Ops.size() == 2) {
45932 SDLoc DL(EFLAGS);
45933 EVT SubVT = Ops[0].getValueType().changeTypeToInteger();
45934 APInt CmpMask = APInt::getLowBitsSet(32, IsAnyOf ? 0 : NumElts / 2);
45935 SDValue V = DAG.getNode(IsAnyOf ? ISD::OR : ISD::AND, DL, SubVT,
45936 DAG.getBitcast(SubVT, Ops[0]),
45937 DAG.getBitcast(SubVT, Ops[1]));
45938 V = DAG.getBitcast(VecVT.getHalfNumVectorElementsVT(), V);
45939 return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
45940 DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V),
45941 DAG.getConstant(CmpMask, DL, MVT::i32));
45945 // MOVMSK(PCMPEQ(X,0)) == -1 -> PTESTZ(X,X).
45946 // MOVMSK(PCMPEQ(X,0)) != -1 -> !PTESTZ(X,X).
45947 // MOVMSK(PCMPEQ(X,Y)) == -1 -> PTESTZ(XOR(X,Y),XOR(X,Y)).
45948 // MOVMSK(PCMPEQ(X,Y)) != -1 -> !PTESTZ(XOR(X,Y),XOR(X,Y)).
45949 if (IsAllOf && Subtarget.hasSSE41() && IsOneUse) {
45950 MVT TestVT = VecVT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
45951 SDValue BC = peekThroughBitcasts(Vec);
45952 // Ensure MOVMSK was testing every signbit of BC.
45953 if (BC.getValueType().getVectorNumElements() <= NumElts) {
45954 if (BC.getOpcode() == X86ISD::PCMPEQ) {
45955 SDValue V = DAG.getNode(ISD::XOR, SDLoc(BC), BC.getValueType(),
45956 BC.getOperand(0), BC.getOperand(1));
45957 V = DAG.getBitcast(TestVT, V);
45958 return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
45960 // Check for 256-bit split vector cases.
45961 if (BC.getOpcode() == ISD::AND &&
45962 BC.getOperand(0).getOpcode() == X86ISD::PCMPEQ &&
45963 BC.getOperand(1).getOpcode() == X86ISD::PCMPEQ) {
45964 SDValue LHS = BC.getOperand(0);
45965 SDValue RHS = BC.getOperand(1);
45966 LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), LHS.getValueType(),
45967 LHS.getOperand(0), LHS.getOperand(1));
45968 RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), RHS.getValueType(),
45969 RHS.getOperand(0), RHS.getOperand(1));
45970 LHS = DAG.getBitcast(TestVT, LHS);
45971 RHS = DAG.getBitcast(TestVT, RHS);
45972 SDValue V = DAG.getNode(ISD::OR, SDLoc(EFLAGS), TestVT, LHS, RHS);
45973 return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
45978 // See if we can avoid a PACKSS by calling MOVMSK on the sources.
45979 // For vXi16 cases we can use a v2Xi8 PMOVMSKB. We must mask out
45980 // sign bits prior to the comparison with zero unless we know that
45981 // the vXi16 splats the sign bit down to the lower i8 half.
45982 // TODO: Handle all_of patterns.
45983 if (Vec.getOpcode() == X86ISD::PACKSS && VecVT == MVT::v16i8) {
45984 SDValue VecOp0 = Vec.getOperand(0);
45985 SDValue VecOp1 = Vec.getOperand(1);
45986 bool SignExt0 = DAG.ComputeNumSignBits(VecOp0) > 8;
45987 bool SignExt1 = DAG.ComputeNumSignBits(VecOp1) > 8;
45988 // PMOVMSKB(PACKSSBW(X, undef)) -> PMOVMSKB(BITCAST_v16i8(X)) & 0xAAAA.
45989 if (IsAnyOf && CmpBits == 8 && VecOp1.isUndef()) {
45990 SDLoc DL(EFLAGS);
45991 SDValue Result = DAG.getBitcast(MVT::v16i8, VecOp0);
45992 Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
45993 Result = DAG.getZExtOrTrunc(Result, DL, MVT::i16);
45994 if (!SignExt0) {
45995 Result = DAG.getNode(ISD::AND, DL, MVT::i16, Result,
45996 DAG.getConstant(0xAAAA, DL, MVT::i16));
45998 return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
45999 DAG.getConstant(0, DL, MVT::i16));
46001 // PMOVMSKB(PACKSSBW(LO(X), HI(X)))
46002 // -> PMOVMSKB(BITCAST_v32i8(X)) & 0xAAAAAAAA.
46003 if (CmpBits >= 16 && Subtarget.hasInt256() &&
46004 (IsAnyOf || (SignExt0 && SignExt1))) {
46005 if (SDValue Src = getSplitVectorSrc(VecOp0, VecOp1, true)) {
46006 SDLoc DL(EFLAGS);
46007 SDValue Result = peekThroughBitcasts(Src);
46008 if (IsAllOf && Result.getOpcode() == X86ISD::PCMPEQ &&
46009 Result.getValueType().getVectorNumElements() <= NumElts) {
46010 SDValue V = DAG.getNode(ISD::XOR, DL, Result.getValueType(),
46011 Result.getOperand(0), Result.getOperand(1));
46012 V = DAG.getBitcast(MVT::v4i64, V);
46013 return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
46015 Result = DAG.getBitcast(MVT::v32i8, Result);
46016 Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
46017 unsigned CmpMask = IsAnyOf ? 0 : 0xFFFFFFFF;
46018 if (!SignExt0 || !SignExt1) {
46019 assert(IsAnyOf &&
46020 "Only perform v16i16 signmasks for any_of patterns");
46021 Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
46022 DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
46024 return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
46025 DAG.getConstant(CmpMask, DL, MVT::i32));
46030 // MOVMSK(SHUFFLE(X,u)) -> MOVMSK(X) iff every element is referenced.
46031 // Since we peek through a bitcast, we need to be careful if the base vector
46032 // type has smaller elements than the MOVMSK type. In that case, even if
46033 // all the elements are demanded by the shuffle mask, only the "high"
46034 // elements which have highbits that align with highbits in the MOVMSK vec
46035 // elements are actually demanded. A simplification of spurious operations
46036 // on the "low" elements takes place during other simplifications.
46038 // For example, in MOVMSK64(BITCAST(SHUF32 X, (1,0,3,2))) every element is
46039 // demanded, yet because the shuffle swaps elements around, the result can
46040 // still change.
46042 // To address this, we check that we can scale the shuffle mask to the MOVMSK
46043 // element width (this will ensure the "high" elements match). It's slightly
46044 // overly conservative, but fine for an edge case fold.
46045 SmallVector<int, 32> ShuffleMask, ScaledMaskUnused;
46046 SmallVector<SDValue, 2> ShuffleInputs;
46047 if (NumElts <= CmpBits &&
46048 getTargetShuffleInputs(peekThroughBitcasts(Vec), ShuffleInputs,
46049 ShuffleMask, DAG) &&
46050 ShuffleInputs.size() == 1 && !isAnyZeroOrUndef(ShuffleMask) &&
46051 ShuffleInputs[0].getValueSizeInBits() == VecVT.getSizeInBits() &&
46052 scaleShuffleElements(ShuffleMask, NumElts, ScaledMaskUnused)) {
46053 unsigned NumShuffleElts = ShuffleMask.size();
46054 APInt DemandedElts = APInt::getZero(NumShuffleElts);
46055 for (int M : ShuffleMask) {
46056 assert(0 <= M && M < (int)NumShuffleElts && "Bad unary shuffle index");
46057 DemandedElts.setBit(M);
46059 if (DemandedElts.isAllOnes()) {
46060 SDLoc DL(EFLAGS);
46061 SDValue Result = DAG.getBitcast(VecVT, ShuffleInputs[0]);
46062 Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
46063 Result =
46064 DAG.getZExtOrTrunc(Result, DL, EFLAGS.getOperand(0).getValueType());
46065 return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
46066 EFLAGS.getOperand(1));
46070 // MOVMSKPS(V) !=/== 0 -> TESTPS(V,V)
46071 // MOVMSKPD(V) !=/== 0 -> TESTPD(V,V)
46072 // MOVMSKPS(V) !=/== -1 -> TESTPS(V,V)
46073 // MOVMSKPD(V) !=/== -1 -> TESTPD(V,V)
46074 // iff every element is referenced.
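// Flag mapping note: for any_of we test V against itself, so ZF is set iff no
// sign bit of V is set and COND_E/COND_NE carry over unchanged. For all_of we
// test V against an all-ones vector, so CF is set iff every sign bit of V is
// set, which is why COND_E becomes COND_B and COND_NE becomes COND_AE below.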
46075 if (NumElts <= CmpBits && Subtarget.hasAVX() &&
46076 !Subtarget.preferMovmskOverVTest() && IsOneUse &&
46077 (NumEltBits == 32 || NumEltBits == 64)) {
46078 SDLoc DL(EFLAGS);
46079 MVT FloatSVT = MVT::getFloatingPointVT(NumEltBits);
46080 MVT FloatVT = MVT::getVectorVT(FloatSVT, NumElts);
46081 MVT IntVT = FloatVT.changeVectorElementTypeToInteger();
46082 SDValue LHS = Vec;
46083 SDValue RHS = IsAnyOf ? Vec : DAG.getAllOnesConstant(DL, IntVT);
46084 CC = IsAnyOf ? CC : (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
46085 return DAG.getNode(X86ISD::TESTP, DL, MVT::i32,
46086 DAG.getBitcast(FloatVT, LHS),
46087 DAG.getBitcast(FloatVT, RHS));
46090 return SDValue();
46093 /// Optimize an EFLAGS definition used according to the condition code \p CC
46094 /// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
46095 /// uses of chain values.
46096 static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
46097 SelectionDAG &DAG,
46098 const X86Subtarget &Subtarget) {
46099 if (CC == X86::COND_B)
46100 if (SDValue Flags = combineCarryThroughADD(EFLAGS, DAG))
46101 return Flags;
46103 if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
46104 return R;
46106 if (SDValue R = combinePTESTCC(EFLAGS, CC, DAG, Subtarget))
46107 return R;
46109 if (SDValue R = combineSetCCMOVMSK(EFLAGS, CC, DAG, Subtarget))
46110 return R;
46112 return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
46115 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
46116 static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
46117 TargetLowering::DAGCombinerInfo &DCI,
46118 const X86Subtarget &Subtarget) {
46119 SDLoc DL(N);
46121 SDValue FalseOp = N->getOperand(0);
46122 SDValue TrueOp = N->getOperand(1);
46123 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
46124 SDValue Cond = N->getOperand(3);
46126 // cmov X, X, ?, ? --> X
46127 if (TrueOp == FalseOp)
46128 return TrueOp;
46130 // Try to simplify the EFLAGS and condition code operands.
46131 // We can't always do this as FCMOV only supports a subset of X86 cond.
46132 if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
46133 if (!(FalseOp.getValueType() == MVT::f80 ||
46134 (FalseOp.getValueType() == MVT::f64 && !Subtarget.hasSSE2()) ||
46135 (FalseOp.getValueType() == MVT::f32 && !Subtarget.hasSSE1())) ||
46136 !Subtarget.canUseCMOV() || hasFPCMov(CC)) {
46137 SDValue Ops[] = {FalseOp, TrueOp, DAG.getTargetConstant(CC, DL, MVT::i8),
46138 Flags};
46139 return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
46143 // If this is a select between two integer constants, try to do some
46144 // optimizations. Note that the operands are ordered the opposite of SELECT
46145 // operands.
46146 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
46147 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
46148 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
46149 // larger than FalseC (the false value).
46150 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
46151 CC = X86::GetOppositeBranchCondition(CC);
46152 std::swap(TrueC, FalseC);
46153 std::swap(TrueOp, FalseOp);
46156 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
46157 // This is efficient for any integer data type (including i8/i16) and
46158 // shift amount.
46159 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
46160 Cond = getSETCC(CC, Cond, DL, DAG);
46162 // Zero extend the condition if needed.
46163 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
46165 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
46166 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
46167 DAG.getConstant(ShAmt, DL, MVT::i8));
46168 return Cond;
46171 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst. This is efficient
46172 // for any integer data type, including i8/i16.
46173 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
46174 Cond = getSETCC(CC, Cond, DL, DAG);
46176 // Zero extend the condition if needed.
46177 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
46178 FalseC->getValueType(0), Cond);
46179 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
46180 SDValue(FalseC, 0));
46181 return Cond;
46184 // Optimize cases that will turn into an LEA instruction. This requires
46185 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
46186 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
46187 APInt Diff = TrueC->getAPIntValue() - FalseC->getAPIntValue();
46188 assert(Diff.getBitWidth() == N->getValueType(0).getSizeInBits() &&
46189 "Implicit constant truncation");
46191 bool isFastMultiplier = false;
46192 if (Diff.ult(10)) {
46193 switch (Diff.getZExtValue()) {
46194 default: break;
46195 case 1: // result = add base, cond
46196 case 2: // result = lea base( , cond*2)
46197 case 3: // result = lea base(cond, cond*2)
46198 case 4: // result = lea base( , cond*4)
46199 case 5: // result = lea base(cond, cond*4)
46200 case 8: // result = lea base( , cond*8)
46201 case 9: // result = lea base(cond, cond*8)
46202 isFastMultiplier = true;
46203 break;
46207 if (isFastMultiplier) {
46208 Cond = getSETCC(CC, Cond, DL ,DAG);
46209 // Zero extend the condition if needed.
46210 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
46211 Cond);
46212 // Scale the condition by the difference.
46213 if (Diff != 1)
46214 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
46215 DAG.getConstant(Diff, DL, Cond.getValueType()));
46217 // Add the base if non-zero.
46218 if (FalseC->getAPIntValue() != 0)
46219 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
46220 SDValue(FalseC, 0));
46221 return Cond;
46227 // Handle these cases:
46228 // (select (x != c), e, c) -> (select (x != c), e, x),
46229 // (select (x == c), c, e) -> (select (x == c), x, e)
46230 // where c is an integer constant, and the "select" is the combination
46231 // of CMOV and CMP.
46233 // The rationale for this change is that a conditional-move from a constant
46234 // needs two instructions, whereas a conditional-move from a register needs
46235 // only one instruction.
46237 // CAVEAT: Replacing a constant with a symbolic value may obscure
46238 // some instruction-combining opportunities. This optimization needs to be
46239 // postponed as late as possible.
46241 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
46242 // the DCI.xxxx conditions are provided to postpone the optimization as
46243 // late as possible.
46245 ConstantSDNode *CmpAgainst = nullptr;
46246 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
46247 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
46248 !isa<ConstantSDNode>(Cond.getOperand(0))) {
46250 if (CC == X86::COND_NE &&
46251 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
46252 CC = X86::GetOppositeBranchCondition(CC);
46253 std::swap(TrueOp, FalseOp);
46256 if (CC == X86::COND_E &&
46257 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
46258 SDValue Ops[] = {FalseOp, Cond.getOperand(0),
46259 DAG.getTargetConstant(CC, DL, MVT::i8), Cond};
46260 return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
46265 // Transform:
46267 // (cmov 1 T (uge T 2))
46269 // to:
46271 // (adc T 0 (sub T 1))
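// Sanity check of the equivalence: for T >= 2 the cmov picks T and (sub T 1)
// produces no borrow, so adc yields T + 0 = T; for T == 1 the cmov picks the
// constant 1 and adc yields 1 + 0 = 1; for T == 0 the cmov picks 1 and the
// borrow from (sub 0 1) makes adc yield 0 + 0 + CF = 1.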
46272 if (CC == X86::COND_AE && isOneConstant(FalseOp) &&
46273 Cond.getOpcode() == X86ISD::SUB && Cond->hasOneUse()) {
46274 SDValue Cond0 = Cond.getOperand(0);
46275 if (Cond0.getOpcode() == ISD::TRUNCATE)
46276 Cond0 = Cond0.getOperand(0);
46277 auto *Sub1C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
46278 if (Cond0 == TrueOp && Sub1C && Sub1C->getZExtValue() == 2) {
46279 EVT CondVT = Cond->getValueType(0);
46280 EVT OuterVT = N->getValueType(0);
46281 // Subtract 1 and generate a carry.
46282 SDValue NewSub =
46283 DAG.getNode(X86ISD::SUB, DL, Cond->getVTList(), Cond.getOperand(0),
46284 DAG.getConstant(1, DL, CondVT));
46285 SDValue EFLAGS(NewSub.getNode(), 1);
46286 return DAG.getNode(X86ISD::ADC, DL, DAG.getVTList(OuterVT, MVT::i32),
46287 TrueOp, DAG.getConstant(0, DL, OuterVT), EFLAGS);
46291 // Fold and/or of setcc's to double CMOV:
46292 // (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
46293 // (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
46295 // This combine lets us generate:
46296 // cmovcc1 (jcc1 if we don't have CMOV)
46297 // cmovcc2 (same)
46298 // instead of:
46299 // setcc1
46300 // setcc2
46301 // and/or
46302 // cmovne (jne if we don't have CMOV)
46303 // When we can't use the CMOV instruction, it might increase branch
46304 // mispredicts.
46305 // When we can use CMOV, or when there is no mispredict, this improves
46306 // throughput and reduces register pressure.
46308 if (CC == X86::COND_NE) {
46309 SDValue Flags;
46310 X86::CondCode CC0, CC1;
46311 bool isAndSetCC;
46312 if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
46313 if (isAndSetCC) {
46314 std::swap(FalseOp, TrueOp);
46315 CC0 = X86::GetOppositeBranchCondition(CC0);
46316 CC1 = X86::GetOppositeBranchCondition(CC1);
46319 SDValue LOps[] = {FalseOp, TrueOp,
46320 DAG.getTargetConstant(CC0, DL, MVT::i8), Flags};
46321 SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
46322 SDValue Ops[] = {LCMOV, TrueOp, DAG.getTargetConstant(CC1, DL, MVT::i8),
46323 Flags};
46324 SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
46325 return CMOV;
46329 // Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) ->
46330 // (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2)
46331 // Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) ->
46332 // (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2)
46333 if ((CC == X86::COND_NE || CC == X86::COND_E) &&
46334 Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) {
46335 SDValue Add = TrueOp;
46336 SDValue Const = FalseOp;
46337 // Canonicalize the condition code for easier matching and output.
46338 if (CC == X86::COND_E)
46339 std::swap(Add, Const);
46341 // We might have replaced the constant in the cmov with the LHS of the
46342 // compare. If so change it to the RHS of the compare.
46343 if (Const == Cond.getOperand(0))
46344 Const = Cond.getOperand(1);
46346 // Ok, now make sure that Add is (add (cttz X), C2) and Const is a constant.
46347 if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD &&
46348 Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) &&
46349 (Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
46350 Add.getOperand(0).getOpcode() == ISD::CTTZ) &&
46351 Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) {
46352 EVT VT = N->getValueType(0);
46353 // This should constant fold.
46354 SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
46355 SDValue CMov =
46356 DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
46357 DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8), Cond);
46358 return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
46362 return SDValue();
46365 /// Different mul shrinking modes.
46366 enum class ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
46368 static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
46369 EVT VT = N->getOperand(0).getValueType();
46370 if (VT.getScalarSizeInBits() != 32)
46371 return false;
46373 assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
46374 unsigned SignBits[2] = {1, 1};
46375 bool IsPositive[2] = {false, false};
46376 for (unsigned i = 0; i < 2; i++) {
46377 SDValue Opd = N->getOperand(i);
46379 SignBits[i] = DAG.ComputeNumSignBits(Opd);
46380 IsPositive[i] = DAG.SignBitIsZero(Opd);
46383 bool AllPositive = IsPositive[0] && IsPositive[1];
46384 unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
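// For an i32 element, having at least (33 - N) sign bits means the value fits
// in N signed bits, so 25+ sign bits implies an i8 range and 17+ sign bits an
// i16 range; the relaxed 24/16 thresholds below are only safe because both
// operands are known to be non-negative in those modes.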
46385 // When both operands are in the range [-128, 127], use MULS8 mode.
46386 if (MinSignBits >= 25)
46387 Mode = ShrinkMode::MULS8;
46388 // When both operands are in the range [0, 255], use MULU8 mode.
46389 else if (AllPositive && MinSignBits >= 24)
46390 Mode = ShrinkMode::MULU8;
46391 // When both operands are in the range [-32768, 32767], use MULS16 mode.
46392 else if (MinSignBits >= 17)
46393 Mode = ShrinkMode::MULS16;
46394 // When both operands are in the range [0, 65535], use MULU16 mode.
46395 else if (AllPositive && MinSignBits >= 16)
46396 Mode = ShrinkMode::MULU16;
46397 else
46398 return false;
46399 return true;
46402 /// When the operands of vector mul are extended from smaller size values,
46403 /// like i8 and i16, the type of mul may be shrunk to generate more
46404 /// efficient code. Two typical patterns are handled:
46405 /// Pattern1:
46406 /// %2 = sext/zext <N x i8> %1 to <N x i32>
46407 /// %4 = sext/zext <N x i8> %3 to <N x i32>
46408 /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
46409 /// %5 = mul <N x i32> %2, %4
46411 /// Pattern2:
46412 /// %2 = zext/sext <N x i16> %1 to <N x i32>
46413 /// %4 = zext/sext <N x i16> %3 to <N x i32>
46414 /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
46415 /// %5 = mul <N x i32> %2, %4
46417 /// There are four mul shrinking modes:
46418 /// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
46419 /// -128 to 127, and the scalar value range of %4 is also -128 to 127,
46420 /// generate pmullw+sext32 for it (MULS8 mode).
46421 /// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
46422 /// 0 to 255, and the scalar value range of %4 is also 0 to 255,
46423 /// generate pmullw+zext32 for it (MULU8 mode).
46424 /// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
46425 /// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
46426 /// generate pmullw+pmulhw for it (MULS16 mode).
46427 /// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
46428 /// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
46429 /// generate pmullw+pmulhuw for it (MULU16 mode).
46430 static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
46431 const X86Subtarget &Subtarget) {
46432 // Check for legality
46433 // 128-bit vector pmullw/pmulhw require SSE2.
46434 if (!Subtarget.hasSSE2())
46435 return SDValue();
46437 // Check for profitability
46438 // pmulld is supported since SSE41. It is better to use pmulld
46439 // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
46440 // the expansion.
46441 bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize();
46442 if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
46443 return SDValue();
46445 ShrinkMode Mode;
46446 if (!canReduceVMulWidth(N, DAG, Mode))
46447 return SDValue();
46449 SDLoc DL(N);
46450 SDValue N0 = N->getOperand(0);
46451 SDValue N1 = N->getOperand(1);
46452 EVT VT = N->getOperand(0).getValueType();
46453 unsigned NumElts = VT.getVectorNumElements();
46454 if ((NumElts % 2) != 0)
46455 return SDValue();
46457 EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
46459 // Shrink the operands of mul.
46460 SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
46461 SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
46463 // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
46464 // lower part is needed.
46465 SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
46466 if (Mode == ShrinkMode::MULU8 || Mode == ShrinkMode::MULS8)
46467 return DAG.getNode((Mode == ShrinkMode::MULU8) ? ISD::ZERO_EXTEND
46468 : ISD::SIGN_EXTEND,
46469 DL, VT, MulLo);
46471 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts / 2);
46472 // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
46473 // the higher part is also needed.
46474 SDValue MulHi =
46475 DAG.getNode(Mode == ShrinkMode::MULS16 ? ISD::MULHS : ISD::MULHU, DL,
46476 ReducedVT, NewN0, NewN1);
46478 // Repack the lower part and higher part result of mul into a wider
46479 // result.
46480 // Generate shuffle functioning as punpcklwd.
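// e.g. for 8 x i16 this builds the interleave mask {0, 8, 1, 9, 2, 10, 3, 11}.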
46481 SmallVector<int, 16> ShuffleMask(NumElts);
46482 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
46483 ShuffleMask[2 * i] = i;
46484 ShuffleMask[2 * i + 1] = i + NumElts;
46486 SDValue ResLo =
46487 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
46488 ResLo = DAG.getBitcast(ResVT, ResLo);
46489 // Generate shuffle functioning as punpckhwd.
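// e.g. for 8 x i16 this builds the interleave mask {4, 12, 5, 13, 6, 14, 7, 15}.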
46490 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
46491 ShuffleMask[2 * i] = i + NumElts / 2;
46492 ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
46494 SDValue ResHi =
46495 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
46496 ResHi = DAG.getBitcast(ResVT, ResHi);
46497 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
46500 static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
46501 EVT VT, const SDLoc &DL) {
46503 auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) {
46504 SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
46505 DAG.getConstant(Mult, DL, VT));
46506 Result = DAG.getNode(ISD::SHL, DL, VT, Result,
46507 DAG.getConstant(Shift, DL, MVT::i8));
46508 Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
46509 N->getOperand(0));
46510 return Result;
46513 auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) {
46514 SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
46515 DAG.getConstant(Mul1, DL, VT));
46516 Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result,
46517 DAG.getConstant(Mul2, DL, VT));
46518 Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
46519 N->getOperand(0));
46520 return Result;
46523 switch (MulAmt) {
46524 default:
46525 break;
46526 case 11:
46527 // mul x, 11 => add ((shl (mul x, 5), 1), x)
46528 return combineMulShlAddOrSub(5, 1, /*isAdd*/ true);
46529 case 21:
46530 // mul x, 21 => add ((shl (mul x, 5), 2), x)
46531 return combineMulShlAddOrSub(5, 2, /*isAdd*/ true);
46532 case 41:
46533 // mul x, 41 => add ((shl (mul x, 5), 3), x)
46534 return combineMulShlAddOrSub(5, 3, /*isAdd*/ true);
46535 case 22:
46536 // mul x, 22 => add (add ((shl (mul x, 5), 2), x), x)
46537 return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
46538 combineMulShlAddOrSub(5, 2, /*isAdd*/ true));
46539 case 19:
46540 // mul x, 19 => add ((shl (mul x, 9), 1), x)
46541 return combineMulShlAddOrSub(9, 1, /*isAdd*/ true);
46542 case 37:
46543 // mul x, 37 => add ((shl (mul x, 9), 2), x)
46544 return combineMulShlAddOrSub(9, 2, /*isAdd*/ true);
46545 case 73:
46546 // mul x, 73 => add ((shl (mul x, 9), 3), x)
46547 return combineMulShlAddOrSub(9, 3, /*isAdd*/ true);
46548 case 13:
46549 // mul x, 13 => add ((shl (mul x, 3), 2), x)
46550 return combineMulShlAddOrSub(3, 2, /*isAdd*/ true);
46551 case 23:
46552 // mul x, 23 => sub ((shl (mul x, 3), 3), x)
46553 return combineMulShlAddOrSub(3, 3, /*isAdd*/ false);
46554 case 26:
46555 // mul x, 26 => add ((mul (mul x, 5), 5), x)
46556 return combineMulMulAddOrSub(5, 5, /*isAdd*/ true);
46557 case 28:
46558 // mul x, 28 => add ((mul (mul x, 9), 3), x)
46559 return combineMulMulAddOrSub(9, 3, /*isAdd*/ true);
46560 case 29:
46561 // mul x, 29 => add (add ((mul (mul x, 9), 3), x), x)
46562 return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
46563 combineMulMulAddOrSub(9, 3, /*isAdd*/ true));
46566 // Another trick. If this is a power of 2 + 2/4/8, we can use a shift followed
46567 // by a single LEA.
46568 // First check if this is a sum of two powers of 2 because that's easy. Then
46569 // count how many trailing zeros there are up to the first set bit.
46570 // TODO: We can do this even without LEA at a cost of two shifts and an add.
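// e.g. MulAmt == 20 (0b10100): ScaleShift == 2 and ShiftAmt == 4, so we emit
// (x << 4) + (x << 2), where the scale-by-4 addend can fold into an LEA.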
46571 if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
46572 unsigned ScaleShift = llvm::countr_zero(MulAmt);
46573 if (ScaleShift >= 1 && ScaleShift < 4) {
46574 unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
46575 SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46576 DAG.getConstant(ShiftAmt, DL, MVT::i8));
46577 SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46578 DAG.getConstant(ScaleShift, DL, MVT::i8));
46579 return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2);
46583 return SDValue();
46586 // If the upper 17 bits of either operand are zero and the upper bits of the
46587 // other operand are all zero/sign bits, then we can use PMADDWD, which is
46588 // always at least as quick as PMULLD, except on KNL.
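// The underlying idea: PMADDWD multiplies pairs of signed i16 elements and
// adds the adjacent products into each i32 lane. When the odd (upper) i16 half
// of one operand is known zero, the second product of each pair vanishes, so
// each lane reduces to the product of the low halves, matching the low 32 bits
// of the original multiply under the sign-bit constraints checked below.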
46589 static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
46590 const X86Subtarget &Subtarget) {
46591 if (!Subtarget.hasSSE2())
46592 return SDValue();
46594 if (Subtarget.isPMADDWDSlow())
46595 return SDValue();
46597 EVT VT = N->getValueType(0);
46599 // Only support vXi32 vectors.
46600 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32)
46601 return SDValue();
46603 // Make sure the type is legal or can split/widen to a legal type.
46604 // With AVX512 but without BWI, we would need to split v32i16.
46605 unsigned NumElts = VT.getVectorNumElements();
46606 if (NumElts == 1 || !isPowerOf2_32(NumElts))
46607 return SDValue();
46609 // With AVX512 but without BWI, we would need to split v32i16.
46610 if (32 <= (2 * NumElts) && Subtarget.hasAVX512() && !Subtarget.hasBWI())
46611 return SDValue();
46613 SDValue N0 = N->getOperand(0);
46614 SDValue N1 = N->getOperand(1);
46616 // If we are zero/sign extending two steps without SSE4.1, it's better to
46617 // reduce the vmul width instead.
46618 if (!Subtarget.hasSSE41() &&
46619 (((N0.getOpcode() == ISD::ZERO_EXTEND &&
46620 N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
46621 (N1.getOpcode() == ISD::ZERO_EXTEND &&
46622 N1.getOperand(0).getScalarValueSizeInBits() <= 8)) ||
46623 ((N0.getOpcode() == ISD::SIGN_EXTEND &&
46624 N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
46625 (N1.getOpcode() == ISD::SIGN_EXTEND &&
46626 N1.getOperand(0).getScalarValueSizeInBits() <= 8))))
46627 return SDValue();
46629 // If we are sign extending a wide vector without SSE4.1, it's better to reduce
46630 // the vmul width instead.
46631 if (!Subtarget.hasSSE41() &&
46632 (N0.getOpcode() == ISD::SIGN_EXTEND &&
46633 N0.getOperand(0).getValueSizeInBits() > 128) &&
46634 (N1.getOpcode() == ISD::SIGN_EXTEND &&
46635 N1.getOperand(0).getValueSizeInBits() > 128))
46636 return SDValue();
46638 // Sign bits must extend down to the lowest i16.
46639 if (DAG.ComputeMaxSignificantBits(N1) > 16 ||
46640 DAG.ComputeMaxSignificantBits(N0) > 16)
46641 return SDValue();
46643 // At least one of the elements must be zero in the upper 17 bits, or can be
46644 // safely made zero without altering the final result.
46645 auto GetZeroableOp = [&](SDValue Op) {
46646 APInt Mask17 = APInt::getHighBitsSet(32, 17);
46647 if (DAG.MaskedValueIsZero(Op, Mask17))
46648 return Op;
46649 // Mask off upper 16-bits of sign-extended constants.
46650 if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()))
46651 return DAG.getNode(ISD::AND, SDLoc(N), VT, Op,
46652 DAG.getConstant(0xFFFF, SDLoc(N), VT));
46653 if (Op.getOpcode() == ISD::SIGN_EXTEND && N->isOnlyUserOf(Op.getNode())) {
46654 SDValue Src = Op.getOperand(0);
46655 // Convert sext(vXi16) to zext(vXi16).
46656 if (Src.getScalarValueSizeInBits() == 16 && VT.getSizeInBits() <= 128)
46657 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Src);
46658 // Convert sext(vXi8) to zext(vXi16 sext(vXi8)) on pre-SSE41 targets
46659 // which will expand the extension.
46660 if (Src.getScalarValueSizeInBits() < 16 && !Subtarget.hasSSE41()) {
46661 EVT ExtVT = VT.changeVectorElementType(MVT::i16);
46662 Src = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), ExtVT, Src);
46663 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Src);
46666 // Convert SIGN_EXTEND_VECTOR_INREG to ZERO_EXTEND_VECTOR_INREG.
46667 if (Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG &&
46668 N->isOnlyUserOf(Op.getNode())) {
46669 SDValue Src = Op.getOperand(0);
46670 if (Src.getScalarValueSizeInBits() == 16)
46671 return DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(N), VT, Src);
46673 // Convert VSRAI(Op, 16) to VSRLI(Op, 16).
46674 if (Op.getOpcode() == X86ISD::VSRAI && Op.getConstantOperandVal(1) == 16 &&
46675 N->isOnlyUserOf(Op.getNode())) {
46676 return DAG.getNode(X86ISD::VSRLI, SDLoc(N), VT, Op.getOperand(0),
46677 Op.getOperand(1));
46679 return SDValue();
46681 SDValue ZeroN0 = GetZeroableOp(N0);
46682 SDValue ZeroN1 = GetZeroableOp(N1);
46683 if (!ZeroN0 && !ZeroN1)
46684 return SDValue();
46685 N0 = ZeroN0 ? ZeroN0 : N0;
46686 N1 = ZeroN1 ? ZeroN1 : N1;
46688 // Use SplitOpsAndApply to handle AVX splitting.
46689 auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
46690 ArrayRef<SDValue> Ops) {
46691 MVT ResVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
46692 MVT OpVT = MVT::getVectorVT(MVT::i16, Ops[0].getValueSizeInBits() / 16);
46693 return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT,
46694 DAG.getBitcast(OpVT, Ops[0]),
46695 DAG.getBitcast(OpVT, Ops[1]));
46697 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {N0, N1},
46698 PMADDWDBuilder);
46701 static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG,
46702 const X86Subtarget &Subtarget) {
46703 if (!Subtarget.hasSSE2())
46704 return SDValue();
46706 EVT VT = N->getValueType(0);
46708 // Only support vXi64 vectors.
46709 if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
46710 VT.getVectorNumElements() < 2 ||
46711 !isPowerOf2_32(VT.getVectorNumElements()))
46712 return SDValue();
46714 SDValue N0 = N->getOperand(0);
46715 SDValue N1 = N->getOperand(1);
46717 // PMULDQ returns the 64-bit result of the signed multiplication of the lower
46718 // 32 bits of each operand. We can lower with it if the sign bits stretch that far.
46719 if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 &&
46720 DAG.ComputeNumSignBits(N1) > 32) {
46721 auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
46722 ArrayRef<SDValue> Ops) {
46723 return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops);
46725 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
46726 PMULDQBuilder, /*CheckBWI*/false);
46729 // If the upper bits are zero we can use a single pmuludq.
46730 APInt Mask = APInt::getHighBitsSet(64, 32);
46731 if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) {
46732 auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
46733 ArrayRef<SDValue> Ops) {
46734 return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops);
46736 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
46737 PMULUDQBuilder, /*CheckBWI*/false);
46740 return SDValue();
46743 static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
46744 TargetLowering::DAGCombinerInfo &DCI,
46745 const X86Subtarget &Subtarget) {
46746 EVT VT = N->getValueType(0);
46748 if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
46749 return V;
46751 if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget))
46752 return V;
46754 if (DCI.isBeforeLegalize() && VT.isVector())
46755 return reduceVMULWidth(N, DAG, Subtarget);
46757 // Optimize a single multiply with constant into two operations in order to
46758 // implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
46759 if (!MulConstantOptimization)
46760 return SDValue();
46762 // An imul is usually smaller than the alternative sequence.
46763 if (DAG.getMachineFunction().getFunction().hasMinSize())
46764 return SDValue();
46766 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
46767 return SDValue();
46769 if (VT != MVT::i64 && VT != MVT::i32 &&
46770 (!VT.isVector() || !VT.isSimple() || !VT.isInteger()))
46771 return SDValue();
46773 ConstantSDNode *CNode = isConstOrConstSplat(
46774 N->getOperand(1), /*AllowUndefs*/ true, /*AllowTrunc*/ false);
46775 const APInt *C = nullptr;
46776 if (!CNode) {
46777 if (VT.isVector())
46778 if (auto *RawC = getTargetConstantFromNode(N->getOperand(1)))
46779 if (auto *SplatC = RawC->getSplatValue())
46780 C = &(SplatC->getUniqueInteger());
46782 if (!C || C->getBitWidth() != VT.getScalarSizeInBits())
46783 return SDValue();
46784 } else {
46785 C = &(CNode->getAPIntValue());
46788 if (isPowerOf2_64(C->getZExtValue()))
46789 return SDValue();
46791 int64_t SignMulAmt = C->getSExtValue();
46792 assert(SignMulAmt != INT64_MIN && "Int min should have been handled!");
46793 uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt;
46795 SDLoc DL(N);
46796 SDValue NewMul = SDValue();
46797 if (VT == MVT::i64 || VT == MVT::i32) {
46798 if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) {
46799 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
46800 DAG.getConstant(AbsMulAmt, DL, VT));
46801 if (SignMulAmt < 0)
46802 NewMul =
46803 DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), NewMul);
46805 return NewMul;
46808 uint64_t MulAmt1 = 0;
46809 uint64_t MulAmt2 = 0;
46810 if ((AbsMulAmt % 9) == 0) {
46811 MulAmt1 = 9;
46812 MulAmt2 = AbsMulAmt / 9;
46813 } else if ((AbsMulAmt % 5) == 0) {
46814 MulAmt1 = 5;
46815 MulAmt2 = AbsMulAmt / 5;
46816 } else if ((AbsMulAmt % 3) == 0) {
46817 MulAmt1 = 3;
46818 MulAmt2 = AbsMulAmt / 3;
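// e.g. AbsMulAmt == 45 decomposes as 9 * 5 (two LEAs), while AbsMulAmt == 40
// decomposes as 5 * 8 (an LEA plus a shift).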
46821 // For negative multiply amounts, only allow MulAmt2 to be a power of 2.
46822 if (MulAmt2 &&
46823 (isPowerOf2_64(MulAmt2) ||
46824 (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {
46826 if (isPowerOf2_64(MulAmt2) && !(SignMulAmt >= 0 && N->hasOneUse() &&
46827 N->use_begin()->getOpcode() == ISD::ADD))
46828 // If the second multiplier is pow2, issue it first. We want the multiply
46829 // by 3, 5, or 9 to be folded into the addressing mode unless the lone
46830 // use is an add. Only do this for positive multiply amounts since the
46831 // negate would prevent it from being used as an address mode anyway.
46832 std::swap(MulAmt1, MulAmt2);
46834 if (isPowerOf2_64(MulAmt1))
46835 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46836 DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
46837 else
46838 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
46839 DAG.getConstant(MulAmt1, DL, VT));
46841 if (isPowerOf2_64(MulAmt2))
46842 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
46843 DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
46844 else
46845 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
46846 DAG.getConstant(MulAmt2, DL, VT));
46848 // Negate the result.
46849 if (SignMulAmt < 0)
46850 NewMul =
46851 DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), NewMul);
46852 } else if (!Subtarget.slowLEA())
46853 NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL);
46855 if (!NewMul) {
46856 EVT ShiftVT = VT.isVector() ? VT : MVT::i8;
46857 assert(C->getZExtValue() != 0 &&
46858 C->getZExtValue() != maxUIntN(VT.getScalarSizeInBits()) &&
46859 "Both cases that could cause potential overflows should have "
46860 "already been handled.");
46861 if (isPowerOf2_64(AbsMulAmt - 1)) {
46862 // (mul x, 2^N + 1) => (add (shl x, N), x)
46863 NewMul = DAG.getNode(
46864 ISD::ADD, DL, VT, N->getOperand(0),
46865 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46866 DAG.getConstant(Log2_64(AbsMulAmt - 1), DL, ShiftVT)));
46867 // To negate, subtract the number from zero
46868 if (SignMulAmt < 0)
46869 NewMul =
46870 DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), NewMul);
46871 } else if (isPowerOf2_64(AbsMulAmt + 1)) {
46872 // (mul x, 2^N - 1) => (sub (shl x, N), x)
46873 NewMul =
46874 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46875 DAG.getConstant(Log2_64(AbsMulAmt + 1), DL, ShiftVT));
46876 // To negate, reverse the operands of the subtract.
46877 if (SignMulAmt < 0)
46878 NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul);
46879 else
46880 NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
46881 } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2) &&
46882 (!VT.isVector() || Subtarget.fastImmVectorShift())) {
46883 // (mul x, 2^N + 2) => (add (shl x, N), (add x, x))
46884 NewMul =
46885 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46886 DAG.getConstant(Log2_64(AbsMulAmt - 2), DL, ShiftVT));
46887 NewMul = DAG.getNode(
46888 ISD::ADD, DL, VT, NewMul,
46889 DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0), N->getOperand(0)));
46890 } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2) &&
46891 (!VT.isVector() || Subtarget.fastImmVectorShift())) {
46892 // (mul x, 2^N - 2) => (sub (shl x, N), (add x, x))
46893 NewMul =
46894 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46895 DAG.getConstant(Log2_64(AbsMulAmt + 2), DL, ShiftVT));
46896 NewMul = DAG.getNode(
46897 ISD::SUB, DL, VT, NewMul,
46898 DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0), N->getOperand(0)));
46899 } else if (SignMulAmt >= 0 && VT.isVector() &&
46900 Subtarget.fastImmVectorShift()) {
46901 uint64_t AbsMulAmtLowBit = AbsMulAmt & (-AbsMulAmt);
46902 uint64_t ShiftAmt1;
46903 std::optional<unsigned> Opc;
46904 if (isPowerOf2_64(AbsMulAmt - AbsMulAmtLowBit)) {
46905 ShiftAmt1 = AbsMulAmt - AbsMulAmtLowBit;
46906 Opc = ISD::ADD;
46907 } else if (isPowerOf2_64(AbsMulAmt + AbsMulAmtLowBit)) {
46908 ShiftAmt1 = AbsMulAmt + AbsMulAmtLowBit;
46909 Opc = ISD::SUB;
46912 if (Opc) {
46913 SDValue Shift1 =
46914 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46915 DAG.getConstant(Log2_64(ShiftAmt1), DL, ShiftVT));
46916 SDValue Shift2 =
46917 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46918 DAG.getConstant(Log2_64(AbsMulAmtLowBit), DL, ShiftVT));
46919 NewMul = DAG.getNode(*Opc, DL, VT, Shift1, Shift2);
46924 return NewMul;
46927 // Try to form a MULHU or MULHS node by looking for
46928 // (srl (mul ext, ext), 16)
46929 // TODO: This is X86 specific because we want to be able to handle wide types
46930 // before type legalization. But we can only do it if the vector will be
46931 // legalized via widening/splitting. Type legalization can't handle promotion
46932 // of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
46933 // combiner.
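// e.g. (srl (mul (zext vXi16 a), (zext vXi16 b)), 16) becomes
// (zext (mulhu a, b)); the sign-extended/sra variant becomes mulhs instead.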
46934 static SDValue combineShiftToPMULH(SDNode *N, SelectionDAG &DAG,
46935 const X86Subtarget &Subtarget) {
46936 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
46937 "SRL or SRA node is required here!");
46938 SDLoc DL(N);
46940 if (!Subtarget.hasSSE2())
46941 return SDValue();
46943 // The operation feeding into the shift must be a multiply.
46944 SDValue ShiftOperand = N->getOperand(0);
46945 if (ShiftOperand.getOpcode() != ISD::MUL || !ShiftOperand.hasOneUse())
46946 return SDValue();
46948 // Input type should be at least vXi32.
46949 EVT VT = N->getValueType(0);
46950 if (!VT.isVector() || VT.getVectorElementType().getSizeInBits() < 32)
46951 return SDValue();
46953 // Need a shift by 16.
46954 APInt ShiftAmt;
46955 if (!ISD::isConstantSplatVector(N->getOperand(1).getNode(), ShiftAmt) ||
46956 ShiftAmt != 16)
46957 return SDValue();
46959 SDValue LHS = ShiftOperand.getOperand(0);
46960 SDValue RHS = ShiftOperand.getOperand(1);
46962 unsigned ExtOpc = LHS.getOpcode();
46963 if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
46964 RHS.getOpcode() != ExtOpc)
46965 return SDValue();
46967 // Peek through the extends.
46968 LHS = LHS.getOperand(0);
46969 RHS = RHS.getOperand(0);
46971 // Ensure the input types match.
46972 EVT MulVT = LHS.getValueType();
46973 if (MulVT.getVectorElementType() != MVT::i16 || RHS.getValueType() != MulVT)
46974 return SDValue();
46976 unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
46977 SDValue Mulh = DAG.getNode(Opc, DL, MulVT, LHS, RHS);
46979 ExtOpc = N->getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
46980 return DAG.getNode(ExtOpc, DL, VT, Mulh);
46983 static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
46984 SDValue N0 = N->getOperand(0);
46985 SDValue N1 = N->getOperand(1);
46986 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
46987 EVT VT = N0.getValueType();
46989 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
46990 // since the result of setcc_c is all zero's or all ones.
46991 if (VT.isInteger() && !VT.isVector() &&
46992 N1C && N0.getOpcode() == ISD::AND &&
46993 N0.getOperand(1).getOpcode() == ISD::Constant) {
46994 SDValue N00 = N0.getOperand(0);
46995 APInt Mask = N0.getConstantOperandAPInt(1);
46996 Mask <<= N1C->getAPIntValue();
46997 bool MaskOK = false;
46998 // We can handle cases concerning bit-widening nodes containing setcc_c if
46999 // we carefully interrogate the mask to make sure the transform is
47000 // semantics-preserving.
47001 // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
47002 // of the underlying setcc_c operation when the setcc_c was zero extended.
47003 // Consider the following example:
47004 // zext(setcc_c) -> i32 0x0000FFFF
47005 // c1 -> i32 0x0000FFFF
47006 // c2 -> i32 0x00000001
47007 // (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
47008 // (and setcc_c, (c1 << c2)) -> i32 0x0000FFFE
47009 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
47010 MaskOK = true;
47011 } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
47012 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
47013 MaskOK = true;
47014 } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
47015 N00.getOpcode() == ISD::ANY_EXTEND) &&
47016 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
47017 MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
47019 if (MaskOK && Mask != 0) {
47020 SDLoc DL(N);
47021 return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
47025 return SDValue();
47028 static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG,
47029 const X86Subtarget &Subtarget) {
47030 SDValue N0 = N->getOperand(0);
47031 SDValue N1 = N->getOperand(1);
47032 EVT VT = N0.getValueType();
47033 unsigned Size = VT.getSizeInBits();
47035 if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
47036 return V;
47038 // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
47039 // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
47040 // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
47041 // depending on sign of (SarConst - [56,48,32,24,16])
47043 // sexts in X86 are MOVs (MOVSX). The MOVs have the same code size
47044 // as the above SHIFTs (only a SHIFT by 1 has a smaller encoding).
47045 // However, the MOVs have two advantages over a SHIFT:
47046 // 1. MOVs can write to a register that differs from the source.
47047 // 2. MOVs accept memory operands.
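// e.g. for i32: (sra (shl x, 24), 25) becomes (sra (sext_inreg x, i8), 1),
// and (sra (shl x, 24), 22) becomes (shl (sext_inreg x, i8), 2).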
47049 if (VT.isVector() || N1.getOpcode() != ISD::Constant ||
47050 N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
47051 N0.getOperand(1).getOpcode() != ISD::Constant)
47052 return SDValue();
47054 SDValue N00 = N0.getOperand(0);
47055 SDValue N01 = N0.getOperand(1);
47056 APInt ShlConst = N01->getAsAPIntVal();
47057 APInt SarConst = N1->getAsAPIntVal();
47058 EVT CVT = N1.getValueType();
47060 if (SarConst.isNegative())
47061 return SDValue();
47063 for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
47064 unsigned ShiftSize = SVT.getSizeInBits();
47065 // Skip types without a corresponding sext/zext and ShlConst values that are
47066 // not one of [56,48,32,24,16].
47067 if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
47068 continue;
47069 SDLoc DL(N);
47070 SDValue NN =
47071 DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
47072 SarConst = SarConst - (Size - ShiftSize);
47073 if (SarConst == 0)
47074 return NN;
47075 if (SarConst.isNegative())
47076 return DAG.getNode(ISD::SHL, DL, VT, NN,
47077 DAG.getConstant(-SarConst, DL, CVT));
47078 return DAG.getNode(ISD::SRA, DL, VT, NN,
47079 DAG.getConstant(SarConst, DL, CVT));
47081 return SDValue();
47084 static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG,
47085 TargetLowering::DAGCombinerInfo &DCI,
47086 const X86Subtarget &Subtarget) {
47087 SDValue N0 = N->getOperand(0);
47088 SDValue N1 = N->getOperand(1);
47089 EVT VT = N0.getValueType();
47091 if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
47092 return V;
47094 // Only do this on the last DAG combine as it can interfere with other
47095 // combines.
47096 if (!DCI.isAfterLegalizeDAG())
47097 return SDValue();
47099 // Try to improve a sequence of srl (and X, C1), C2 by inverting the order.
47100 // TODO: This is a generic DAG combine that became an x86-only combine to
47101 // avoid shortcomings in other folds such as bswap, bit-test ('bt'), and
47102 // and-not ('andn').
47103 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
47104 return SDValue();
47106 auto *ShiftC = dyn_cast<ConstantSDNode>(N1);
47107 auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
47108 if (!ShiftC || !AndC)
47109 return SDValue();
47111 // If we can shrink the constant mask below 8-bits or 32-bits, then this
47112 // transform should reduce code size. It may also enable secondary transforms
47113 // from improved known-bits analysis or instruction selection.
47114 APInt MaskVal = AndC->getAPIntValue();
47116 // If this can be matched by a zero extend, don't optimize.
47117 if (MaskVal.isMask()) {
47118 unsigned TO = MaskVal.countr_one();
47119 if (TO >= 8 && isPowerOf2_32(TO))
47120 return SDValue();
47123 APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue());
47124 unsigned OldMaskSize = MaskVal.getSignificantBits();
47125 unsigned NewMaskSize = NewMaskVal.getSignificantBits();
47126 if ((OldMaskSize > 8 && NewMaskSize <= 8) ||
47127 (OldMaskSize > 32 && NewMaskSize <= 32)) {
47128 // srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC)
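// e.g. (srl (and X, 0xFF00), 8) becomes (and (srl X, 8), 0xFF), shrinking the
// mask from a 16-bit to an 8-bit immediate.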
47129 SDLoc DL(N);
47130 SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT);
47131 SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1);
47132 return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask);
47134 return SDValue();
47137 static SDValue combineHorizOpWithShuffle(SDNode *N, SelectionDAG &DAG,
47138 const X86Subtarget &Subtarget) {
47139 unsigned Opcode = N->getOpcode();
47140 assert(isHorizOp(Opcode) && "Unexpected hadd/hsub/pack opcode");
47142 SDLoc DL(N);
47143 EVT VT = N->getValueType(0);
47144 SDValue N0 = N->getOperand(0);
47145 SDValue N1 = N->getOperand(1);
47146 EVT SrcVT = N0.getValueType();
47148 SDValue BC0 =
47149 N->isOnlyUserOf(N0.getNode()) ? peekThroughOneUseBitcasts(N0) : N0;
47150 SDValue BC1 =
47151 N->isOnlyUserOf(N1.getNode()) ? peekThroughOneUseBitcasts(N1) : N1;
47153 // Attempt to fold HOP(LOSUBVECTOR(SHUFFLE(X)),HISUBVECTOR(SHUFFLE(X)))
47154 // to SHUFFLE(HOP(LOSUBVECTOR(X),HISUBVECTOR(X))), this is mainly for
47155 // truncation trees that help us avoid lane crossing shuffles.
47156 // TODO: There's a lot more we can do for PACK/HADD style shuffle combines.
47157 // TODO: We don't handle vXf64 shuffles yet.
47158 if (VT.is128BitVector() && SrcVT.getScalarSizeInBits() <= 32) {
47159 if (SDValue BCSrc = getSplitVectorSrc(BC0, BC1, false)) {
47160 SmallVector<SDValue> ShuffleOps;
47161 SmallVector<int> ShuffleMask, ScaledMask;
47162 SDValue Vec = peekThroughBitcasts(BCSrc);
47163 if (getTargetShuffleInputs(Vec, ShuffleOps, ShuffleMask, DAG)) {
47164 resolveTargetShuffleInputsAndMask(ShuffleOps, ShuffleMask);
47165 // To keep the HOP LHS/RHS coherency, we must be able to scale the unary
47166 // shuffle to a v4X64 width - we can probably relax this in the future.
47167 if (!isAnyZero(ShuffleMask) && ShuffleOps.size() == 1 &&
47168 ShuffleOps[0].getValueType().is256BitVector() &&
47169 scaleShuffleElements(ShuffleMask, 4, ScaledMask)) {
47170 SDValue Lo, Hi;
47171 MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
47172 std::tie(Lo, Hi) = DAG.SplitVector(ShuffleOps[0], DL);
47173 Lo = DAG.getBitcast(SrcVT, Lo);
47174 Hi = DAG.getBitcast(SrcVT, Hi);
47175 SDValue Res = DAG.getNode(Opcode, DL, VT, Lo, Hi);
47176 Res = DAG.getBitcast(ShufVT, Res);
47177 Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, ScaledMask);
47178 return DAG.getBitcast(VT, Res);
47184 // Attempt to fold HOP(SHUFFLE(X,Y),SHUFFLE(Z,W)) -> SHUFFLE(HOP()).
47185 if (VT.is128BitVector() && SrcVT.getScalarSizeInBits() <= 32) {
47186 // If either/both ops are a shuffle that can scale to v2x64,
47187 // then see if we can perform this as a v4x32 post shuffle.
47188 SmallVector<SDValue> Ops0, Ops1;
47189 SmallVector<int> Mask0, Mask1, ScaledMask0, ScaledMask1;
47190 bool IsShuf0 =
47191 getTargetShuffleInputs(BC0, Ops0, Mask0, DAG) && !isAnyZero(Mask0) &&
47192 scaleShuffleElements(Mask0, 2, ScaledMask0) &&
47193 all_of(Ops0, [](SDValue Op) { return Op.getValueSizeInBits() == 128; });
47194 bool IsShuf1 =
47195 getTargetShuffleInputs(BC1, Ops1, Mask1, DAG) && !isAnyZero(Mask1) &&
47196 scaleShuffleElements(Mask1, 2, ScaledMask1) &&
47197 all_of(Ops1, [](SDValue Op) { return Op.getValueSizeInBits() == 128; });
47198 if (IsShuf0 || IsShuf1) {
47199 if (!IsShuf0) {
47200 Ops0.assign({BC0});
47201 ScaledMask0.assign({0, 1});
47203 if (!IsShuf1) {
47204 Ops1.assign({BC1});
47205 ScaledMask1.assign({0, 1});
47208 SDValue LHS, RHS;
47209 int PostShuffle[4] = {-1, -1, -1, -1};
47210 auto FindShuffleOpAndIdx = [&](int M, int &Idx, ArrayRef<SDValue> Ops) {
47211 if (M < 0)
47212 return true;
47213 Idx = M % 2;
47214 SDValue Src = Ops[M / 2];
47215 if (!LHS || LHS == Src) {
47216 LHS = Src;
47217 return true;
47219 if (!RHS || RHS == Src) {
47220 Idx += 2;
47221 RHS = Src;
47222 return true;
47224 return false;
47226 if (FindShuffleOpAndIdx(ScaledMask0[0], PostShuffle[0], Ops0) &&
47227 FindShuffleOpAndIdx(ScaledMask0[1], PostShuffle[1], Ops0) &&
47228 FindShuffleOpAndIdx(ScaledMask1[0], PostShuffle[2], Ops1) &&
47229 FindShuffleOpAndIdx(ScaledMask1[1], PostShuffle[3], Ops1)) {
47230 LHS = DAG.getBitcast(SrcVT, LHS);
47231 RHS = DAG.getBitcast(SrcVT, RHS ? RHS : LHS);
47232 MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
47233 SDValue Res = DAG.getNode(Opcode, DL, VT, LHS, RHS);
47234 Res = DAG.getBitcast(ShufVT, Res);
47235 Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, PostShuffle);
47236 return DAG.getBitcast(VT, Res);
47241 // Attempt to fold HOP(SHUFFLE(X,Y),SHUFFLE(X,Y)) -> SHUFFLE(HOP(X,Y)).
47242 if (VT.is256BitVector() && Subtarget.hasInt256()) {
47243 SmallVector<int> Mask0, Mask1;
47244 SmallVector<SDValue> Ops0, Ops1;
47245 SmallVector<int, 2> ScaledMask0, ScaledMask1;
47246 if (getTargetShuffleInputs(BC0, Ops0, Mask0, DAG) && !isAnyZero(Mask0) &&
47247 getTargetShuffleInputs(BC1, Ops1, Mask1, DAG) && !isAnyZero(Mask1) &&
47248 !Ops0.empty() && !Ops1.empty() &&
47249 all_of(Ops0,
47250 [](SDValue Op) { return Op.getValueType().is256BitVector(); }) &&
47251 all_of(Ops1,
47252 [](SDValue Op) { return Op.getValueType().is256BitVector(); }) &&
47253 scaleShuffleElements(Mask0, 2, ScaledMask0) &&
47254 scaleShuffleElements(Mask1, 2, ScaledMask1)) {
47255 SDValue Op00 = peekThroughBitcasts(Ops0.front());
47256 SDValue Op10 = peekThroughBitcasts(Ops1.front());
47257 SDValue Op01 = peekThroughBitcasts(Ops0.back());
47258 SDValue Op11 = peekThroughBitcasts(Ops1.back());
47259 if ((Op00 == Op11) && (Op01 == Op10)) {
47260 std::swap(Op10, Op11);
47261 ShuffleVectorSDNode::commuteMask(ScaledMask1);
47263 if ((Op00 == Op10) && (Op01 == Op11)) {
47264 const int Map[4] = {0, 2, 1, 3};
47265 SmallVector<int, 4> ShuffleMask(
47266 {Map[ScaledMask0[0]], Map[ScaledMask1[0]], Map[ScaledMask0[1]],
47267 Map[ScaledMask1[1]]});
47268 MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
47269 SDValue Res = DAG.getNode(Opcode, DL, VT, DAG.getBitcast(SrcVT, Op00),
47270 DAG.getBitcast(SrcVT, Op01));
47271 Res = DAG.getBitcast(ShufVT, Res);
47272 Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, ShuffleMask);
47273 return DAG.getBitcast(VT, Res);
47278 return SDValue();
47281 static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
47282 TargetLowering::DAGCombinerInfo &DCI,
47283 const X86Subtarget &Subtarget) {
47284 unsigned Opcode = N->getOpcode();
47285 assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
47286 "Unexpected pack opcode");
47288 EVT VT = N->getValueType(0);
47289 SDValue N0 = N->getOperand(0);
47290 SDValue N1 = N->getOperand(1);
47291 unsigned NumDstElts = VT.getVectorNumElements();
47292 unsigned DstBitsPerElt = VT.getScalarSizeInBits();
47293 unsigned SrcBitsPerElt = 2 * DstBitsPerElt;
47294 assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt &&
47295 N1.getScalarValueSizeInBits() == SrcBitsPerElt &&
47296 "Unexpected PACKSS/PACKUS input type");
47298 bool IsSigned = (X86ISD::PACKSS == Opcode);
47300 // Constant Folding.
47301 APInt UndefElts0, UndefElts1;
47302 SmallVector<APInt, 32> EltBits0, EltBits1;
47303 if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
47304 (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
47305 getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
47306 getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
47307 unsigned NumLanes = VT.getSizeInBits() / 128;
47308 unsigned NumSrcElts = NumDstElts / 2;
47309 unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
47310 unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
47312 APInt Undefs(NumDstElts, 0);
47313 SmallVector<APInt, 32> Bits(NumDstElts, APInt::getZero(DstBitsPerElt));
47314 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
47315 for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
47316 unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
47317 auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0);
47318 auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0);
47320 if (UndefElts[SrcIdx]) {
47321 Undefs.setBit(Lane * NumDstEltsPerLane + Elt);
47322 continue;
47325 APInt &Val = EltBits[SrcIdx];
47326 if (IsSigned) {
47327 // PACKSS: Truncate signed value with signed saturation.
47328 // Source values less than dst minint are saturated to minint.
47329 // Source values greater than dst maxint are saturated to maxint.
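// e.g. packing i16 -> i8: 300 -> 127, -300 -> -128, 42 -> 42.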
47330 if (Val.isSignedIntN(DstBitsPerElt))
47331 Val = Val.trunc(DstBitsPerElt);
47332 else if (Val.isNegative())
47333 Val = APInt::getSignedMinValue(DstBitsPerElt);
47334 else
47335 Val = APInt::getSignedMaxValue(DstBitsPerElt);
47336 } else {
47337 // PACKUS: Truncate signed value with unsigned saturation.
47338 // Source values less than zero are saturated to zero.
47339 // Source values greater than dst maxuint are saturated to maxuint.
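// e.g. packing i16 -> i8: 300 -> 255, -5 -> 0, 42 -> 42.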
47340 if (Val.isIntN(DstBitsPerElt))
47341 Val = Val.trunc(DstBitsPerElt);
47342 else if (Val.isNegative())
47343 Val = APInt::getZero(DstBitsPerElt);
47344 else
47345 Val = APInt::getAllOnes(DstBitsPerElt);
47347 Bits[Lane * NumDstEltsPerLane + Elt] = Val;
47351 return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
47354 // Try to fold PACK(SHUFFLE(),SHUFFLE()) -> SHUFFLE(PACK()).
47355 if (SDValue V = combineHorizOpWithShuffle(N, DAG, Subtarget))
47356 return V;
47358 // Try to fold PACKSS(NOT(X),NOT(Y)) -> NOT(PACKSS(X,Y)).
47359 // Currently limit this to allsignbits cases only.
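// (For allsignbits inputs PACKSS is just a truncation, so the NOT commutes
// with it and can be hoisted past the pack.)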
47360 if (IsSigned &&
47361 (N0.isUndef() || DAG.ComputeNumSignBits(N0) == SrcBitsPerElt) &&
47362 (N1.isUndef() || DAG.ComputeNumSignBits(N1) == SrcBitsPerElt)) {
47363 SDValue Not0 = N0.isUndef() ? N0 : IsNOT(N0, DAG);
47364 SDValue Not1 = N1.isUndef() ? N1 : IsNOT(N1, DAG);
47365 if (Not0 && Not1) {
47366 SDLoc DL(N);
47367 MVT SrcVT = N0.getSimpleValueType();
47368 SDValue Pack =
47369 DAG.getNode(X86ISD::PACKSS, DL, VT, DAG.getBitcast(SrcVT, Not0),
47370 DAG.getBitcast(SrcVT, Not1));
47371 return DAG.getNOT(DL, Pack, VT);
47375 // Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
47376 // truncate to create a larger truncate.
47377 if (Subtarget.hasAVX512() &&
47378 N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
47379 N0.getOperand(0).getValueType() == MVT::v8i32) {
47380 if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
47381 (!IsSigned &&
47382 DAG.MaskedValueIsZero(N0, APInt::getHighBitsSet(16, 8)))) {
47383 if (Subtarget.hasVLX())
47384 return DAG.getNode(X86ISD::VTRUNC, SDLoc(N), VT, N0.getOperand(0));
47386 // Widen input to v16i32 so we can truncate that.
47387 SDLoc dl(N);
47388 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i32,
47389 N0.getOperand(0), DAG.getUNDEF(MVT::v8i32));
47390 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Concat);
47394 // Try to fold PACK(EXTEND(X),EXTEND(Y)) -> CONCAT(X,Y) subvectors.
47395 if (VT.is128BitVector()) {
47396 unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
47397 SDValue Src0, Src1;
47398 if (N0.getOpcode() == ExtOpc &&
47399 N0.getOperand(0).getValueType().is64BitVector() &&
47400 N0.getOperand(0).getScalarValueSizeInBits() == DstBitsPerElt) {
47401 Src0 = N0.getOperand(0);
47403 if (N1.getOpcode() == ExtOpc &&
47404 N1.getOperand(0).getValueType().is64BitVector() &&
47405 N1.getOperand(0).getScalarValueSizeInBits() == DstBitsPerElt) {
47406 Src1 = N1.getOperand(0);
47408 if ((Src0 || N0.isUndef()) && (Src1 || N1.isUndef())) {
47409 assert((Src0 || Src1) && "Found PACK(UNDEF,UNDEF)");
47410 Src0 = Src0 ? Src0 : DAG.getUNDEF(Src1.getValueType());
47411 Src1 = Src1 ? Src1 : DAG.getUNDEF(Src0.getValueType());
47412 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Src0, Src1);
47415 // Try again with pack(*_extend_vector_inreg, undef).
47416 unsigned VecInRegOpc = IsSigned ? ISD::SIGN_EXTEND_VECTOR_INREG
47417 : ISD::ZERO_EXTEND_VECTOR_INREG;
47418 if (N0.getOpcode() == VecInRegOpc && N1.isUndef() &&
47419 N0.getOperand(0).getScalarValueSizeInBits() < DstBitsPerElt)
47420 return getEXTEND_VECTOR_INREG(ExtOpc, SDLoc(N), VT, N0.getOperand(0),
47421 DAG);
47424 // Attempt to combine as shuffle.
47425 SDValue Op(N, 0);
47426 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
47427 return Res;
47429 return SDValue();
47432 static SDValue combineVectorHADDSUB(SDNode *N, SelectionDAG &DAG,
47433 TargetLowering::DAGCombinerInfo &DCI,
47434 const X86Subtarget &Subtarget) {
47435 assert((X86ISD::HADD == N->getOpcode() || X86ISD::FHADD == N->getOpcode() ||
47436 X86ISD::HSUB == N->getOpcode() || X86ISD::FHSUB == N->getOpcode()) &&
47437 "Unexpected horizontal add/sub opcode");
47439 if (!shouldUseHorizontalOp(true, DAG, Subtarget)) {
47440 MVT VT = N->getSimpleValueType(0);
47441 SDValue LHS = N->getOperand(0);
47442 SDValue RHS = N->getOperand(1);
47444 // HOP(HOP'(X,X),HOP'(Y,Y)) -> HOP(PERMUTE(HOP'(X,Y)),PERMUTE(HOP'(X,Y)).
47445 if (LHS != RHS && LHS.getOpcode() == N->getOpcode() &&
47446 LHS.getOpcode() == RHS.getOpcode() &&
47447 LHS.getValueType() == RHS.getValueType() &&
47448 N->isOnlyUserOf(LHS.getNode()) && N->isOnlyUserOf(RHS.getNode())) {
47449 SDValue LHS0 = LHS.getOperand(0);
47450 SDValue LHS1 = LHS.getOperand(1);
47451 SDValue RHS0 = RHS.getOperand(0);
47452 SDValue RHS1 = RHS.getOperand(1);
47453 if ((LHS0 == LHS1 || LHS0.isUndef() || LHS1.isUndef()) &&
47454 (RHS0 == RHS1 || RHS0.isUndef() || RHS1.isUndef())) {
47455 SDLoc DL(N);
47456 SDValue Res = DAG.getNode(LHS.getOpcode(), DL, LHS.getValueType(),
47457 LHS0.isUndef() ? LHS1 : LHS0,
47458 RHS0.isUndef() ? RHS1 : RHS0);
47459 MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
47460 Res = DAG.getBitcast(ShufVT, Res);
47461 SDValue NewLHS =
47462 DAG.getNode(X86ISD::PSHUFD, DL, ShufVT, Res,
47463 getV4X86ShuffleImm8ForMask({0, 1, 0, 1}, DL, DAG));
47464 SDValue NewRHS =
47465 DAG.getNode(X86ISD::PSHUFD, DL, ShufVT, Res,
47466 getV4X86ShuffleImm8ForMask({2, 3, 2, 3}, DL, DAG));
47467 return DAG.getNode(N->getOpcode(), DL, VT, DAG.getBitcast(VT, NewLHS),
47468 DAG.getBitcast(VT, NewRHS));
47473 // Try to fold HOP(SHUFFLE(),SHUFFLE()) -> SHUFFLE(HOP()).
47474 if (SDValue V = combineHorizOpWithShuffle(N, DAG, Subtarget))
47475 return V;
47477 return SDValue();
47480 static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG,
47481 TargetLowering::DAGCombinerInfo &DCI,
47482 const X86Subtarget &Subtarget) {
47483 assert((X86ISD::VSHL == N->getOpcode() || X86ISD::VSRA == N->getOpcode() ||
47484 X86ISD::VSRL == N->getOpcode()) &&
47485 "Unexpected shift opcode");
47486 EVT VT = N->getValueType(0);
47487 SDValue N0 = N->getOperand(0);
47488 SDValue N1 = N->getOperand(1);
47490 // Shift zero -> zero.
47491 if (ISD::isBuildVectorAllZeros(N0.getNode()))
47492 return DAG.getConstant(0, SDLoc(N), VT);
47494 // Detect constant shift amounts.
47495 APInt UndefElts;
47496 SmallVector<APInt, 32> EltBits;
47497 if (getTargetConstantBitsFromNode(N1, 64, UndefElts, EltBits, true, false)) {
47498 unsigned X86Opc = getTargetVShiftUniformOpcode(N->getOpcode(), false);
47499 return getTargetVShiftByConstNode(X86Opc, SDLoc(N), VT.getSimpleVT(), N0,
47500 EltBits[0].getZExtValue(), DAG);
47503 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
47504 APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
47505 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
47506 return SDValue(N, 0);
47508 return SDValue();
47511 static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
47512 TargetLowering::DAGCombinerInfo &DCI,
47513 const X86Subtarget &Subtarget) {
47514 unsigned Opcode = N->getOpcode();
47515 assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
47516 X86ISD::VSRLI == Opcode) &&
47517 "Unexpected shift opcode");
47518 bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
47519 EVT VT = N->getValueType(0);
47520 SDValue N0 = N->getOperand(0);
47521 SDValue N1 = N->getOperand(1);
47522 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
47523 assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
47524 "Unexpected value type");
47525 assert(N1.getValueType() == MVT::i8 && "Unexpected shift amount type");
47527 // (shift undef, X) -> 0
47528 if (N0.isUndef())
47529 return DAG.getConstant(0, SDLoc(N), VT);
47531 // Out of range logical bit shifts are guaranteed to be zero.
47532 // Out of range arithmetic bit shifts splat the sign bit.
47533 unsigned ShiftVal = N->getConstantOperandVal(1);
47534 if (ShiftVal >= NumBitsPerElt) {
47535 if (LogicalShift)
47536 return DAG.getConstant(0, SDLoc(N), VT);
47537 ShiftVal = NumBitsPerElt - 1;
47540 // (shift X, 0) -> X
47541 if (!ShiftVal)
47542 return N0;
47544 // (shift 0, C) -> 0
47545 if (ISD::isBuildVectorAllZeros(N0.getNode()))
47546 // N0 is all zeros or undef. We guarantee that the bits shifted into the
47547 // result are all zeros, not undef.
47548 return DAG.getConstant(0, SDLoc(N), VT);
47550 // (VSRAI -1, C) -> -1
47551 if (!LogicalShift && ISD::isBuildVectorAllOnes(N0.getNode()))
47552 // N0 is all ones or undef. We guarantee that the bits shifted into the
47553 // result are all ones, not undef.
47554 return DAG.getConstant(-1, SDLoc(N), VT);
47556 auto MergeShifts = [&](SDValue X, uint64_t Amt0, uint64_t Amt1) {
47557 unsigned NewShiftVal = Amt0 + Amt1;
47558 if (NewShiftVal >= NumBitsPerElt) {
47559 // Out of range logical bit shifts are guaranteed to be zero.
47560 // Out of range arithmetic bit shifts splat the sign bit.
47561 if (LogicalShift)
47562 return DAG.getConstant(0, SDLoc(N), VT);
47563 NewShiftVal = NumBitsPerElt - 1;
47565 return DAG.getNode(Opcode, SDLoc(N), VT, N0.getOperand(0),
47566 DAG.getTargetConstant(NewShiftVal, SDLoc(N), MVT::i8));
47569 // (shift (shift X, C2), C1) -> (shift X, (C1 + C2))
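// e.g. (vsrli (vsrli X, 3), 5) -> (vsrli X, 8).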
47570 if (Opcode == N0.getOpcode())
47571 return MergeShifts(N0.getOperand(0), ShiftVal, N0.getConstantOperandVal(1));
47573 // (shl (add X, X), C) -> (shl X, (C + 1))
47574 if (Opcode == X86ISD::VSHLI && N0.getOpcode() == ISD::ADD &&
47575 N0.getOperand(0) == N0.getOperand(1))
47576 return MergeShifts(N0.getOperand(0), ShiftVal, 1);
47578 // We can decode 'whole byte' logical bit shifts as shuffles.
47579 if (LogicalShift && (ShiftVal % 8) == 0) {
47580 SDValue Op(N, 0);
47581 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
47582 return Res;
47585 // Attempt to detect an expanded vXi64 SIGN_EXTEND_INREG vXi1 pattern, and
47586 // convert to a splatted v2Xi32 SIGN_EXTEND_INREG pattern:
47587 // psrad(pshufd(psllq(X,63),1,1,3,3),31) ->
47588 // pshufd(psrad(pslld(X,31),31),0,0,2,2).
47589 if (Opcode == X86ISD::VSRAI && NumBitsPerElt == 32 && ShiftVal == 31 &&
47590 N0.getOpcode() == X86ISD::PSHUFD &&
47591 N0.getConstantOperandVal(1) == getV4X86ShuffleImm({1, 1, 3, 3}) &&
47592 N0->hasOneUse()) {
47593 SDValue BC = peekThroughOneUseBitcasts(N0.getOperand(0));
47594 if (BC.getOpcode() == X86ISD::VSHLI &&
47595 BC.getScalarValueSizeInBits() == 64 &&
47596 BC.getConstantOperandVal(1) == 63) {
47597 SDLoc DL(N);
47598 SDValue Src = BC.getOperand(0);
47599 Src = DAG.getBitcast(VT, Src);
47600 Src = DAG.getNode(X86ISD::PSHUFD, DL, VT, Src,
47601 getV4X86ShuffleImm8ForMask({0, 0, 2, 2}, DL, DAG));
47602 Src = DAG.getNode(X86ISD::VSHLI, DL, VT, Src, N1);
47603 Src = DAG.getNode(X86ISD::VSRAI, DL, VT, Src, N1);
47604 return Src;
47608 auto TryConstantFold = [&](SDValue V) {
47609 APInt UndefElts;
47610 SmallVector<APInt, 32> EltBits;
47611 if (!getTargetConstantBitsFromNode(V, NumBitsPerElt, UndefElts, EltBits))
47612 return SDValue();
47613 assert(EltBits.size() == VT.getVectorNumElements() &&
47614 "Unexpected shift value type");
47615 // Undef elements need to fold to 0. It's possible SimplifyDemandedBits
47616 // created an undef input due to no input bits being demanded, but the user
47617 // still expects 0 in the other bits.
47618 for (unsigned i = 0, e = EltBits.size(); i != e; ++i) {
47619 APInt &Elt = EltBits[i];
47620 if (UndefElts[i])
47621 Elt = 0;
47622 else if (X86ISD::VSHLI == Opcode)
47623 Elt <<= ShiftVal;
47624 else if (X86ISD::VSRAI == Opcode)
47625 Elt.ashrInPlace(ShiftVal);
47626 else
47627 Elt.lshrInPlace(ShiftVal);
47629 // Reset undef elements since they were zeroed above.
47630 UndefElts = 0;
47631 return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
47634 // Constant Folding.
47635 if (N->isOnlyUserOf(N0.getNode())) {
47636 if (SDValue C = TryConstantFold(N0))
47637 return C;
47639 // Fold (shift (logic X, C2), C1) -> (logic (shift X, C1), (shift C2, C1))
47640 // Don't break NOT patterns.
47641 SDValue BC = peekThroughOneUseBitcasts(N0);
47642 if (ISD::isBitwiseLogicOp(BC.getOpcode()) &&
47643 BC->isOnlyUserOf(BC.getOperand(1).getNode()) &&
47644 !ISD::isBuildVectorAllOnes(BC.getOperand(1).getNode())) {
47645 if (SDValue RHS = TryConstantFold(BC.getOperand(1))) {
47646 SDLoc DL(N);
47647 SDValue LHS = DAG.getNode(Opcode, DL, VT,
47648 DAG.getBitcast(VT, BC.getOperand(0)), N1);
47649 return DAG.getNode(BC.getOpcode(), DL, VT, LHS, RHS);
47654 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
47655 if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumBitsPerElt),
47656 DCI))
47657 return SDValue(N, 0);
47659 return SDValue();
47662 static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
47663 TargetLowering::DAGCombinerInfo &DCI,
47664 const X86Subtarget &Subtarget) {
47665 EVT VT = N->getValueType(0);
47666 unsigned Opcode = N->getOpcode();
47667 assert(((Opcode == X86ISD::PINSRB && VT == MVT::v16i8) ||
47668 (Opcode == X86ISD::PINSRW && VT == MVT::v8i16) ||
47669 Opcode == ISD::INSERT_VECTOR_ELT) &&
47670 "Unexpected vector insertion");
47672 SDValue Vec = N->getOperand(0);
47673 SDValue Scl = N->getOperand(1);
47674 SDValue Idx = N->getOperand(2);
47676 // Fold insert_vector_elt(undef, elt, 0) --> scalar_to_vector(elt).
47677 if (Opcode == ISD::INSERT_VECTOR_ELT && Vec.isUndef() && isNullConstant(Idx))
47678 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Scl);
47680 if (Opcode == X86ISD::PINSRB || Opcode == X86ISD::PINSRW) {
47681 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
47682 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
47683 if (TLI.SimplifyDemandedBits(SDValue(N, 0),
47684 APInt::getAllOnes(NumBitsPerElt), DCI))
47685 return SDValue(N, 0);
47688 // Attempt to combine insertion patterns to a shuffle.
47689 if (VT.isSimple() && DCI.isAfterLegalizeDAG()) {
47690 SDValue Op(N, 0);
47691 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
47692 return Res;
47695 return SDValue();
47698 /// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
47699 /// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
47700 /// OR -> CMPNEQSS.
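/// (For a ucomiss-style compare, ordered-equal is ZF set and PF clear, i.e.
/// setcc(E) AND setcc(NP); unordered-or-unequal is setcc(NE) OR setcc(P).)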
47701 static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
47702 TargetLowering::DAGCombinerInfo &DCI,
47703 const X86Subtarget &Subtarget) {
47704 unsigned opcode;
47706 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
47707 // we're requiring SSE2 for both.
47708 if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
47709 SDValue N0 = N->getOperand(0);
47710 SDValue N1 = N->getOperand(1);
47711 SDValue CMP0 = N0.getOperand(1);
47712 SDValue CMP1 = N1.getOperand(1);
47713 SDLoc DL(N);
47715 // The SETCCs should both refer to the same CMP.
47716 if (CMP0.getOpcode() != X86ISD::FCMP || CMP0 != CMP1)
47717 return SDValue();
47719 SDValue CMP00 = CMP0->getOperand(0);
47720 SDValue CMP01 = CMP0->getOperand(1);
47721 EVT VT = CMP00.getValueType();
47723 if (VT == MVT::f32 || VT == MVT::f64 ||
47724 (VT == MVT::f16 && Subtarget.hasFP16())) {
47725 bool ExpectingFlags = false;
47726 // Check for any users that want flags:
47727 for (const SDNode *U : N->uses()) {
47728 if (ExpectingFlags)
47729 break;
47731 switch (U->getOpcode()) {
47732 default:
47733 case ISD::BR_CC:
47734 case ISD::BRCOND:
47735 case ISD::SELECT:
47736 ExpectingFlags = true;
47737 break;
47738 case ISD::CopyToReg:
47739 case ISD::SIGN_EXTEND:
47740 case ISD::ZERO_EXTEND:
47741 case ISD::ANY_EXTEND:
47742 break;
47746 if (!ExpectingFlags) {
47747 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
47748 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
47750 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
47751 X86::CondCode tmp = cc0;
47752 cc0 = cc1;
47753 cc1 = tmp;
47756 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
47757 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
47758 // FIXME: need symbolic constants for these magic numbers.
47759 // See X86ATTInstPrinter.cpp:printSSECC().
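// (In this encoding 0 is CMPEQ and 4 is CMPNEQ.)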
47760 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
47761 if (Subtarget.hasAVX512()) {
47762 SDValue FSetCC =
47763 DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
47764 DAG.getTargetConstant(x86cc, DL, MVT::i8));
47765 // Need to fill with zeros to ensure the bitcast will produce zeroes
47766 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
47767 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
47768 DAG.getConstant(0, DL, MVT::v16i1),
47769 FSetCC, DAG.getIntPtrConstant(0, DL));
47770 return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
47771 N->getSimpleValueType(0));
47773 SDValue OnesOrZeroesF =
47774 DAG.getNode(X86ISD::FSETCC, DL, CMP00.getValueType(), CMP00,
47775 CMP01, DAG.getTargetConstant(x86cc, DL, MVT::i8));
47777 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
47778 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
47780 if (is64BitFP && !Subtarget.is64Bit()) {
47781 // On a 32-bit target, we cannot bitcast the 64-bit float to a
47782 // 64-bit integer, since that's not a legal type. Since
47783 // OnesOrZeroesF is all ones or all zeroes, we don't need all the
47784 // bits, but can do this little dance to extract the lowest 32 bits
47785 // and work with those going forward.
47786 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
47787 OnesOrZeroesF);
47788 SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
47789 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
47790 Vector32, DAG.getIntPtrConstant(0, DL));
47791 IntVT = MVT::i32;
47794 SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
47795 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
47796 DAG.getConstant(1, DL, IntVT));
47797 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
47798 ANDed);
47799 return OneBitOfTruth;
47804 return SDValue();
47807 /// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
47808 static SDValue combineAndNotIntoANDNP(SDNode *N, SelectionDAG &DAG) {
47809 assert(N->getOpcode() == ISD::AND && "Unexpected opcode combine into ANDNP");
47811 MVT VT = N->getSimpleValueType(0);
47812 if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
47813 return SDValue();
47815 SDValue X, Y;
47816 SDValue N0 = N->getOperand(0);
47817 SDValue N1 = N->getOperand(1);
47819 if (SDValue Not = IsNOT(N0, DAG)) {
47820 X = Not;
47821 Y = N1;
47822 } else if (SDValue Not = IsNOT(N1, DAG)) {
47823 X = Not;
47824 Y = N0;
47825 } else
47826 return SDValue();
47828 X = DAG.getBitcast(VT, X);
47829 Y = DAG.getBitcast(VT, Y);
47830 return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y);
47833 /// Try to fold:
47834 /// and (vector_shuffle<Z,...,Z>
47835 /// (insert_vector_elt undef, (xor X, -1), Z), undef), Y
47836 /// ->
47837 /// andnp (vector_shuffle<Z,...,Z>
47838 /// (insert_vector_elt undef, X, Z), undef), Y
47839 static SDValue combineAndShuffleNot(SDNode *N, SelectionDAG &DAG,
47840 const X86Subtarget &Subtarget) {
47841 assert(N->getOpcode() == ISD::AND && "Unexpected opcode combine into ANDNP");
47843 EVT VT = N->getValueType(0);
47844 // Do not split 256 and 512 bit vectors with SSE2 as doing so overwrites the
47845 // original value and requires extra moves.
47846 if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
47847 ((VT.is256BitVector() || VT.is512BitVector()) && Subtarget.hasAVX())))
47848 return SDValue();
47850 auto GetNot = [&DAG](SDValue V) {
47851 auto *SVN = dyn_cast<ShuffleVectorSDNode>(peekThroughOneUseBitcasts(V));
47852 // TODO: SVN->hasOneUse() is a strong condition. It can be relaxed if all
47853 // end-users are ISD::AND, including cases such as
47854 // (and(extract_vector_element(SVN), Y)).
47855 if (!SVN || !SVN->hasOneUse() || !SVN->isSplat() ||
47856 !SVN->getOperand(1).isUndef()) {
47857 return SDValue();
47859 SDValue IVEN = SVN->getOperand(0);
47860 if (IVEN.getOpcode() != ISD::INSERT_VECTOR_ELT ||
47861 !IVEN.getOperand(0).isUndef() || !IVEN.hasOneUse())
47862 return SDValue();
47863 if (!isa<ConstantSDNode>(IVEN.getOperand(2)) ||
47864 IVEN.getConstantOperandAPInt(2) != SVN->getSplatIndex())
47865 return SDValue();
47866 SDValue Src = IVEN.getOperand(1);
47867 if (SDValue Not = IsNOT(Src, DAG)) {
47868 SDValue NotSrc = DAG.getBitcast(Src.getValueType(), Not);
47869 SDValue NotIVEN =
47870 DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(IVEN), IVEN.getValueType(),
47871 IVEN.getOperand(0), NotSrc, IVEN.getOperand(2));
47872 return DAG.getVectorShuffle(SVN->getValueType(0), SDLoc(SVN), NotIVEN,
47873 SVN->getOperand(1), SVN->getMask());
47875 return SDValue();
47878 SDValue X, Y;
47879 SDValue N0 = N->getOperand(0);
47880 SDValue N1 = N->getOperand(1);
47881 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
47883 if (SDValue Not = GetNot(N0)) {
47884 X = Not;
47885 Y = N1;
47886 } else if (SDValue Not = GetNot(N1)) {
47887 X = Not;
47888 Y = N0;
47889 } else
47890 return SDValue();
47892 X = DAG.getBitcast(VT, X);
47893 Y = DAG.getBitcast(VT, Y);
47894 SDLoc DL(N);
47896 // We do not split for SSE at all, but we need to split vectors for AVX1 and
47897 // AVX2.
47898 if (!Subtarget.useAVX512Regs() && VT.is512BitVector() &&
47899 TLI.isTypeLegal(VT.getHalfNumVectorElementsVT(*DAG.getContext()))) {
47900 SDValue LoX, HiX;
47901 std::tie(LoX, HiX) = splitVector(X, DAG, DL);
47902 SDValue LoY, HiY;
47903 std::tie(LoY, HiY) = splitVector(Y, DAG, DL);
47904 EVT SplitVT = LoX.getValueType();
47905 SDValue LoV = DAG.getNode(X86ISD::ANDNP, DL, SplitVT, {LoX, LoY});
47906 SDValue HiV = DAG.getNode(X86ISD::ANDNP, DL, SplitVT, {HiX, HiY});
47907 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, {LoV, HiV});
47910 if (TLI.isTypeLegal(VT))
47911 return DAG.getNode(X86ISD::ANDNP, DL, VT, {X, Y});
47913 return SDValue();
47916 // Try to widen AND, OR and XOR nodes to VT in order to remove casts around
47917 // logical operations, like in the example below.
47918 // or (and (truncate x), (truncate y)),
47919 //    (xor (truncate z), (build_vector (constants)))
47920 // Given a target type \p VT, we generate
47921 // or (and x, y), (xor z, zext(build_vector (constants)))
47922 // where x, y and z are of type \p VT. We can do so if each operand is either
47923 // a truncate from VT, a vector of constants (second operand only), or can
47924 // itself be recursively promoted.
47925 static SDValue PromoteMaskArithmetic(SDNode *N, EVT VT, SelectionDAG &DAG,
47926 unsigned Depth) {
47927 // Limit recursion to avoid excessive compile times.
47928 if (Depth >= SelectionDAG::MaxRecursionDepth)
47929 return SDValue();
47931 if (N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND &&
47932 N->getOpcode() != ISD::OR)
47933 return SDValue();
47935 SDValue N0 = N->getOperand(0);
47936 SDValue N1 = N->getOperand(1);
47937 SDLoc DL(N);
47939 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
47940 if (!TLI.isOperationLegalOrPromote(N->getOpcode(), VT))
47941 return SDValue();
47943 if (SDValue NN0 = PromoteMaskArithmetic(N0.getNode(), VT, DAG, Depth + 1))
47944 N0 = NN0;
47945 else {
47946 // The left side has to be a trunc.
47947 if (N0.getOpcode() != ISD::TRUNCATE)
47948 return SDValue();
47950 // The type of the truncated inputs.
47951 if (N0.getOperand(0).getValueType() != VT)
47952 return SDValue();
47954 N0 = N0.getOperand(0);
47957 if (SDValue NN1 = PromoteMaskArithmetic(N1.getNode(), VT, DAG, Depth + 1))
47958 N1 = NN1;
47959 else {
47960 // The right side has to be a 'trunc' or a constant vector.
47961 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
47962 N1.getOperand(0).getValueType() == VT;
47963 if (!RHSTrunc && !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
47964 return SDValue();
47966 if (RHSTrunc)
47967 N1 = N1.getOperand(0);
47968 else
47969 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);
47972 return DAG.getNode(N->getOpcode(), DL, VT, N0, N1);
47975 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
47976 // register. In most cases we actually compare or select YMM-sized registers
47977 // and mixing the two types creates horrible code. This method optimizes
47978 // some of the transition sequences.
47979 // Even with AVX-512 this is still useful for removing casts around logical
47980 // operations on vXi1 mask types.
47981 static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG,
47982 const X86Subtarget &Subtarget) {
47983 EVT VT = N->getValueType(0);
47984 assert(VT.isVector() && "Expected vector type");
47986 SDLoc DL(N);
47987 assert((N->getOpcode() == ISD::ANY_EXTEND ||
47988 N->getOpcode() == ISD::ZERO_EXTEND ||
47989 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
47991 SDValue Narrow = N->getOperand(0);
47992 EVT NarrowVT = Narrow.getValueType();
47994 // Generate the wide operation.
47995 SDValue Op = PromoteMaskArithmetic(Narrow.getNode(), VT, DAG, 0);
47996 if (!Op)
47997 return SDValue();
47998 switch (N->getOpcode()) {
47999 default: llvm_unreachable("Unexpected opcode");
48000 case ISD::ANY_EXTEND:
48001 return Op;
48002 case ISD::ZERO_EXTEND:
48003 return DAG.getZeroExtendInReg(Op, DL, NarrowVT);
48004 case ISD::SIGN_EXTEND:
48005 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
48006 Op, DAG.getValueType(NarrowVT));
48010 static unsigned convertIntLogicToFPLogicOpcode(unsigned Opcode) {
48011 unsigned FPOpcode;
48012 switch (Opcode) {
48013 default: llvm_unreachable("Unexpected input node for FP logic conversion");
48014 case ISD::AND: FPOpcode = X86ISD::FAND; break;
48015 case ISD::OR: FPOpcode = X86ISD::FOR; break;
48016 case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
48018 return FPOpcode;
48021 /// If both input operands of a logic op are being cast from floating-point
48022 /// types or FP compares, try to convert this into a floating-point logic node
48023 /// to avoid unnecessary moves from SSE to integer registers.
48024 static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
48025 TargetLowering::DAGCombinerInfo &DCI,
48026 const X86Subtarget &Subtarget) {
48027 EVT VT = N->getValueType(0);
48028 SDValue N0 = N->getOperand(0);
48029 SDValue N1 = N->getOperand(1);
48030 SDLoc DL(N);
48032 if (!((N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST) ||
48033 (N0.getOpcode() == ISD::SETCC && N1.getOpcode() == ISD::SETCC)))
48034 return SDValue();
48036 SDValue N00 = N0.getOperand(0);
48037 SDValue N10 = N1.getOperand(0);
48038 EVT N00Type = N00.getValueType();
48039 EVT N10Type = N10.getValueType();
48041 // Ensure that both types are the same and are legal scalar fp types.
48042 if (N00Type != N10Type || !((Subtarget.hasSSE1() && N00Type == MVT::f32) ||
48043 (Subtarget.hasSSE2() && N00Type == MVT::f64) ||
48044 (Subtarget.hasFP16() && N00Type == MVT::f16)))
48045 return SDValue();
48047 if (N0.getOpcode() == ISD::BITCAST && !DCI.isBeforeLegalizeOps()) {
48048 unsigned FPOpcode = convertIntLogicToFPLogicOpcode(N->getOpcode());
48049 SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
48050 return DAG.getBitcast(VT, FPLogic);
48053 if (VT != MVT::i1 || N0.getOpcode() != ISD::SETCC || !N0.hasOneUse() ||
48054 !N1.hasOneUse())
48055 return SDValue();
48057 ISD::CondCode CC0 = cast<CondCodeSDNode>(N0.getOperand(2))->get();
48058 ISD::CondCode CC1 = cast<CondCodeSDNode>(N1.getOperand(2))->get();
48060 // The vector ISA for FP predicates is incomplete before AVX, so converting
48061 // COMIS* to CMPS* may not be a win before AVX.
48062 if (!Subtarget.hasAVX() &&
48063 !(cheapX86FSETCC_SSE(CC0) && cheapX86FSETCC_SSE(CC1)))
48064 return SDValue();
48066 // Convert scalar FP compares and logic to vector compares (COMIS* to CMPS*)
48067 // and vector logic:
48068 // logic (setcc N00, N01), (setcc N10, N11) -->
48069 // extelt (logic (setcc (s2v N00), (s2v N01)), setcc (s2v N10), (s2v N11))), 0
48070 unsigned NumElts = 128 / N00Type.getSizeInBits();
48071 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), N00Type, NumElts);
48072 EVT BoolVecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
48073 SDValue ZeroIndex = DAG.getVectorIdxConstant(0, DL);
48074 SDValue N01 = N0.getOperand(1);
48075 SDValue N11 = N1.getOperand(1);
48076 SDValue Vec00 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N00);
48077 SDValue Vec01 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N01);
48078 SDValue Vec10 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N10);
48079 SDValue Vec11 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N11);
48080 SDValue Setcc0 = DAG.getSetCC(DL, BoolVecVT, Vec00, Vec01, CC0);
48081 SDValue Setcc1 = DAG.getSetCC(DL, BoolVecVT, Vec10, Vec11, CC1);
48082 SDValue Logic = DAG.getNode(N->getOpcode(), DL, BoolVecVT, Setcc0, Setcc1);
48083 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Logic, ZeroIndex);
48086 // Attempt to fold BITOP(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(BITOP(X,Y))
48087 // to reduce XMM->GPR traffic.
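// e.g. (and (movmskps X), (movmskps Y)) -> (movmskps (andps X, Y)).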
48088 static SDValue combineBitOpWithMOVMSK(SDNode *N, SelectionDAG &DAG) {
48089 unsigned Opc = N->getOpcode();
48090 assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
48091 "Unexpected bit opcode");
48093 SDValue N0 = N->getOperand(0);
48094 SDValue N1 = N->getOperand(1);
48096 // Both operands must be single use MOVMSK.
48097 if (N0.getOpcode() != X86ISD::MOVMSK || !N0.hasOneUse() ||
48098 N1.getOpcode() != X86ISD::MOVMSK || !N1.hasOneUse())
48099 return SDValue();
48101 SDValue Vec0 = N0.getOperand(0);
48102 SDValue Vec1 = N1.getOperand(0);
48103 EVT VecVT0 = Vec0.getValueType();
48104 EVT VecVT1 = Vec1.getValueType();
48106 // Both MOVMSK operands must be from vectors of the same size and same element
48107 // size, but it's OK for an fp/int difference.
48108 if (VecVT0.getSizeInBits() != VecVT1.getSizeInBits() ||
48109 VecVT0.getScalarSizeInBits() != VecVT1.getScalarSizeInBits())
48110 return SDValue();
48112 SDLoc DL(N);
48113 unsigned VecOpc =
48114 VecVT0.isFloatingPoint() ? convertIntLogicToFPLogicOpcode(Opc) : Opc;
48115 SDValue Result =
48116 DAG.getNode(VecOpc, DL, VecVT0, Vec0, DAG.getBitcast(VecVT0, Vec1));
48117 return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
48120 // Attempt to fold BITOP(SHIFT(X,Z),SHIFT(Y,Z)) -> SHIFT(BITOP(X,Y),Z).
48121 // NOTE: This is a very limited case of what SimplifyUsingDistributiveLaws
48122 // handles in InstCombine.
48123 static SDValue combineBitOpWithShift(SDNode *N, SelectionDAG &DAG) {
48124 unsigned Opc = N->getOpcode();
48125 assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
48126 "Unexpected bit opcode");
48128 SDValue N0 = N->getOperand(0);
48129 SDValue N1 = N->getOperand(1);
48130 EVT VT = N->getValueType(0);
48132 // Both operands must be single use.
48133 if (!N0.hasOneUse() || !N1.hasOneUse())
48134 return SDValue();
48136 // Search for matching shifts.
48137 SDValue BC0 = peekThroughOneUseBitcasts(N0);
48138 SDValue BC1 = peekThroughOneUseBitcasts(N1);
48140 unsigned BCOpc = BC0.getOpcode();
48141 EVT BCVT = BC0.getValueType();
48142 if (BCOpc != BC1->getOpcode() || BCVT != BC1.getValueType())
48143 return SDValue();
48145 switch (BCOpc) {
48146 case X86ISD::VSHLI:
48147 case X86ISD::VSRLI:
48148 case X86ISD::VSRAI: {
48149 if (BC0.getOperand(1) != BC1.getOperand(1))
48150 return SDValue();
48152 SDLoc DL(N);
48153 SDValue BitOp =
48154 DAG.getNode(Opc, DL, BCVT, BC0.getOperand(0), BC1.getOperand(0));
48155 SDValue Shift = DAG.getNode(BCOpc, DL, BCVT, BitOp, BC0.getOperand(1));
48156 return DAG.getBitcast(VT, Shift);
48160 return SDValue();
48163 // Attempt to fold:
48164 // BITOP(PACKSS(X,Z),PACKSS(Y,W)) --> PACKSS(BITOP(X,Y),BITOP(Z,W)).
48165 // TODO: Add PACKUS handling.
48166 static SDValue combineBitOpWithPACK(SDNode *N, SelectionDAG &DAG) {
48167 unsigned Opc = N->getOpcode();
48168 assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
48169 "Unexpected bit opcode");
48171 SDValue N0 = N->getOperand(0);
48172 SDValue N1 = N->getOperand(1);
48173 EVT VT = N->getValueType(0);
48175 // Both operands must be single use.
48176 if (!N0.hasOneUse() || !N1.hasOneUse())
48177 return SDValue();
48179 // Search for matching packs.
48180 N0 = peekThroughOneUseBitcasts(N0);
48181 N1 = peekThroughOneUseBitcasts(N1);
48183 if (N0.getOpcode() != X86ISD::PACKSS || N1.getOpcode() != X86ISD::PACKSS)
48184 return SDValue();
48186 MVT DstVT = N0.getSimpleValueType();
48187 if (DstVT != N1.getSimpleValueType())
48188 return SDValue();
48190 MVT SrcVT = N0.getOperand(0).getSimpleValueType();
48191 unsigned NumSrcBits = SrcVT.getScalarSizeInBits();
48193 // Limit to allsignbits packing.
48194 if (DAG.ComputeNumSignBits(N0.getOperand(0)) != NumSrcBits ||
48195 DAG.ComputeNumSignBits(N0.getOperand(1)) != NumSrcBits ||
48196 DAG.ComputeNumSignBits(N1.getOperand(0)) != NumSrcBits ||
48197 DAG.ComputeNumSignBits(N1.getOperand(1)) != NumSrcBits)
48198 return SDValue();
48200 SDLoc DL(N);
48201 SDValue LHS = DAG.getNode(Opc, DL, SrcVT, N0.getOperand(0), N1.getOperand(0));
48202 SDValue RHS = DAG.getNode(Opc, DL, SrcVT, N0.getOperand(1), N1.getOperand(1));
48203 return DAG.getBitcast(VT, DAG.getNode(X86ISD::PACKSS, DL, DstVT, LHS, RHS));
48206 /// If this is a zero/all-bits result that is bitwise-anded with a low bits
48207 /// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
48208 /// with a shift-right to eliminate loading the vector constant mask value.
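/// e.g. (and (pcmpeqd X, Y), 1) --> (psrld (pcmpeqd X, Y), 31).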
48209 static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
48210 const X86Subtarget &Subtarget) {
48211 SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
48212 SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
48213 EVT VT = Op0.getValueType();
48214 if (VT != Op1.getValueType() || !VT.isSimple() || !VT.isInteger())
48215 return SDValue();
48217 // Try to convert an "is positive" signbit masking operation into arithmetic
48218 // shift and "andn". This saves a materialization of a -1 vector constant.
48219 // The "is negative" variant should be handled more generally because it only
48220 // requires "and" rather than "andn":
48221 // and (pcmpgt X, -1), Y --> pandn (vsrai X, BitWidth - 1), Y
48223 // This is limited to the original type to avoid producing even more bitcasts.
48224 // If the bitcasts can't be eliminated, then it is unlikely that this fold
48225 // will be profitable.
48226 if (N->getValueType(0) == VT &&
48227 supportedVectorShiftWithImm(VT, Subtarget, ISD::SRA)) {
48228 SDValue X, Y;
48229 if (Op1.getOpcode() == X86ISD::PCMPGT &&
48230 isAllOnesOrAllOnesSplat(Op1.getOperand(1)) && Op1.hasOneUse()) {
48231 X = Op1.getOperand(0);
48232 Y = Op0;
48233 } else if (Op0.getOpcode() == X86ISD::PCMPGT &&
48234 isAllOnesOrAllOnesSplat(Op0.getOperand(1)) && Op0.hasOneUse()) {
48235 X = Op0.getOperand(0);
48236 Y = Op1;
48238 if (X && Y) {
48239 SDLoc DL(N);
48240 SDValue Sra =
48241 getTargetVShiftByConstNode(X86ISD::VSRAI, DL, VT.getSimpleVT(), X,
48242 VT.getScalarSizeInBits() - 1, DAG);
48243 return DAG.getNode(X86ISD::ANDNP, DL, VT, Sra, Y);
48247 APInt SplatVal;
48248 if (!X86::isConstantSplat(Op1, SplatVal, false) || !SplatVal.isMask())
48249 return SDValue();
48251 // Don't prevent creation of ANDN.
48252 if (isBitwiseNot(Op0))
48253 return SDValue();
48255 if (!supportedVectorShiftWithImm(VT, Subtarget, ISD::SRL))
48256 return SDValue();
48258 unsigned EltBitWidth = VT.getScalarSizeInBits();
48259 if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
48260 return SDValue();
48262 SDLoc DL(N);
48263 unsigned ShiftVal = SplatVal.countr_one();
48264 SDValue ShAmt = DAG.getTargetConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
48265 SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT, Op0, ShAmt);
48266 return DAG.getBitcast(N->getValueType(0), Shift);
48269 // Get the index node from the lowered DAG of a GEP IR instruction with one
48270 // indexing dimension.
48271 static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {
48272 if (Ld->isIndexed())
48273 return SDValue();
48275 SDValue Base = Ld->getBasePtr();
48277 if (Base.getOpcode() != ISD::ADD)
48278 return SDValue();
48280 SDValue ShiftedIndex = Base.getOperand(0);
48282 if (ShiftedIndex.getOpcode() != ISD::SHL)
48283 return SDValue();
48285 return ShiftedIndex.getOperand(0);
48289 static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {
48290 if (Subtarget.hasBMI2() && VT.isScalarInteger()) {
48291 switch (VT.getSizeInBits()) {
48292 default: return false;
48293 case 64: return Subtarget.is64Bit();
48294 case 32: return true;
48297 return false;
48300 // This function recognizes cases where the X86 BZHI instruction can replace an
48301 // 'and-load' sequence.
48302 // When loading an integer value from an array of constants which is defined
48303 // as follows:
48305 //   int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF ..., 2^(SIZE-1) - 1}
48307 // and then and'ing the loaded value with another input, the combination is
48308 // equivalent to performing bzhi (zero high bits) on that other input, with the
48309 // same index as the load.
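// e.g. for i32: array[idx] == (1u << idx) - 1, so
//   (array[idx] & x) == (x & ((1u << idx) - 1)) == bzhi(x, idx).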
48310 static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG,
48311 const X86Subtarget &Subtarget) {
48312 MVT VT = Node->getSimpleValueType(0);
48313 SDLoc dl(Node);
48315 // Check if subtarget has BZHI instruction for the node's type
48316 if (!hasBZHI(Subtarget, VT))
48317 return SDValue();
48319 // Try matching the pattern for both operands.
48320 for (unsigned i = 0; i < 2; i++) {
48321 SDValue N = Node->getOperand(i);
48322 LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode());
48324 // continue if the operand is not a load instruction
48325 if (!Ld)
48326 return SDValue();
48328 const Value *MemOp = Ld->getMemOperand()->getValue();
48330 if (!MemOp)
48331 return SDValue();
48333 if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) {
48334 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) {
48335 if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
48337 Constant *Init = GV->getInitializer();
48338 Type *Ty = Init->getType();
48339 if (!isa<ConstantDataArray>(Init) ||
48340 !Ty->getArrayElementType()->isIntegerTy() ||
48341 Ty->getArrayElementType()->getScalarSizeInBits() !=
48342 VT.getSizeInBits() ||
48343 Ty->getArrayNumElements() >
48344 Ty->getArrayElementType()->getScalarSizeInBits())
48345 continue;
48347 // Check if the array's constant elements are suitable to our case.
48348 uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
48349 bool ConstantsMatch = true;
48350 for (uint64_t j = 0; j < ArrayElementCount; j++) {
48351 auto *Elem = cast<ConstantInt>(Init->getAggregateElement(j));
48352 if (Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) {
48353 ConstantsMatch = false;
48354 break;
48357 if (!ConstantsMatch)
48358 continue;
48360 // Do the transformation (for a 32-bit type):
48361 //   (and (load arr[idx]), inp)
48362 //   --> (and inp, (srl 0xFFFFFFFF, (sub 32, idx)))
48363 // which will be replaced with a single BZHI instruction.
48364 SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0);
48365 SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32);
48367 // Get the Node which indexes into the array.
48368 SDValue Index = getIndexFromUnindexedLoad(Ld);
48369 if (!Index)
48370 return SDValue();
48371 Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32);
48373 SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index);
48374 Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub);
48376 SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
48377 SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub);
48379 return DAG.getNode(ISD::AND, dl, VT, Inp, LShr);
48384 return SDValue();
48387 // Look for (and (bitcast (vXi1 (concat_vectors (vYi1 setcc), undef))), C)
48388 // where C is a mask containing the same number of bits as the setcc and
48389 // where the setcc freely zeroes the upper bits of the k-register. We can
48390 // replace the undef in the concat with 0s and remove the AND. This mainly
48391 // helps with v2i1/v4i1 setcc being cast to scalar.
48392 static SDValue combineScalarAndWithMaskSetcc(SDNode *N, SelectionDAG &DAG,
48393 const X86Subtarget &Subtarget) {
48394 assert(N->getOpcode() == ISD::AND && "Unexpected opcode!");
48396 EVT VT = N->getValueType(0);
48398 // Make sure this is an AND with constant. We will check the value of the
48399 // constant later.
48400 auto *C1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
48401 if (!C1)
48402 return SDValue();
48404 // This is implied by the ConstantSDNode.
48405 assert(!VT.isVector() && "Expected scalar VT!");
48407 SDValue Src = N->getOperand(0);
48408 if (!Src.hasOneUse())
48409 return SDValue();
48411 // (Optionally) peek through any_extend().
48412 if (Src.getOpcode() == ISD::ANY_EXTEND) {
48413 if (!Src.getOperand(0).hasOneUse())
48414 return SDValue();
48415 Src = Src.getOperand(0);
48418 if (Src.getOpcode() != ISD::BITCAST || !Src.getOperand(0).hasOneUse())
48419 return SDValue();
48421 Src = Src.getOperand(0);
48422 EVT SrcVT = Src.getValueType();
48424 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48425 if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::i1 ||
48426 !TLI.isTypeLegal(SrcVT))
48427 return SDValue();
48429 if (Src.getOpcode() != ISD::CONCAT_VECTORS)
48430 return SDValue();
48432 // We only care about the first subvector of the concat; we expect the
48433 // other subvectors to be ignored due to the AND if we make the change.
48434 SDValue SubVec = Src.getOperand(0);
48435 EVT SubVecVT = SubVec.getValueType();
48437 // The RHS of the AND should be a mask with as many bits as SubVec.
48438 if (!TLI.isTypeLegal(SubVecVT) ||
48439 !C1->getAPIntValue().isMask(SubVecVT.getVectorNumElements()))
48440 return SDValue();
48442 // First subvector should be a setcc with a legal result type or an
48443 // AND containing at least one setcc with a legal result type.
48444 auto IsLegalSetCC = [&](SDValue V) {
48445 if (V.getOpcode() != ISD::SETCC)
48446 return false;
48447 EVT SetccVT = V.getOperand(0).getValueType();
48448 if (!TLI.isTypeLegal(SetccVT) ||
48449 !(Subtarget.hasVLX() || SetccVT.is512BitVector()))
48450 return false;
48451 if (!(Subtarget.hasBWI() || SetccVT.getScalarSizeInBits() >= 32))
48452 return false;
48453 return true;
48455 if (!(IsLegalSetCC(SubVec) || (SubVec.getOpcode() == ISD::AND &&
48456 (IsLegalSetCC(SubVec.getOperand(0)) ||
48457 IsLegalSetCC(SubVec.getOperand(1))))))
48458 return SDValue();
48460 // We passed all the checks. Rebuild the concat_vectors with zeroes
48461 // and cast it back to VT.
48462 SDLoc dl(N);
48463 SmallVector<SDValue, 4> Ops(Src.getNumOperands(),
48464 DAG.getConstant(0, dl, SubVecVT));
48465 Ops[0] = SubVec;
48466 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT,
48467 Ops);
48468 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcVT.getSizeInBits());
48469 return DAG.getZExtOrTrunc(DAG.getBitcast(IntVT, Concat), dl, VT);
48472 static SDValue getBMIMatchingOp(unsigned Opc, SelectionDAG &DAG,
48473 SDValue OpMustEq, SDValue Op, unsigned Depth) {
48474 // We don't want to go crazy with the recursion here. This isn't a super
48475 // important optimization.
48476 static constexpr unsigned kMaxDepth = 2;
48478 // Only do this re-ordering if op has one use.
48479 if (!Op.hasOneUse())
48480 return SDValue();
48482 SDLoc DL(Op);
48483 // If we hit another associative op, recurse further.
48484 if (Op.getOpcode() == Opc) {
48485 // Done recursing.
48486 if (Depth++ >= kMaxDepth)
48487 return SDValue();
48489 for (unsigned OpIdx = 0; OpIdx < 2; ++OpIdx)
48490 if (SDValue R =
48491 getBMIMatchingOp(Opc, DAG, OpMustEq, Op.getOperand(OpIdx), Depth))
48492 return DAG.getNode(Op.getOpcode(), DL, Op.getValueType(), R,
48493 Op.getOperand(1 - OpIdx));
48495 } else if (Op.getOpcode() == ISD::SUB) {
48496 if (Opc == ISD::AND) {
48497 // BLSI: (and x, (sub 0, x))
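// e.g. x = 0b01100 -> 0b00100 (isolate the lowest set bit).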
48498 if (isNullConstant(Op.getOperand(0)) && Op.getOperand(1) == OpMustEq)
48499 return DAG.getNode(Opc, DL, Op.getValueType(), OpMustEq, Op);
48501 // Opc must be ISD::AND or ISD::XOR
48502 // BLSR: (and x, (sub x, 1))
48503 // BLSMSK: (xor x, (sub x, 1))
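// e.g. x = 0b01100: BLSR -> 0b01000, BLSMSK -> 0b00111.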
48504 if (isOneConstant(Op.getOperand(1)) && Op.getOperand(0) == OpMustEq)
48505 return DAG.getNode(Opc, DL, Op.getValueType(), OpMustEq, Op);
48507 } else if (Op.getOpcode() == ISD::ADD) {
48508 // Opc must be ISD::AND or ISD::XOR
48509 // BLSR: (and x, (add x, -1))
48510 // BLSMSK: (xor x, (add x, -1))
48511 if (isAllOnesConstant(Op.getOperand(1)) && Op.getOperand(0) == OpMustEq)
48512 return DAG.getNode(Opc, DL, Op.getValueType(), OpMustEq, Op);
48514 return SDValue();
48517 static SDValue combineBMILogicOp(SDNode *N, SelectionDAG &DAG,
48518 const X86Subtarget &Subtarget) {
48519 EVT VT = N->getValueType(0);
48520 // Make sure this node is a candidate for BMI instructions.
48521 if (!Subtarget.hasBMI() || !VT.isScalarInteger() ||
48522 (VT != MVT::i32 && VT != MVT::i64))
48523 return SDValue();
48525 assert(N->getOpcode() == ISD::AND || N->getOpcode() == ISD::XOR);
48527 // Try and match LHS and RHS.
48528 for (unsigned OpIdx = 0; OpIdx < 2; ++OpIdx)
48529 if (SDValue OpMatch =
48530 getBMIMatchingOp(N->getOpcode(), DAG, N->getOperand(OpIdx),
48531 N->getOperand(1 - OpIdx), 0))
48532 return OpMatch;
48533 return SDValue();
48536 static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
48537 TargetLowering::DAGCombinerInfo &DCI,
48538 const X86Subtarget &Subtarget) {
48539 SDValue N0 = N->getOperand(0);
48540 SDValue N1 = N->getOperand(1);
48541 EVT VT = N->getValueType(0);
48542 SDLoc dl(N);
48543 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48545 // If this is SSE1 only convert to FAND to avoid scalarization.
48546 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
48547 return DAG.getBitcast(MVT::v4i32,
48548 DAG.getNode(X86ISD::FAND, dl, MVT::v4f32,
48549 DAG.getBitcast(MVT::v4f32, N0),
48550 DAG.getBitcast(MVT::v4f32, N1)));
48553 // Use a 32-bit and+zext if upper bits known zero.
48554 if (VT == MVT::i64 && Subtarget.is64Bit() && !isa<ConstantSDNode>(N1)) {
48555 APInt HiMask = APInt::getHighBitsSet(64, 32);
48556 if (DAG.MaskedValueIsZero(N1, HiMask) ||
48557 DAG.MaskedValueIsZero(N0, HiMask)) {
48558 SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N0);
48559 SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N1);
48560 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64,
48561 DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS));
48565 // Match all-of bool scalar reductions into a bitcast/movmsk + cmp.
48566 // TODO: Support multiple SrcOps.
48567 if (VT == MVT::i1) {
48568 SmallVector<SDValue, 2> SrcOps;
48569 SmallVector<APInt, 2> SrcPartials;
48570 if (matchScalarReduction(SDValue(N, 0), ISD::AND, SrcOps, &SrcPartials) &&
48571 SrcOps.size() == 1) {
48572 unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
48573 EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
48574 SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
48575 if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
48576 Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
48577 if (Mask) {
48578 assert(SrcPartials[0].getBitWidth() == NumElts &&
48579 "Unexpected partial reduction mask");
48580 SDValue PartialBits = DAG.getConstant(SrcPartials[0], dl, MaskVT);
48581 Mask = DAG.getNode(ISD::AND, dl, MaskVT, Mask, PartialBits);
48582 return DAG.getSetCC(dl, MVT::i1, Mask, PartialBits, ISD::SETEQ);
48587 // InstCombine converts:
48588 // `(-x << C0) & C1`
48589 // to
48590 // `(x * (Pow2_Ceil(C1) - (1 << C0))) & C1`
48591 // This saves an IR instruction but on x86 the neg/shift version is preferable,
48592 // so undo the transform.
48594 if (N0.getOpcode() == ISD::MUL && N0.hasOneUse()) {
48595 // TODO: We don't actually need a splat for this, we just need the checks to
48596 // hold for each element.
48597 ConstantSDNode *N1C = isConstOrConstSplat(N1, /*AllowUndefs*/ true,
48598 /*AllowTruncation*/ false);
48599 ConstantSDNode *N01C =
48600 isConstOrConstSplat(N0.getOperand(1), /*AllowUndefs*/ true,
48601 /*AllowTruncation*/ false);
48602 if (N1C && N01C) {
48603 const APInt &MulC = N01C->getAPIntValue();
48604 const APInt &AndC = N1C->getAPIntValue();
48605 APInt MulCLowBit = MulC & (-MulC);
48606 if (MulC.uge(AndC) && !MulC.isPowerOf2() &&
48607 (MulCLowBit + MulC).isPowerOf2()) {
48608 SDValue Neg = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT),
48609 N0.getOperand(0));
48610 int32_t MulCLowBitLog = MulCLowBit.exactLogBase2();
48611 assert(MulCLowBitLog != -1 &&
48612 "Isolated lowbit is somehow not a power of 2!");
48613 SDValue Shift = DAG.getNode(ISD::SHL, dl, VT, Neg,
48614 DAG.getConstant(MulCLowBitLog, dl, VT));
48615 return DAG.getNode(ISD::AND, dl, VT, Shift, N1);
48620 if (SDValue V = combineScalarAndWithMaskSetcc(N, DAG, Subtarget))
48621 return V;
48623 if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
48624 return R;
48626 if (SDValue R = combineBitOpWithShift(N, DAG))
48627 return R;
48629 if (SDValue R = combineBitOpWithPACK(N, DAG))
48630 return R;
48632 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
48633 return FPLogic;
48635 if (SDValue R = combineAndShuffleNot(N, DAG, Subtarget))
48636 return R;
48638 if (DCI.isBeforeLegalizeOps())
48639 return SDValue();
48641 if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
48642 return R;
48644 if (SDValue R = combineAndNotIntoANDNP(N, DAG))
48645 return R;
48647 if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
48648 return ShiftRight;
48650 if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
48651 return R;
48653 // fold (and (mul x, c1), c2) -> (mul x, (and c1, c2))
48654 // iff c2 is all/no bits mask - i.e. a select-with-zero mask.
48655 // TODO: Handle PMULDQ/PMULUDQ/VPMADDWD/VPMADDUBSW?
48656 if (VT.isVector() && getTargetConstantFromNode(N1)) {
48657 unsigned Opc0 = N0.getOpcode();
48658 if ((Opc0 == ISD::MUL || Opc0 == ISD::MULHU || Opc0 == ISD::MULHS) &&
48659 getTargetConstantFromNode(N0.getOperand(1)) &&
48660 DAG.ComputeNumSignBits(N1) == VT.getScalarSizeInBits() &&
48661 N0->hasOneUse() && N0.getOperand(1)->hasOneUse()) {
48662 SDValue MaskMul = DAG.getNode(ISD::AND, dl, VT, N0.getOperand(1), N1);
48663 return DAG.getNode(Opc0, dl, VT, N0.getOperand(0), MaskMul);
48667 // Fold AND(SRL(X,Y),1) -> SETCC(BT(X,Y), COND_B) iff Y is not a constant;
48668 // this avoids a slow variable shift (moving the shift amount into ECX etc.).
48669 if (isOneConstant(N1) && N0->hasOneUse()) {
48670 SDValue Src = N0;
48671 while ((Src.getOpcode() == ISD::ZERO_EXTEND ||
48672 Src.getOpcode() == ISD::TRUNCATE) &&
48673 Src.getOperand(0)->hasOneUse())
48674 Src = Src.getOperand(0);
48675 bool ContainsNOT = false;
48676 X86::CondCode X86CC = X86::COND_B;
48677 // Peek through AND(NOT(SRL(X,Y)),1).
48678 if (isBitwiseNot(Src)) {
48679 Src = Src.getOperand(0);
48680 X86CC = X86::COND_AE;
48681 ContainsNOT = true;
48683 if (Src.getOpcode() == ISD::SRL &&
48684 !isa<ConstantSDNode>(Src.getOperand(1))) {
48685 SDValue BitNo = Src.getOperand(1);
48686 Src = Src.getOperand(0);
48687 // Peek through AND(SRL(NOT(X),Y),1).
48688 if (isBitwiseNot(Src)) {
48689 Src = Src.getOperand(0);
48690 X86CC = X86CC == X86::COND_AE ? X86::COND_B : X86::COND_AE;
48691 ContainsNOT = true;
48693 // If we have BMI2 then SHRX should be faster for i32/i64 cases.
48694 if (!(Subtarget.hasBMI2() && !ContainsNOT && VT.getSizeInBits() >= 32))
48695 if (SDValue BT = getBT(Src, BitNo, dl, DAG))
48696 return DAG.getZExtOrTrunc(getSETCC(X86CC, BT, dl, DAG), dl, VT);
48700 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
48701 // Attempt to recursively combine a bitmask AND with shuffles.
48702 SDValue Op(N, 0);
48703 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
48704 return Res;
48706 // If either operand is a constant mask, then only the elements that aren't
48707 // zero are actually demanded by the other operand.
48708 auto GetDemandedMasks = [&](SDValue Op) {
48709 APInt UndefElts;
48710 SmallVector<APInt> EltBits;
48711 int NumElts = VT.getVectorNumElements();
48712 int EltSizeInBits = VT.getScalarSizeInBits();
48713 APInt DemandedBits = APInt::getAllOnes(EltSizeInBits);
48714 APInt DemandedElts = APInt::getAllOnes(NumElts);
48715 if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
48716 EltBits)) {
48717 DemandedBits.clearAllBits();
48718 DemandedElts.clearAllBits();
48719 for (int I = 0; I != NumElts; ++I) {
48720 if (UndefElts[I]) {
48721 // We can't assume an undef src element gives an undef dst - the
48722 // other src might be zero.
48723 DemandedBits.setAllBits();
48724 DemandedElts.setBit(I);
48725 } else if (!EltBits[I].isZero()) {
48726 DemandedBits |= EltBits[I];
48727 DemandedElts.setBit(I);
48731 return std::make_pair(DemandedBits, DemandedElts);
48733 APInt Bits0, Elts0;
48734 APInt Bits1, Elts1;
48735 std::tie(Bits0, Elts0) = GetDemandedMasks(N1);
48736 std::tie(Bits1, Elts1) = GetDemandedMasks(N0);
48738 if (TLI.SimplifyDemandedVectorElts(N0, Elts0, DCI) ||
48739 TLI.SimplifyDemandedVectorElts(N1, Elts1, DCI) ||
48740 TLI.SimplifyDemandedBits(N0, Bits0, Elts0, DCI) ||
48741 TLI.SimplifyDemandedBits(N1, Bits1, Elts1, DCI)) {
48742 if (N->getOpcode() != ISD::DELETED_NODE)
48743 DCI.AddToWorklist(N);
48744 return SDValue(N, 0);
48747 SDValue NewN0 = TLI.SimplifyMultipleUseDemandedBits(N0, Bits0, Elts0, DAG);
48748 SDValue NewN1 = TLI.SimplifyMultipleUseDemandedBits(N1, Bits1, Elts1, DAG);
48749 if (NewN0 || NewN1)
48750 return DAG.getNode(ISD::AND, dl, VT, NewN0 ? NewN0 : N0,
48751 NewN1 ? NewN1 : N1);
48754 // Attempt to combine a scalar bitmask AND with an extracted shuffle.
48755 if ((VT.getScalarSizeInBits() % 8) == 0 &&
48756 N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
48757 isa<ConstantSDNode>(N0.getOperand(1)) && N0->hasOneUse()) {
48758 SDValue BitMask = N1;
48759 SDValue SrcVec = N0.getOperand(0);
48760 EVT SrcVecVT = SrcVec.getValueType();
48762 // Check that the constant bitmask masks whole bytes.
48763 APInt UndefElts;
48764 SmallVector<APInt, 64> EltBits;
48765 if (VT == SrcVecVT.getScalarType() && N0->isOnlyUserOf(SrcVec.getNode()) &&
48766 getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
48767 llvm::all_of(EltBits, [](const APInt &M) {
48768 return M.isZero() || M.isAllOnes();
48769 })) {
48770 unsigned NumElts = SrcVecVT.getVectorNumElements();
48771 unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
48772 unsigned Idx = N0.getConstantOperandVal(1);
48774 // Create a root shuffle mask from the byte mask and the extracted index.
48775 SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
48776 for (unsigned i = 0; i != Scale; ++i) {
48777 if (UndefElts[i])
48778 continue;
48779 int VecIdx = Scale * Idx + i;
48780 ShuffleMask[VecIdx] = EltBits[i].isZero() ? SM_SentinelZero : VecIdx;
48783 if (SDValue Shuffle = combineX86ShufflesRecursively(
48784 {SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 1,
48785 X86::MaxShuffleCombineDepth,
48786 /*HasVarMask*/ false, /*AllowVarCrossLaneMask*/ true,
48787 /*AllowVarPerLaneMask*/ true, DAG, Subtarget))
48788 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Shuffle,
48789 N0.getOperand(1));
48793 if (SDValue R = combineBMILogicOp(N, DAG, Subtarget))
48794 return R;
48796 return SDValue();
48799 // Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
48800 static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
48801 const X86Subtarget &Subtarget) {
48802 assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
48804 MVT VT = N->getSimpleValueType(0);
48805 unsigned EltSizeInBits = VT.getScalarSizeInBits();
48806 if (!VT.isVector() || (EltSizeInBits % 8) != 0)
48807 return SDValue();
48809 SDValue N0 = peekThroughBitcasts(N->getOperand(0));
48810 SDValue N1 = peekThroughBitcasts(N->getOperand(1));
48811 if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
48812 return SDValue();
48814 // On XOP we'll lower to PCMOV so accept one use. With AVX512, we can use
48815 // VPTERNLOG. Otherwise only do this if either mask has multiple uses already.
48816 if (!(Subtarget.hasXOP() || useVPTERNLOG(Subtarget, VT) ||
48817 !N0.getOperand(1).hasOneUse() || !N1.getOperand(1).hasOneUse()))
48818 return SDValue();
48820 // Attempt to extract constant byte masks.
48821 APInt UndefElts0, UndefElts1;
48822 SmallVector<APInt, 32> EltBits0, EltBits1;
48823 if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
48824 false, false))
48825 return SDValue();
48826 if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
48827 false, false))
48828 return SDValue();
48830 for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
48831 // TODO - add UNDEF elts support.
48832 if (UndefElts0[i] || UndefElts1[i])
48833 return SDValue();
48834 if (EltBits0[i] != ~EltBits1[i])
48835 return SDValue();
48838 SDLoc DL(N);
48840 if (useVPTERNLOG(Subtarget, VT)) {
48841 // Emit a VPTERNLOG node directly - 0xCA is the imm code for A?B:C.
48842 // VPTERNLOG is only available as vXi32/64-bit types.
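// Sketch of where 0xCA comes from: bit i of the imm8 is the result for the
// inputs {A,B,C} given by the bits of i (A being the most significant), so
// A?B:C produces 0b11001010 == 0xCA.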
48843 MVT OpSVT = EltSizeInBits <= 32 ? MVT::i32 : MVT::i64;
48844 MVT OpVT =
48845 MVT::getVectorVT(OpSVT, VT.getSizeInBits() / OpSVT.getSizeInBits());
48846 SDValue A = DAG.getBitcast(OpVT, N0.getOperand(1));
48847 SDValue B = DAG.getBitcast(OpVT, N0.getOperand(0));
48848 SDValue C = DAG.getBitcast(OpVT, N1.getOperand(0));
48849 SDValue Imm = DAG.getTargetConstant(0xCA, DL, MVT::i8);
48850 SDValue Res = getAVX512Node(X86ISD::VPTERNLOG, DL, OpVT, {A, B, C, Imm},
48851 DAG, Subtarget);
48852 return DAG.getBitcast(VT, Res);
48855 SDValue X = N->getOperand(0);
48856 SDValue Y =
48857 DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
48858 DAG.getBitcast(VT, N1.getOperand(0)));
48859 return DAG.getNode(ISD::OR, DL, VT, X, Y);
48862 // Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
48863 static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
48864 if (N->getOpcode() != ISD::OR)
48865 return false;
48867 SDValue N0 = N->getOperand(0);
48868 SDValue N1 = N->getOperand(1);
48870 // Canonicalize AND to LHS.
48871 if (N1.getOpcode() == ISD::AND)
48872 std::swap(N0, N1);
48874 // Attempt to match OR(AND(M,Y),ANDNP(M,X)).
48875 if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
48876 return false;
48878 Mask = N1.getOperand(0);
48879 X = N1.getOperand(1);
48881 // Check to see if the mask appeared in both the AND and ANDNP.
48882 if (N0.getOperand(0) == Mask)
48883 Y = N0.getOperand(1);
48884 else if (N0.getOperand(1) == Mask)
48885 Y = N0.getOperand(0);
48886 else
48887 return false;
48889 // TODO: Attempt to match against AND(XOR(-1,M),Y) as well; waiting for the
48890 // XOR->ANDNP combine first allows other combines to happen that prevent matching.
48891 return true;
48894 // Try to fold:
48895 // (or (and (m, y), (pandn m, x)))
48896 // into:
48897 // (vselect m, x, y)
48898 // As a special case, try to fold:
48899 // (or (and (m, (sub 0, x)), (pandn m, x)))
48900 // into:
48901 // (sub (xor X, M), M)
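// Per-element sanity check of the special case (illustrative): if M is the
// all-ones sign-splat then (X ^ M) - M == ~X + 1 == -X, and if M is zero it is
// just X, i.e. a negate selected by M.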
48902 static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
48903 const X86Subtarget &Subtarget) {
48904 assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
48906 EVT VT = N->getValueType(0);
48907 if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
48908 (VT.is256BitVector() && Subtarget.hasInt256())))
48909 return SDValue();
48911 SDValue X, Y, Mask;
48912 if (!matchLogicBlend(N, X, Y, Mask))
48913 return SDValue();
48915 // Validate that X, Y, and Mask are bitcasts, and see through them.
48916 Mask = peekThroughBitcasts(Mask);
48917 X = peekThroughBitcasts(X);
48918 Y = peekThroughBitcasts(Y);
48920 EVT MaskVT = Mask.getValueType();
48921 unsigned EltBits = MaskVT.getScalarSizeInBits();
48923 // TODO: Attempt to handle floating point cases as well?
48924 if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
48925 return SDValue();
48927 SDLoc DL(N);
48929 // Attempt to combine to conditional negate: (sub (xor X, M), M)
48930 if (SDValue Res = combineLogicBlendIntoConditionalNegate(VT, Mask, X, Y, DL,
48931 DAG, Subtarget))
48932 return Res;
48934 // PBLENDVB is only available on SSE 4.1.
48935 if (!Subtarget.hasSSE41())
48936 return SDValue();
48938 // If we have VPTERNLOG we should prefer that since PBLENDVB is multiple uops.
48939 if (Subtarget.hasVLX())
48940 return SDValue();
48942 MVT BlendVT = VT.is256BitVector() ? MVT::v32i8 : MVT::v16i8;
48944 X = DAG.getBitcast(BlendVT, X);
48945 Y = DAG.getBitcast(BlendVT, Y);
48946 Mask = DAG.getBitcast(BlendVT, Mask);
48947 Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
48948 return DAG.getBitcast(VT, Mask);
48951 // Helper function for combineOrCmpEqZeroToCtlzSrl
48952 // Transforms:
48953 // seteq(cmp x, 0)
48954 // into:
48955 // srl(ctlz x), log2(bitsize(x))
48956 // Input pattern is checked by caller.
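// Illustrative check for i32: ctlz(x) lies in [0, 32] and only the value 32
// (i.e. x == 0) has bit 5 set, so (ctlz x) >> 5 is exactly the seteq(x, 0)
// result.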
48957 static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) {
48958 SDValue Cmp = Op.getOperand(1);
48959 EVT VT = Cmp.getOperand(0).getValueType();
48960 unsigned Log2b = Log2_32(VT.getSizeInBits());
48961 SDLoc dl(Op);
48962 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
48963 // The result of the shift is true or false, and on X86, the 32-bit
48964 // encoding of shr and lzcnt is more desirable.
48965 SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
48966 SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
48967 DAG.getConstant(Log2b, dl, MVT::i8));
48968 return Scc;
48971 // Try to transform:
48972 // zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
48973 // into:
48974 // srl(or(ctlz(x), ctlz(y)), log2(bitsize(x)))
48975 // Will also attempt to match more generic cases, eg:
48976 // zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
48977 // Only applies if the target supports the FastLZCNT feature.
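// E.g. for two i32 values this turns (x == 0) | (y == 0) into
// ((ctlz x) | (ctlz y)) >> 5, avoiding the separate setcc materializations.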
48978 static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
48979 TargetLowering::DAGCombinerInfo &DCI,
48980 const X86Subtarget &Subtarget) {
48981 if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
48982 return SDValue();
48984 auto isORCandidate = [](SDValue N) {
48985 return (N->getOpcode() == ISD::OR && N->hasOneUse());
48988 // Check the zero extend is extending to 32-bit or more. The code generated by
48989 // srl(ctlz) for 16-bit or less variants of the pattern would require extra
48990 // instructions to clear the upper bits.
48991 if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
48992 !isORCandidate(N->getOperand(0)))
48993 return SDValue();
48995 // Check the node matches: setcc(eq, cmp 0)
48996 auto isSetCCCandidate = [](SDValue N) {
48997 return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
48998 X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
48999 N->getOperand(1).getOpcode() == X86ISD::CMP &&
49000 isNullConstant(N->getOperand(1).getOperand(1)) &&
49001 N->getOperand(1).getValueType().bitsGE(MVT::i32);
49004 SDNode *OR = N->getOperand(0).getNode();
49005 SDValue LHS = OR->getOperand(0);
49006 SDValue RHS = OR->getOperand(1);
49008 // Save nodes matching or(or, setcc(eq, cmp 0)).
49009 SmallVector<SDNode *, 2> ORNodes;
49010 while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
49011 (isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
49012 ORNodes.push_back(OR);
49013 OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
49014 LHS = OR->getOperand(0);
49015 RHS = OR->getOperand(1);
49018 // The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
49019 if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
49020 !isORCandidate(SDValue(OR, 0)))
49021 return SDValue();
49023 // We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern; try to lower it
49024 // to
49025 // or(srl(ctlz),srl(ctlz)).
49026 // The dag combiner can then fold it into:
49027 // srl(or(ctlz, ctlz)).
49028 SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, DAG);
49029 SDValue Ret, NewRHS;
49030 if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, DAG)))
49031 Ret = DAG.getNode(ISD::OR, SDLoc(OR), MVT::i32, NewLHS, NewRHS);
49033 if (!Ret)
49034 return SDValue();
49036 // Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
49037 while (!ORNodes.empty()) {
49038 OR = ORNodes.pop_back_val();
49039 LHS = OR->getOperand(0);
49040 RHS = OR->getOperand(1);
49041 // Swap rhs with lhs to match or(setcc(eq, cmp, 0), or).
49042 if (RHS->getOpcode() == ISD::OR)
49043 std::swap(LHS, RHS);
49044 NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, DAG);
49045 if (!NewRHS)
49046 return SDValue();
49047 Ret = DAG.getNode(ISD::OR, SDLoc(OR), MVT::i32, Ret, NewRHS);
49050 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
49053 static SDValue foldMaskedMergeImpl(SDValue And0_L, SDValue And0_R,
49054 SDValue And1_L, SDValue And1_R,
49055 const SDLoc &DL, SelectionDAG &DAG) {
49056 if (!isBitwiseNot(And0_L, true) || !And0_L->hasOneUse())
49057 return SDValue();
49058 SDValue NotOp = And0_L->getOperand(0);
49059 if (NotOp == And1_R)
49060 std::swap(And1_R, And1_L);
49061 if (NotOp != And1_L)
49062 return SDValue();
49064 // (~(NotOp) & And0_R) | (NotOp & And1_R)
49065 // --> ((And0_R ^ And1_R) & NotOp) ^ And0_R
49066 EVT VT = And1_L->getValueType(0);
49067 SDValue Freeze_And0_R = DAG.getNode(ISD::FREEZE, SDLoc(), VT, And0_R);
49068 SDValue Xor0 = DAG.getNode(ISD::XOR, DL, VT, And1_R, Freeze_And0_R);
49069 SDValue And = DAG.getNode(ISD::AND, DL, VT, Xor0, NotOp);
49070 SDValue Xor1 = DAG.getNode(ISD::XOR, DL, VT, And, Freeze_And0_R);
49071 return Xor1;
49074 /// Fold "masked merge" expressions like `(m & x) | (~m & y)` into the
49075 /// equivalent `((x ^ y) & m) ^ y` pattern.
49076 /// This is typically a better representation for targets without a fused
49077 /// "and-not" operation. This function is intended to be called from a
49078 /// `TargetLowering::PerformDAGCombine` callback on `ISD::OR` nodes.
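/// Per-bit check of the identity: with m == 1, ((x ^ y) & 1) ^ y == x, and with
/// m == 0 it is simply y, matching (m & x) | (~m & y).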
49079 static SDValue foldMaskedMerge(SDNode *Node, SelectionDAG &DAG) {
49080 // Note that masked-merge variants using XOR or ADD expressions are
49081 // normalized to OR by InstCombine so we only check for OR.
49082 assert(Node->getOpcode() == ISD::OR && "Must be called with ISD::OR node");
49083 SDValue N0 = Node->getOperand(0);
49084 if (N0->getOpcode() != ISD::AND || !N0->hasOneUse())
49085 return SDValue();
49086 SDValue N1 = Node->getOperand(1);
49087 if (N1->getOpcode() != ISD::AND || !N1->hasOneUse())
49088 return SDValue();
49090 SDLoc DL(Node);
49091 SDValue N00 = N0->getOperand(0);
49092 SDValue N01 = N0->getOperand(1);
49093 SDValue N10 = N1->getOperand(0);
49094 SDValue N11 = N1->getOperand(1);
49095 if (SDValue Result = foldMaskedMergeImpl(N00, N01, N10, N11, DL, DAG))
49096 return Result;
49097 if (SDValue Result = foldMaskedMergeImpl(N01, N00, N10, N11, DL, DAG))
49098 return Result;
49099 if (SDValue Result = foldMaskedMergeImpl(N10, N11, N00, N01, DL, DAG))
49100 return Result;
49101 if (SDValue Result = foldMaskedMergeImpl(N11, N10, N00, N01, DL, DAG))
49102 return Result;
49103 return SDValue();
49106 /// If this is an add or subtract where one operand is produced by a cmp+setcc,
49107 /// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
49108 /// with CMP+{ADC, SBB}.
49109 /// Also try (ADD/SUB)+(AND(SRL,1)) bit extraction pattern with BT+{ADC, SBB}.
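/// Illustrative example: for "X + (A u< B)" the setcc uses COND_B, so the code
/// below emits "cmp A, B; adc X, 0" rather than materializing the setcc result
/// and adding it.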
49110 static SDValue combineAddOrSubToADCOrSBB(bool IsSub, const SDLoc &DL, EVT VT,
49111 SDValue X, SDValue Y,
49112 SelectionDAG &DAG,
49113 bool ZeroSecondOpOnly = false) {
49114 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
49115 return SDValue();
49117 // Look through a one-use zext.
49118 if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse())
49119 Y = Y.getOperand(0);
49121 X86::CondCode CC;
49122 SDValue EFLAGS;
49123 if (Y.getOpcode() == X86ISD::SETCC && Y.hasOneUse()) {
49124 CC = (X86::CondCode)Y.getConstantOperandVal(0);
49125 EFLAGS = Y.getOperand(1);
49126 } else if (Y.getOpcode() == ISD::AND && isOneConstant(Y.getOperand(1)) &&
49127 Y.hasOneUse()) {
49128 EFLAGS = LowerAndToBT(Y, ISD::SETNE, DL, DAG, CC);
49131 if (!EFLAGS)
49132 return SDValue();
49134 // If X is -1 or 0, then we have an opportunity to avoid constants required in
49135 // the general case below.
49136 auto *ConstantX = dyn_cast<ConstantSDNode>(X);
49137 if (ConstantX && !ZeroSecondOpOnly) {
49138 if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnes()) ||
49139 (IsSub && CC == X86::COND_B && ConstantX->isZero())) {
49140 // This is a complicated way to get -1 or 0 from the carry flag:
49141 // -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
49142 // 0 - SETB --> 0 - (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
49143 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
49144 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
49145 EFLAGS);
49148 if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnes()) ||
49149 (IsSub && CC == X86::COND_A && ConstantX->isZero())) {
49150 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
49151 EFLAGS.getValueType().isInteger() &&
49152 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
49153 // Swap the operands of a SUB, and we have the same pattern as above.
49154 // -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
49155 // 0 - SETA (SUB A, B) --> 0 - SETB (SUB B, A) --> SUB + SBB
49156 SDValue NewSub = DAG.getNode(
49157 X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
49158 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
49159 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
49160 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
49161 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
49162 NewEFLAGS);
49167 if (CC == X86::COND_B) {
49168 // X + SETB Z --> adc X, 0
49169 // X - SETB Z --> sbb X, 0
49170 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
49171 DAG.getVTList(VT, MVT::i32), X,
49172 DAG.getConstant(0, DL, VT), EFLAGS);
49175 if (ZeroSecondOpOnly)
49176 return SDValue();
49178 if (CC == X86::COND_A) {
49179 // Try to convert COND_A into COND_B in an attempt to facilitate
49180 // materializing "setb reg".
49182 // Do not flip "e > c", where "c" is a constant, because the Cmp instruction
49183 // cannot take an immediate as its first operand.
49185 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
49186 EFLAGS.getValueType().isInteger() &&
49187 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
49188 SDValue NewSub =
49189 DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
49190 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
49191 SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
49192 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
49193 DAG.getVTList(VT, MVT::i32), X,
49194 DAG.getConstant(0, DL, VT), NewEFLAGS);
49198 if (CC == X86::COND_AE) {
49199 // X + SETAE --> sbb X, -1
49200 // X - SETAE --> adc X, -1
49201 return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL,
49202 DAG.getVTList(VT, MVT::i32), X,
49203 DAG.getConstant(-1, DL, VT), EFLAGS);
49206 if (CC == X86::COND_BE) {
49207 // X + SETBE --> sbb X, -1
49208 // X - SETBE --> adc X, -1
49209 // Try to convert COND_BE into COND_AE in an attempt to facilitate
49210 // materializing "setae reg".
49212 // Do not flip "e <= c", where "c" is a constant, because the Cmp instruction
49213 // cannot take an immediate as its first operand.
49215 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
49216 EFLAGS.getValueType().isInteger() &&
49217 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
49218 SDValue NewSub =
49219 DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
49220 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
49221 SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
49222 return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL,
49223 DAG.getVTList(VT, MVT::i32), X,
49224 DAG.getConstant(-1, DL, VT), NewEFLAGS);
49228 if (CC != X86::COND_E && CC != X86::COND_NE)
49229 return SDValue();
49231 if (EFLAGS.getOpcode() != X86ISD::CMP || !EFLAGS.hasOneUse() ||
49232 !X86::isZeroNode(EFLAGS.getOperand(1)) ||
49233 !EFLAGS.getOperand(0).getValueType().isInteger())
49234 return SDValue();
49236 SDValue Z = EFLAGS.getOperand(0);
49237 EVT ZVT = Z.getValueType();
49239 // If X is -1 or 0, then we have an opportunity to avoid constants required in
49240 // the general case below.
49241 if (ConstantX) {
49242 // 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb' with
49243 // fake operands:
49244 // 0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
49245 // -1 + (Z == 0) --> sbb %eax, %eax, (neg Z)
49246 if ((IsSub && CC == X86::COND_NE && ConstantX->isZero()) ||
49247 (!IsSub && CC == X86::COND_E && ConstantX->isAllOnes())) {
49248 SDValue Zero = DAG.getConstant(0, DL, ZVT);
49249 SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
49250 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
49251 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
49252 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
49253 SDValue(Neg.getNode(), 1));
49256 // cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb'
49257 // with fake operands:
49258 // 0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1)
49259 // -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1)
49260 if ((IsSub && CC == X86::COND_E && ConstantX->isZero()) ||
49261 (!IsSub && CC == X86::COND_NE && ConstantX->isAllOnes())) {
49262 SDValue One = DAG.getConstant(1, DL, ZVT);
49263 SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
49264 SDValue Cmp1 = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Z, One);
49265 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
49266 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
49267 Cmp1.getValue(1));
49271 // (cmp Z, 1) sets the carry flag if Z is 0.
49272 SDValue One = DAG.getConstant(1, DL, ZVT);
49273 SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
49274 SDValue Cmp1 = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Z, One);
49276 // Add the flags type for ADC/SBB nodes.
49277 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
49279 // X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
49280 // X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
49281 if (CC == X86::COND_NE)
49282 return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
49283 DAG.getConstant(-1ULL, DL, VT), Cmp1.getValue(1));
49285 // X - (Z == 0) --> sub X, (zext(sete Z, 0)) --> sbb X, 0, (cmp Z, 1)
49286 // X + (Z == 0) --> add X, (zext(sete Z, 0)) --> adc X, 0, (cmp Z, 1)
49287 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
49288 DAG.getConstant(0, DL, VT), Cmp1.getValue(1));
49291 /// If this is an add or subtract where one operand is produced by a cmp+setcc,
49292 /// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
49293 /// with CMP+{ADC, SBB}.
49294 static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
49295 bool IsSub = N->getOpcode() == ISD::SUB;
49296 SDValue X = N->getOperand(0);
49297 SDValue Y = N->getOperand(1);
49298 EVT VT = N->getValueType(0);
49299 SDLoc DL(N);
49301 if (SDValue ADCOrSBB = combineAddOrSubToADCOrSBB(IsSub, DL, VT, X, Y, DAG))
49302 return ADCOrSBB;
49304 // Commute and try again (negate the result for subtracts).
49305 if (SDValue ADCOrSBB = combineAddOrSubToADCOrSBB(IsSub, DL, VT, Y, X, DAG)) {
49306 if (IsSub)
49307 ADCOrSBB =
49308 DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), ADCOrSBB);
49309 return ADCOrSBB;
49312 return SDValue();
49315 static SDValue combineOrXorWithSETCC(SDNode *N, SDValue N0, SDValue N1,
49316 SelectionDAG &DAG) {
49317 assert((N->getOpcode() == ISD::XOR || N->getOpcode() == ISD::OR) &&
49318 "Unexpected opcode");
49320 // Delegate to combineAddOrSubToADCOrSBB if we have:
49322 // (xor/or (zero_extend (setcc)) imm)
49324 // where imm is odd if and only if we have xor, in which case the XOR/OR are
49325 // equivalent to a SUB/ADD, respectively.
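// (zext(setcc) is 0 or 1, so for an odd imm "imm ^ bit" equals "imm - bit" and
// for an even imm "imm | bit" equals "imm + bit", which is what justifies the
// delegation below.)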
49326 if (N0.getOpcode() == ISD::ZERO_EXTEND &&
49327 N0.getOperand(0).getOpcode() == X86ISD::SETCC && N0.hasOneUse()) {
49328 if (auto *N1C = dyn_cast<ConstantSDNode>(N1)) {
49329 bool IsSub = N->getOpcode() == ISD::XOR;
49330 bool N1COdd = N1C->getZExtValue() & 1;
49331 if (IsSub ? N1COdd : !N1COdd) {
49332 SDLoc DL(N);
49333 EVT VT = N->getValueType(0);
49334 if (SDValue R = combineAddOrSubToADCOrSBB(IsSub, DL, VT, N1, N0, DAG))
49335 return R;
49340 return SDValue();
49343 static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
49344 TargetLowering::DAGCombinerInfo &DCI,
49345 const X86Subtarget &Subtarget) {
49346 SDValue N0 = N->getOperand(0);
49347 SDValue N1 = N->getOperand(1);
49348 EVT VT = N->getValueType(0);
49349 SDLoc dl(N);
49350 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49352 // If this is SSE1-only, convert to FOR to avoid scalarization.
49353 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
49354 return DAG.getBitcast(MVT::v4i32,
49355 DAG.getNode(X86ISD::FOR, dl, MVT::v4f32,
49356 DAG.getBitcast(MVT::v4f32, N0),
49357 DAG.getBitcast(MVT::v4f32, N1)));
49360 // Match any-of bool scalar reductions into a bitcast/movmsk + cmp.
49361 // TODO: Support multiple SrcOps.
49362 if (VT == MVT::i1) {
49363 SmallVector<SDValue, 2> SrcOps;
49364 SmallVector<APInt, 2> SrcPartials;
49365 if (matchScalarReduction(SDValue(N, 0), ISD::OR, SrcOps, &SrcPartials) &&
49366 SrcOps.size() == 1) {
49367 unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
49368 EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
49369 SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
49370 if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
49371 Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
49372 if (Mask) {
49373 assert(SrcPartials[0].getBitWidth() == NumElts &&
49374 "Unexpected partial reduction mask");
49375 SDValue ZeroBits = DAG.getConstant(0, dl, MaskVT);
49376 SDValue PartialBits = DAG.getConstant(SrcPartials[0], dl, MaskVT);
49377 Mask = DAG.getNode(ISD::AND, dl, MaskVT, Mask, PartialBits);
49378 return DAG.getSetCC(dl, MVT::i1, Mask, ZeroBits, ISD::SETNE);
49383 if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
49384 return R;
49386 if (SDValue R = combineBitOpWithShift(N, DAG))
49387 return R;
49389 if (SDValue R = combineBitOpWithPACK(N, DAG))
49390 return R;
49392 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
49393 return FPLogic;
49395 if (DCI.isBeforeLegalizeOps())
49396 return SDValue();
49398 if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
49399 return R;
49401 if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
49402 return R;
49404 if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
49405 return R;
49407 // (0 - SetCC) | C -> (zext (not SetCC)) * (C + 1) - 1 if we can get a LEA out of it.
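// (SetCC is 0 or 1: if 1 the OR produces -1 and 0*(C+1)-1 == -1; if 0 the OR
// produces C and 1*(C+1)-1 == C, so the two forms agree.)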
49408 if ((VT == MVT::i32 || VT == MVT::i64) &&
49409 N0.getOpcode() == ISD::SUB && N0.hasOneUse() &&
49410 isNullConstant(N0.getOperand(0))) {
49411 SDValue Cond = N0.getOperand(1);
49412 if (Cond.getOpcode() == ISD::ZERO_EXTEND && Cond.hasOneUse())
49413 Cond = Cond.getOperand(0);
49415 if (Cond.getOpcode() == X86ISD::SETCC && Cond.hasOneUse()) {
49416 if (auto *CN = dyn_cast<ConstantSDNode>(N1)) {
49417 uint64_t Val = CN->getZExtValue();
49418 if (Val == 1 || Val == 2 || Val == 3 || Val == 4 || Val == 7 || Val == 8) {
49419 X86::CondCode CCode = (X86::CondCode)Cond.getConstantOperandVal(0);
49420 CCode = X86::GetOppositeBranchCondition(CCode);
49421 SDValue NotCond = getSETCC(CCode, Cond.getOperand(1), SDLoc(Cond), DAG);
49423 SDValue R = DAG.getZExtOrTrunc(NotCond, dl, VT);
49424 R = DAG.getNode(ISD::MUL, dl, VT, R, DAG.getConstant(Val + 1, dl, VT));
49425 R = DAG.getNode(ISD::SUB, dl, VT, R, DAG.getConstant(1, dl, VT));
49426 return R;
49432 // Combine OR(X,KSHIFTL(Y,Elts/2)) -> CONCAT_VECTORS(X,Y) == KUNPCK(X,Y).
49433 // Combine OR(KSHIFTL(X,Elts/2),Y) -> CONCAT_VECTORS(Y,X) == KUNPCK(Y,X).
49434 // iff the upper elements of the non-shifted arg are zero.
49435 // KUNPCK requires 16+ bool vector elements.
49436 if (N0.getOpcode() == X86ISD::KSHIFTL || N1.getOpcode() == X86ISD::KSHIFTL) {
49437 unsigned NumElts = VT.getVectorNumElements();
49438 unsigned HalfElts = NumElts / 2;
49439 APInt UpperElts = APInt::getHighBitsSet(NumElts, HalfElts);
49440 if (NumElts >= 16 && N1.getOpcode() == X86ISD::KSHIFTL &&
49441 N1.getConstantOperandAPInt(1) == HalfElts &&
49442 DAG.MaskedVectorIsZero(N0, UpperElts)) {
49443 return DAG.getNode(
49444 ISD::CONCAT_VECTORS, dl, VT,
49445 extractSubVector(N0, 0, DAG, dl, HalfElts),
49446 extractSubVector(N1.getOperand(0), 0, DAG, dl, HalfElts));
49448 if (NumElts >= 16 && N0.getOpcode() == X86ISD::KSHIFTL &&
49449 N0.getConstantOperandAPInt(1) == HalfElts &&
49450 DAG.MaskedVectorIsZero(N1, UpperElts)) {
49451 return DAG.getNode(
49452 ISD::CONCAT_VECTORS, dl, VT,
49453 extractSubVector(N1, 0, DAG, dl, HalfElts),
49454 extractSubVector(N0.getOperand(0), 0, DAG, dl, HalfElts));
49458 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
49459 // Attempt to recursively combine an OR of shuffles.
49460 SDValue Op(N, 0);
49461 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
49462 return Res;
49464 // If either operand is a constant mask, then only the elements that aren't
49465 // allones are actually demanded by the other operand.
49466 auto SimplifyUndemandedElts = [&](SDValue Op, SDValue OtherOp) {
49467 APInt UndefElts;
49468 SmallVector<APInt> EltBits;
49469 int NumElts = VT.getVectorNumElements();
49470 int EltSizeInBits = VT.getScalarSizeInBits();
49471 if (!getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts, EltBits))
49472 return false;
49474 APInt DemandedElts = APInt::getZero(NumElts);
49475 for (int I = 0; I != NumElts; ++I)
49476 if (!EltBits[I].isAllOnes())
49477 DemandedElts.setBit(I);
49479 return TLI.SimplifyDemandedVectorElts(OtherOp, DemandedElts, DCI);
49481 if (SimplifyUndemandedElts(N0, N1) || SimplifyUndemandedElts(N1, N0)) {
49482 if (N->getOpcode() != ISD::DELETED_NODE)
49483 DCI.AddToWorklist(N);
49484 return SDValue(N, 0);
49488 // We should fold "masked merge" patterns when `andn` is not available.
49489 if (!Subtarget.hasBMI() && VT.isScalarInteger() && VT != MVT::i1)
49490 if (SDValue R = foldMaskedMerge(N, DAG))
49491 return R;
49493 if (SDValue R = combineOrXorWithSETCC(N, N0, N1, DAG))
49494 return R;
49496 return SDValue();
49499 /// Try to turn tests against the signbit in the form of:
49500 /// XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
49501 /// into:
49502 /// SETGT(X, -1)
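/// (SRL by size(X)-1 isolates the sign bit, so XOR with 1 computes "sign bit
/// clear", i.e. X >= 0, which is the same as the signed compare X > -1.)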
49503 static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
49504 // This is only worth doing if the output type is i8 or i1.
49505 EVT ResultType = N->getValueType(0);
49506 if (ResultType != MVT::i8 && ResultType != MVT::i1)
49507 return SDValue();
49509 SDValue N0 = N->getOperand(0);
49510 SDValue N1 = N->getOperand(1);
49512 // We should be performing an xor against a truncated shift.
49513 if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
49514 return SDValue();
49516 // Make sure we are performing an xor against one.
49517 if (!isOneConstant(N1))
49518 return SDValue();
49520 // SetCC on x86 zero extends so only act on this if it's a logical shift.
49521 SDValue Shift = N0.getOperand(0);
49522 if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
49523 return SDValue();
49525 // Make sure we are truncating from one of i16, i32 or i64.
49526 EVT ShiftTy = Shift.getValueType();
49527 if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
49528 return SDValue();
49530 // Make sure the shift amount extracts the sign bit.
49531 if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
49532 Shift.getConstantOperandAPInt(1) != (ShiftTy.getSizeInBits() - 1))
49533 return SDValue();
49535 // Create a greater-than comparison against -1.
49536 // N.B. Using SETGE against 0 works but we want a canonical looking
49537 // comparison; using SETGT matches up with what TranslateX86CC does.
49538 SDLoc DL(N);
49539 SDValue ShiftOp = Shift.getOperand(0);
49540 EVT ShiftOpTy = ShiftOp.getValueType();
49541 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49542 EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
49543 *DAG.getContext(), ResultType);
49544 SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
49545 DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
49546 if (SetCCResultType != ResultType)
49547 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
49548 return Cond;
49551 /// Turn vector tests of the signbit in the form of:
49552 /// xor (sra X, elt_size(X)-1), -1
49553 /// into:
49554 /// pcmpgt X, -1
49556 /// This should be called before type legalization because the pattern may not
49557 /// persist after that.
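/// (The arithmetic shift smears the sign bit into 0 or -1 per element; xor-ing
/// with -1 then yields -1 exactly for the non-negative elements, which is what
/// "pcmpgt X, -1" computes directly.)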
49558 static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
49559 const X86Subtarget &Subtarget) {
49560 EVT VT = N->getValueType(0);
49561 if (!VT.isSimple())
49562 return SDValue();
49564 switch (VT.getSimpleVT().SimpleTy) {
49565 default: return SDValue();
49566 case MVT::v16i8:
49567 case MVT::v8i16:
49568 case MVT::v4i32:
49569 case MVT::v2i64: if (!Subtarget.hasSSE2()) return SDValue(); break;
49570 case MVT::v32i8:
49571 case MVT::v16i16:
49572 case MVT::v8i32:
49573 case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
49576 // There must be a shift right algebraic before the xor, and the xor must be a
49577 // 'not' operation.
49578 SDValue Shift = N->getOperand(0);
49579 SDValue Ones = N->getOperand(1);
49580 if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
49581 !ISD::isBuildVectorAllOnes(Ones.getNode()))
49582 return SDValue();
49584 // The shift should be smearing the sign bit across each vector element.
49585 auto *ShiftAmt =
49586 isConstOrConstSplat(Shift.getOperand(1), /*AllowUndefs*/ true);
49587 if (!ShiftAmt ||
49588 ShiftAmt->getAPIntValue() != (Shift.getScalarValueSizeInBits() - 1))
49589 return SDValue();
49591 // Create a greater-than comparison against -1. We don't use the more obvious
49592 // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
49593 return DAG.getSetCC(SDLoc(N), VT, Shift.getOperand(0), Ones, ISD::SETGT);
49596 /// Detect patterns of truncation with unsigned saturation:
49598 /// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
49599 /// Return the source value x to be truncated or SDValue() if the pattern was
49600 /// not matched.
49602 /// 2. (truncate (smin (smax (x, C1), C2)) to dest_type),
49603 /// where C1 >= 0 and C2 is unsigned max of destination type.
49605 /// (truncate (smax (smin (x, C2), C1)) to dest_type)
49606 /// where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2.
49608 /// These two patterns are equivalent to:
49609 /// (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type)
49610 /// So return the smax(x, C1) value to be truncated or SDValue() if the
49611 /// pattern was not matched.
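/// Illustrative instance for an i32 -> i16 truncation: pattern 1 is
/// (trunc (umin x, 65535)), with 65535 matched by C2.isMask(16) below.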
49612 static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
49613 const SDLoc &DL) {
49614 EVT InVT = In.getValueType();
49616 // Saturation with truncation. We truncate from InVT to VT.
49617 assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() &&
49618 "Unexpected types for truncate operation");
49620 // Match min/max and return limit value as a parameter.
49621 auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue {
49622 if (V.getOpcode() == Opcode &&
49623 ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit))
49624 return V.getOperand(0);
49625 return SDValue();
49628 APInt C1, C2;
49629 if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2))
49630 // C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to
49631 // the element size of the destination type.
49632 if (C2.isMask(VT.getScalarSizeInBits()))
49633 return UMin;
49635 if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2))
49636 if (MatchMinMax(SMin, ISD::SMAX, C1))
49637 if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()))
49638 return SMin;
49640 if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1))
49641 if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2))
49642 if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) &&
49643 C2.uge(C1)) {
49644 return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1));
49647 return SDValue();
49650 /// Detect patterns of truncation with signed saturation:
49651 /// (truncate (smin ((smax (x, signed_min_of_dest_type)),
49652 /// signed_max_of_dest_type)) to dest_type)
49653 /// or:
49654 /// (truncate (smax ((smin (x, signed_max_of_dest_type)),
49655 /// signed_min_of_dest_type)) to dest_type).
49656 /// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type].
49657 /// Return the source value to be truncated or SDValue() if the pattern was not
49658 /// matched.
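/// E.g. for an i32 -> i8 truncation the matched clamp range is [-128, 127], or
/// [0, 255] when MatchPackUS is set.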
49659 static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
49660 unsigned NumDstBits = VT.getScalarSizeInBits();
49661 unsigned NumSrcBits = In.getScalarValueSizeInBits();
49662 assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");
49664 auto MatchMinMax = [](SDValue V, unsigned Opcode,
49665 const APInt &Limit) -> SDValue {
49666 APInt C;
49667 if (V.getOpcode() == Opcode &&
49668 ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit)
49669 return V.getOperand(0);
49670 return SDValue();
49673 APInt SignedMax, SignedMin;
49674 if (MatchPackUS) {
49675 SignedMax = APInt::getAllOnes(NumDstBits).zext(NumSrcBits);
49676 SignedMin = APInt(NumSrcBits, 0);
49677 } else {
49678 SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
49679 SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits);
49682 if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax))
49683 if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin))
49684 return SMax;
49686 if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin))
49687 if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax))
49688 return SMin;
49690 return SDValue();
49693 static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
49694 SelectionDAG &DAG,
49695 const X86Subtarget &Subtarget) {
49696 if (!Subtarget.hasSSE2() || !VT.isVector())
49697 return SDValue();
49699 EVT SVT = VT.getVectorElementType();
49700 EVT InVT = In.getValueType();
49701 EVT InSVT = InVT.getVectorElementType();
49703 // If we're clamping a signed 32-bit vector to 0-255 and the 32-bit vector is
49704 // split across two registers, we can use a packusdw+perm to clamp to 0-65535
49705 // and concatenate at the same time. Then we can use a final vpmovuswb to
49706 // clip to 0-255.
49707 if (Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
49708 InVT == MVT::v16i32 && VT == MVT::v16i8) {
49709 if (SDValue USatVal = detectSSatPattern(In, VT, true)) {
49710 // Emit a VPACKUSDW+VPERMQ followed by a VPMOVUSWB.
49711 SDValue Mid = truncateVectorWithPACK(X86ISD::PACKUS, MVT::v16i16, USatVal,
49712 DL, DAG, Subtarget);
49713 assert(Mid && "Failed to pack!");
49714 return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, Mid);
49718 // vXi32 truncate instructions are available with AVX512F.
49719 // vXi16 truncate instructions are only available with AVX512BW.
49720 // For 256-bit or smaller vectors, we require VLX.
49721 // FIXME: We could widen truncates to 512 to remove the VLX restriction.
49722 // If the result type is 256 bits or larger and we have disabled 512-bit
49723 // registers, we should go ahead and use the pack instructions if possible.
49724 bool PreferAVX512 = ((Subtarget.hasAVX512() && InSVT == MVT::i32) ||
49725 (Subtarget.hasBWI() && InSVT == MVT::i16)) &&
49726 (InVT.getSizeInBits() > 128) &&
49727 (Subtarget.hasVLX() || InVT.getSizeInBits() > 256) &&
49728 !(!Subtarget.useAVX512Regs() && VT.getSizeInBits() >= 256);
49730 if (!PreferAVX512 && VT.getVectorNumElements() > 1 &&
49731 isPowerOf2_32(VT.getVectorNumElements()) &&
49732 (SVT == MVT::i8 || SVT == MVT::i16) &&
49733 (InSVT == MVT::i16 || InSVT == MVT::i32)) {
49734 if (SDValue USatVal = detectSSatPattern(In, VT, true)) {
49735 // vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
49736 if (SVT == MVT::i8 && InSVT == MVT::i32) {
49737 EVT MidVT = VT.changeVectorElementType(MVT::i16);
49738 SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
49739 DAG, Subtarget);
49740 assert(Mid && "Failed to pack!");
49741 SDValue V = truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG,
49742 Subtarget);
49743 assert(V && "Failed to pack!");
49744 return V;
49745 } else if (SVT == MVT::i8 || Subtarget.hasSSE41())
49746 return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
49747 Subtarget);
49749 if (SDValue SSatVal = detectSSatPattern(In, VT))
49750 return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
49751 Subtarget);
49754 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49755 if (TLI.isTypeLegal(InVT) && InVT.isVector() && SVT != MVT::i1 &&
49756 Subtarget.hasAVX512() && (InSVT != MVT::i16 || Subtarget.hasBWI()) &&
49757 (SVT == MVT::i32 || SVT == MVT::i16 || SVT == MVT::i8)) {
49758 unsigned TruncOpc = 0;
49759 SDValue SatVal;
49760 if (SDValue SSatVal = detectSSatPattern(In, VT)) {
49761 SatVal = SSatVal;
49762 TruncOpc = X86ISD::VTRUNCS;
49763 } else if (SDValue USatVal = detectUSatPattern(In, VT, DAG, DL)) {
49764 SatVal = USatVal;
49765 TruncOpc = X86ISD::VTRUNCUS;
49767 if (SatVal) {
49768 unsigned ResElts = VT.getVectorNumElements();
49769 // If the input type is less than 512 bits and we don't have VLX, we need
49770 // to widen to 512 bits.
49771 if (!Subtarget.hasVLX() && !InVT.is512BitVector()) {
49772 unsigned NumConcats = 512 / InVT.getSizeInBits();
49773 ResElts *= NumConcats;
49774 SmallVector<SDValue, 4> ConcatOps(NumConcats, DAG.getUNDEF(InVT));
49775 ConcatOps[0] = SatVal;
49776 InVT = EVT::getVectorVT(*DAG.getContext(), InSVT,
49777 NumConcats * InVT.getVectorNumElements());
49778 SatVal = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, ConcatOps);
49781 // Widen the result if it's narrower than 128 bits.
49781 if (ResElts * SVT.getSizeInBits() < 128)
49782 ResElts = 128 / SVT.getSizeInBits();
49783 EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), SVT, ResElts);
49784 SDValue Res = DAG.getNode(TruncOpc, DL, TruncVT, SatVal);
49785 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
49786 DAG.getIntPtrConstant(0, DL));
49790 return SDValue();
49793 /// This function detects the AVG pattern between vectors of unsigned i8/i16,
49794 /// which is c = (a + b + 1) / 2, and replaces this operation with the efficient
49795 /// ISD::AVGCEILU (AVG) instruction.
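/// Worked example (assuming i8 lanes): a = 200 and b = 100 are zero-extended,
/// (200 + 100 + 1) >> 1 == 150, which is what AVGCEILU/PAVGB computes without
/// the widening.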
49796 static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
49797 const X86Subtarget &Subtarget,
49798 const SDLoc &DL) {
49799 if (!VT.isVector())
49800 return SDValue();
49801 EVT InVT = In.getValueType();
49802 unsigned NumElems = VT.getVectorNumElements();
49804 EVT ScalarVT = VT.getVectorElementType();
49805 if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) && NumElems >= 2))
49806 return SDValue();
49808 // InScalarVT is the intermediate type in the AVG pattern and it should be greater
49809 // than the original input type (i8/i16).
49810 EVT InScalarVT = InVT.getVectorElementType();
49811 if (InScalarVT.getFixedSizeInBits() <= ScalarVT.getFixedSizeInBits())
49812 return SDValue();
49814 if (!Subtarget.hasSSE2())
49815 return SDValue();
49817 // Detect the following pattern:
49819 // %1 = zext <N x i8> %a to <N x i32>
49820 // %2 = zext <N x i8> %b to <N x i32>
49821 // %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
49822 // %4 = add nuw nsw <N x i32> %3, %2
49823 // %5 = lshr <N x i32> %4, <i32 1 x N>
49824 // %6 = trunc <N x i32> %5 to <N x i8>
49826 // In AVX512, the last instruction can also be a trunc store.
49827 if (In.getOpcode() != ISD::SRL)
49828 return SDValue();
49830 // A lambda checking the given SDValue is a constant vector and each element
49831 // is in the range [Min, Max].
49832 auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
49833 return ISD::matchUnaryPredicate(V, [Min, Max](ConstantSDNode *C) {
49834 return !(C->getAPIntValue().ult(Min) || C->getAPIntValue().ugt(Max));
49838 auto IsZExtLike = [DAG = &DAG, ScalarVT](SDValue V) {
49839 unsigned MaxActiveBits = DAG->computeKnownBits(V).countMaxActiveBits();
49840 return MaxActiveBits <= ScalarVT.getSizeInBits();
49843 // Check if each element of the vector is right-shifted by one.
49844 SDValue LHS = In.getOperand(0);
49845 SDValue RHS = In.getOperand(1);
49846 if (!IsConstVectorInRange(RHS, 1, 1))
49847 return SDValue();
49848 if (LHS.getOpcode() != ISD::ADD)
49849 return SDValue();
49851 // Detect a pattern of a + b + 1 where the order doesn't matter.
49852 SDValue Operands[3];
49853 Operands[0] = LHS.getOperand(0);
49854 Operands[1] = LHS.getOperand(1);
49856 auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
49857 ArrayRef<SDValue> Ops) {
49858 return DAG.getNode(ISD::AVGCEILU, DL, Ops[0].getValueType(), Ops);
49861 auto AVGSplitter = [&](std::array<SDValue, 2> Ops) {
49862 for (SDValue &Op : Ops)
49863 if (Op.getValueType() != VT)
49864 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
49865 // Pad to a power-of-2 vector, split+apply and extract the original vector.
49866 unsigned NumElemsPow2 = PowerOf2Ceil(NumElems);
49867 EVT Pow2VT = EVT::getVectorVT(*DAG.getContext(), ScalarVT, NumElemsPow2);
49868 if (NumElemsPow2 != NumElems) {
49869 for (SDValue &Op : Ops) {
49870 SmallVector<SDValue, 32> EltsOfOp(NumElemsPow2, DAG.getUNDEF(ScalarVT));
49871 for (unsigned i = 0; i != NumElems; ++i) {
49872 SDValue Idx = DAG.getIntPtrConstant(i, DL);
49873 EltsOfOp[i] =
49874 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarVT, Op, Idx);
49876 Op = DAG.getBuildVector(Pow2VT, DL, EltsOfOp);
49879 SDValue Res = SplitOpsAndApply(DAG, Subtarget, DL, Pow2VT, Ops, AVGBuilder);
49880 if (NumElemsPow2 == NumElems)
49881 return Res;
49882 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
49883 DAG.getIntPtrConstant(0, DL));
49886 // Take care of the case when one of the operands is a constant vector whose
49887 // element is in the range [1, 256].
49888 if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
49889 IsZExtLike(Operands[0])) {
49890 // The pattern is detected. Subtract one from the constant vector, then
49891 // demote it and emit the ISD::AVGCEILU node.
49892 SDValue VecOnes = DAG.getConstant(1, DL, InVT);
49893 Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
49894 return AVGSplitter({Operands[0], Operands[1]});
49897 // Matches 'add like' patterns: add(Op0,Op1) or zext(or(Op0,Op1)).
49898 // Match the or case only if it's 'add-like', i.e. it can be replaced by an add.
49899 auto FindAddLike = [&](SDValue V, SDValue &Op0, SDValue &Op1) {
49900 if (ISD::ADD == V.getOpcode()) {
49901 Op0 = V.getOperand(0);
49902 Op1 = V.getOperand(1);
49903 return true;
49905 if (ISD::ZERO_EXTEND != V.getOpcode())
49906 return false;
49907 V = V.getOperand(0);
49908 if (V.getValueType() != VT || ISD::OR != V.getOpcode() ||
49909 !DAG.haveNoCommonBitsSet(V.getOperand(0), V.getOperand(1)))
49910 return false;
49911 Op0 = V.getOperand(0);
49912 Op1 = V.getOperand(1);
49913 return true;
49916 SDValue Op0, Op1;
49917 if (FindAddLike(Operands[0], Op0, Op1))
49918 std::swap(Operands[0], Operands[1]);
49919 else if (!FindAddLike(Operands[1], Op0, Op1))
49920 return SDValue();
49921 Operands[2] = Op0;
49922 Operands[1] = Op1;
49924 // Now we have three operands of two additions. Check that one of them is a
49925 // constant vector with ones, and the other two can be promoted from i8/i16.
49926 for (SDValue &Op : Operands) {
49927 if (!IsConstVectorInRange(Op, 1, 1))
49928 continue;
49929 std::swap(Op, Operands[2]);
49931 // Check if Operands[0] and Operands[1] are results of type promotion.
49932 for (int j = 0; j < 2; ++j)
49933 if (Operands[j].getValueType() != VT)
49934 if (!IsZExtLike(Operands[j]))
49935 return SDValue();
49937 // The pattern is detected, emit ISD::AVGCEILU node(s).
49938 return AVGSplitter({Operands[0], Operands[1]});
49941 return SDValue();
49944 static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
49945 TargetLowering::DAGCombinerInfo &DCI,
49946 const X86Subtarget &Subtarget) {
49947 LoadSDNode *Ld = cast<LoadSDNode>(N);
49948 EVT RegVT = Ld->getValueType(0);
49949 EVT MemVT = Ld->getMemoryVT();
49950 SDLoc dl(Ld);
49951 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49953 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
49954 // into two 16-byte operations. Also split non-temporal aligned loads on
49955 // pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
49956 ISD::LoadExtType Ext = Ld->getExtensionType();
49957 unsigned Fast;
49958 if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
49959 Ext == ISD::NON_EXTLOAD &&
49960 ((Ld->isNonTemporal() && !Subtarget.hasInt256() &&
49961 Ld->getAlign() >= Align(16)) ||
49962 (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
49963 *Ld->getMemOperand(), &Fast) &&
49964 !Fast))) {
49965 unsigned NumElems = RegVT.getVectorNumElements();
49966 if (NumElems < 2)
49967 return SDValue();
49969 unsigned HalfOffset = 16;
49970 SDValue Ptr1 = Ld->getBasePtr();
49971 SDValue Ptr2 =
49972 DAG.getMemBasePlusOffset(Ptr1, TypeSize::getFixed(HalfOffset), dl);
49973 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
49974 NumElems / 2);
49975 SDValue Load1 =
49976 DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr1, Ld->getPointerInfo(),
49977 Ld->getOriginalAlign(),
49978 Ld->getMemOperand()->getFlags());
49979 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr2,
49980 Ld->getPointerInfo().getWithOffset(HalfOffset),
49981 Ld->getOriginalAlign(),
49982 Ld->getMemOperand()->getFlags());
49983 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
49984 Load1.getValue(1), Load2.getValue(1));
49986 SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2);
49987 return DCI.CombineTo(N, NewVec, TF, true);
49990 // Bool vector load - attempt to cast to an integer, as we have good
49991 // (vXiY *ext(vXi1 bitcast(iX))) handling.
49992 if (Ext == ISD::NON_EXTLOAD && !Subtarget.hasAVX512() && RegVT.isVector() &&
49993 RegVT.getScalarType() == MVT::i1 && DCI.isBeforeLegalize()) {
49994 unsigned NumElts = RegVT.getVectorNumElements();
49995 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
49996 if (TLI.isTypeLegal(IntVT)) {
49997 SDValue IntLoad = DAG.getLoad(IntVT, dl, Ld->getChain(), Ld->getBasePtr(),
49998 Ld->getPointerInfo(),
49999 Ld->getOriginalAlign(),
50000 Ld->getMemOperand()->getFlags());
50001 SDValue BoolVec = DAG.getBitcast(RegVT, IntLoad);
50002 return DCI.CombineTo(N, BoolVec, IntLoad.getValue(1), true);
50006 // If we also load/broadcast this to a wider type, then just extract the
50007 // lowest subvector.
50008 if (Ext == ISD::NON_EXTLOAD && Subtarget.hasAVX() && Ld->isSimple() &&
50009 (RegVT.is128BitVector() || RegVT.is256BitVector())) {
50010 SDValue Ptr = Ld->getBasePtr();
50011 SDValue Chain = Ld->getChain();
50012 for (SDNode *User : Chain->uses()) {
50013 auto *UserLd = dyn_cast<MemSDNode>(User);
50014 if (User != N && UserLd &&
50015 (User->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD ||
50016 User->getOpcode() == X86ISD::VBROADCAST_LOAD ||
50017 ISD::isNormalLoad(User)) &&
50018 UserLd->getChain() == Chain && !User->hasAnyUseOfValue(1) &&
50019 User->getValueSizeInBits(0).getFixedValue() >
50020 RegVT.getFixedSizeInBits()) {
50021 if (User->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD &&
50022 UserLd->getBasePtr() == Ptr &&
50023 UserLd->getMemoryVT().getSizeInBits() == MemVT.getSizeInBits()) {
50024 SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, SDLoc(N),
50025 RegVT.getSizeInBits());
50026 Extract = DAG.getBitcast(RegVT, Extract);
50027 return DCI.CombineTo(N, Extract, SDValue(User, 1));
50029 auto MatchingBits = [](const APInt &Undefs, const APInt &UserUndefs,
50030 ArrayRef<APInt> Bits, ArrayRef<APInt> UserBits) {
50031 for (unsigned I = 0, E = Undefs.getBitWidth(); I != E; ++I) {
50032 if (Undefs[I])
50033 continue;
50034 if (UserUndefs[I] || Bits[I] != UserBits[I])
50035 return false;
50037 return true;
50039 // See if we are loading a constant that matches in the lower
50040 // bits of a longer constant (but from a different constant pool ptr).
50041 EVT UserVT = User->getValueType(0);
50042 SDValue UserPtr = UserLd->getBasePtr();
50043 const Constant *LdC = getTargetConstantFromBasePtr(Ptr);
50044 const Constant *UserC = getTargetConstantFromBasePtr(UserPtr);
50045 if (LdC && UserC && UserPtr != Ptr) {
50046 unsigned LdSize = LdC->getType()->getPrimitiveSizeInBits();
50047 unsigned UserSize = UserC->getType()->getPrimitiveSizeInBits();
50048 if (LdSize < UserSize || !ISD::isNormalLoad(User)) {
50049 APInt Undefs, UserUndefs;
50050 SmallVector<APInt> Bits, UserBits;
50051 unsigned NumBits = std::min(RegVT.getScalarSizeInBits(),
50052 UserVT.getScalarSizeInBits());
50053 if (getTargetConstantBitsFromNode(SDValue(N, 0), NumBits, Undefs,
50054 Bits) &&
50055 getTargetConstantBitsFromNode(SDValue(User, 0), NumBits,
50056 UserUndefs, UserBits)) {
50057 if (MatchingBits(Undefs, UserUndefs, Bits, UserBits)) {
50058 SDValue Extract = extractSubVector(
50059 SDValue(User, 0), 0, DAG, SDLoc(N), RegVT.getSizeInBits());
50060 Extract = DAG.getBitcast(RegVT, Extract);
50061 return DCI.CombineTo(N, Extract, SDValue(User, 1));
50070 // Cast ptr32 and ptr64 pointers to the default address space before a load.
50071 unsigned AddrSpace = Ld->getAddressSpace();
50072 if (AddrSpace == X86AS::PTR64 || AddrSpace == X86AS::PTR32_SPTR ||
50073 AddrSpace == X86AS::PTR32_UPTR) {
50074 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
50075 if (PtrVT != Ld->getBasePtr().getSimpleValueType()) {
50076 SDValue Cast =
50077 DAG.getAddrSpaceCast(dl, PtrVT, Ld->getBasePtr(), AddrSpace, 0);
50078 return DAG.getExtLoad(Ext, dl, RegVT, Ld->getChain(), Cast,
50079 Ld->getPointerInfo(), MemVT, Ld->getOriginalAlign(),
50080 Ld->getMemOperand()->getFlags());
50084 return SDValue();
50087 /// If V is a build vector of boolean constants and exactly one of those
50088 /// constants is true, return the operand index of that true element.
50089 /// Otherwise, return -1.
50090 static int getOneTrueElt(SDValue V) {
50091 // This needs to be a build vector of booleans.
50092 // TODO: Checking for the i1 type matches the IR definition for the mask,
50093 // but the mask check could be loosened to i8 or other types. That might
50094 // also require checking more than 'allOnesValue'; eg, the x86 HW
50095 // instructions only require that the MSB is set for each mask element.
50096 // The ISD::MSTORE comments/definition do not specify how the mask operand
50097 // is formatted.
50098 auto *BV = dyn_cast<BuildVectorSDNode>(V);
50099 if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
50100 return -1;
50102 int TrueIndex = -1;
50103 unsigned NumElts = BV->getValueType(0).getVectorNumElements();
50104 for (unsigned i = 0; i < NumElts; ++i) {
50105 const SDValue &Op = BV->getOperand(i);
50106 if (Op.isUndef())
50107 continue;
50108 auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
50109 if (!ConstNode)
50110 return -1;
50111 if (ConstNode->getAPIntValue().countr_one() >= 1) {
50112 // If we already found a one, this is too many.
50113 if (TrueIndex >= 0)
50114 return -1;
50115 TrueIndex = i;
50118 return TrueIndex;
50121 /// Given a masked memory load/store operation, return true if it has one mask
50122 /// bit set. If it has one mask bit set, then also return the memory address of
50123 /// the scalar element to load/store, the vector index to insert/extract that
50124 /// scalar element, and the alignment for the scalar memory access.
50125 static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
50126 SelectionDAG &DAG, SDValue &Addr,
50127 SDValue &Index, Align &Alignment,
50128 unsigned &Offset) {
50129 int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
50130 if (TrueMaskElt < 0)
50131 return false;
50133 // Get the address of the one scalar element that is specified by the mask
50134 // using the appropriate offset from the base pointer.
50135 EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
50136 Offset = 0;
50137 Addr = MaskedOp->getBasePtr();
50138 if (TrueMaskElt != 0) {
50139 Offset = TrueMaskElt * EltVT.getStoreSize();
50140 Addr = DAG.getMemBasePlusOffset(Addr, TypeSize::getFixed(Offset),
50141 SDLoc(MaskedOp));
50144 Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
50145 Alignment = commonAlignment(MaskedOp->getOriginalAlign(),
50146 EltVT.getStoreSize());
50147 return true;
50150 /// If exactly one element of the mask is set for a non-extending masked load,
50151 /// it is a scalar load and vector insert.
50152 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
50153 /// mask have already been optimized in IR, so we don't bother with those here.
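/// E.g. (illustrative) a v4f32 masked load with mask <0,0,1,0> becomes a scalar
/// f32 load at base+8 plus an insert into the pass-through vector at index 2.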
50154 static SDValue
50155 reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
50156 TargetLowering::DAGCombinerInfo &DCI,
50157 const X86Subtarget &Subtarget) {
50158 assert(ML->isUnindexed() && "Unexpected indexed masked load!");
50159 // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
50160 // However, some target hooks may need to be added to know when the transform
50161 // is profitable. Endianness would also have to be considered.
50163 SDValue Addr, VecIndex;
50164 Align Alignment;
50165 unsigned Offset;
50166 if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment, Offset))
50167 return SDValue();
50169 // Load the one scalar element that is specified by the mask using the
50170 // appropriate offset from the base pointer.
50171 SDLoc DL(ML);
50172 EVT VT = ML->getValueType(0);
50173 EVT EltVT = VT.getVectorElementType();
50175 EVT CastVT = VT;
50176 if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
50177 EltVT = MVT::f64;
50178 CastVT = VT.changeVectorElementType(EltVT);
50181 SDValue Load =
50182 DAG.getLoad(EltVT, DL, ML->getChain(), Addr,
50183 ML->getPointerInfo().getWithOffset(Offset),
50184 Alignment, ML->getMemOperand()->getFlags());
50186 SDValue PassThru = DAG.getBitcast(CastVT, ML->getPassThru());
50188 // Insert the loaded element into the appropriate place in the vector.
50189 SDValue Insert =
50190 DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, CastVT, PassThru, Load, VecIndex);
50191 Insert = DAG.getBitcast(VT, Insert);
50192 return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
50195 static SDValue
50196 combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
50197 TargetLowering::DAGCombinerInfo &DCI) {
50198 assert(ML->isUnindexed() && "Unexpected indexed masked load!");
50199 if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
50200 return SDValue();
50202 SDLoc DL(ML);
50203 EVT VT = ML->getValueType(0);
50205 // If we are loading the first and last elements of a vector, it is safe and
50206 // always faster to load the whole vector. Replace the masked load with a
50207 // vector load and select.
50208 unsigned NumElts = VT.getVectorNumElements();
50209 BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
50210 bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
50211 bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
50212 if (LoadFirstElt && LoadLastElt) {
50213 SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
50214 ML->getMemOperand());
50215 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd,
50216 ML->getPassThru());
50217 return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
50220 // Convert a masked load with a constant mask into a masked load and a select.
50221 // This allows the select operation to use a faster kind of select instruction
50222 // (for example, vblendvps -> vblendps).
50224 // Don't try this if the pass-through operand is already undefined. That would
50225 // cause an infinite loop because that's what we're about to create.
50226 if (ML->getPassThru().isUndef())
50227 return SDValue();
50229 if (ISD::isBuildVectorAllZeros(ML->getPassThru().getNode()))
50230 return SDValue();
50232 // The new masked load has an undef pass-through operand. The select uses the
50233 // original pass-through operand.
50234 SDValue NewML = DAG.getMaskedLoad(
50235 VT, DL, ML->getChain(), ML->getBasePtr(), ML->getOffset(), ML->getMask(),
50236 DAG.getUNDEF(VT), ML->getMemoryVT(), ML->getMemOperand(),
50237 ML->getAddressingMode(), ML->getExtensionType());
50238 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML,
50239 ML->getPassThru());
50241 return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
50244 static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
50245 TargetLowering::DAGCombinerInfo &DCI,
50246 const X86Subtarget &Subtarget) {
50247 auto *Mld = cast<MaskedLoadSDNode>(N);
50249 // TODO: Expanding loads with a constant mask may be optimized as well.
50250 if (Mld->isExpandingLoad())
50251 return SDValue();
50253 if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
50254 if (SDValue ScalarLoad =
50255 reduceMaskedLoadToScalarLoad(Mld, DAG, DCI, Subtarget))
50256 return ScalarLoad;
50258 // TODO: Do some AVX512 subsets benefit from this transform?
50259 if (!Subtarget.hasAVX512())
50260 if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
50261 return Blend;
50264 // If the mask value has been legalized to a non-boolean vector, try to
50265 // simplify ops leading up to it. We only demand the MSB of each lane.
50266 SDValue Mask = Mld->getMask();
50267 if (Mask.getScalarValueSizeInBits() != 1) {
50268 EVT VT = Mld->getValueType(0);
50269 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50270 APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
50271 if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
50272 if (N->getOpcode() != ISD::DELETED_NODE)
50273 DCI.AddToWorklist(N);
50274 return SDValue(N, 0);
50276 if (SDValue NewMask =
50277 TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
50278 return DAG.getMaskedLoad(
50279 VT, SDLoc(N), Mld->getChain(), Mld->getBasePtr(), Mld->getOffset(),
50280 NewMask, Mld->getPassThru(), Mld->getMemoryVT(), Mld->getMemOperand(),
50281 Mld->getAddressingMode(), Mld->getExtensionType());
50284 return SDValue();
50287 /// If exactly one element of the mask is set for a non-truncating masked store,
50288 /// replace it with a vector extract and a scalar store.
50289 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
50290 /// mask have already been optimized in IR, so we don't bother with those here.
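/// For example, a masked store of v4f32 with mask <0,0,1,0> becomes an extract
/// of element 2 followed by a scalar f32 store at the corresponding offset from
/// the base pointer.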
50291 static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
50292 SelectionDAG &DAG,
50293 const X86Subtarget &Subtarget) {
50294 // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
50295 // However, some target hooks may need to be added to know when the transform
50296 // is profitable. Endianness would also have to be considered.
50298 SDValue Addr, VecIndex;
50299 Align Alignment;
50300 unsigned Offset;
50301 if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment, Offset))
50302 return SDValue();
50304 // Extract the one scalar element that is actually being stored.
50305 SDLoc DL(MS);
50306 SDValue Value = MS->getValue();
50307 EVT VT = Value.getValueType();
50308 EVT EltVT = VT.getVectorElementType();
50309 if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
50310 EltVT = MVT::f64;
50311 EVT CastVT = VT.changeVectorElementType(EltVT);
50312 Value = DAG.getBitcast(CastVT, Value);
50314 SDValue Extract =
50315 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Value, VecIndex);
50317 // Store that element at the appropriate offset from the base pointer.
50318 return DAG.getStore(MS->getChain(), DL, Extract, Addr,
50319 MS->getPointerInfo().getWithOffset(Offset),
50320 Alignment, MS->getMemOperand()->getFlags());
50323 static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
50324 TargetLowering::DAGCombinerInfo &DCI,
50325 const X86Subtarget &Subtarget) {
50326 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
50327 if (Mst->isCompressingStore())
50328 return SDValue();
50330 EVT VT = Mst->getValue().getValueType();
50331 SDLoc dl(Mst);
50332 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50334 if (Mst->isTruncatingStore())
50335 return SDValue();
50337 if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG, Subtarget))
50338 return ScalarStore;
50340 // If the mask value has been legalized to a non-boolean vector, try to
50341 // simplify ops leading up to it. We only demand the MSB of each lane.
50342 SDValue Mask = Mst->getMask();
50343 if (Mask.getScalarValueSizeInBits() != 1) {
50344 APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
50345 if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
50346 if (N->getOpcode() != ISD::DELETED_NODE)
50347 DCI.AddToWorklist(N);
50348 return SDValue(N, 0);
50350 if (SDValue NewMask =
50351 TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
50352 return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Mst->getValue(),
50353 Mst->getBasePtr(), Mst->getOffset(), NewMask,
50354 Mst->getMemoryVT(), Mst->getMemOperand(),
50355 Mst->getAddressingMode());
50358 SDValue Value = Mst->getValue();
50359 if (Value.getOpcode() == ISD::TRUNCATE && Value.getNode()->hasOneUse() &&
50360 TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
50361 Mst->getMemoryVT())) {
50362 return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Value.getOperand(0),
50363 Mst->getBasePtr(), Mst->getOffset(), Mask,
50364 Mst->getMemoryVT(), Mst->getMemOperand(),
50365 Mst->getAddressingMode(), true);
50368 return SDValue();
50371 static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
50372 TargetLowering::DAGCombinerInfo &DCI,
50373 const X86Subtarget &Subtarget) {
50374 StoreSDNode *St = cast<StoreSDNode>(N);
50375 EVT StVT = St->getMemoryVT();
50376 SDLoc dl(St);
50377 SDValue StoredVal = St->getValue();
50378 EVT VT = StoredVal.getValueType();
50379 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50381 // Convert a store of vXi1 into a store of iX and a bitcast.
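// (For example, a v8i1 store becomes a bitcast to i8 and an i8 store; v16i1
// becomes i16, and so on.)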
50382 if (!Subtarget.hasAVX512() && VT == StVT && VT.isVector() &&
50383 VT.getVectorElementType() == MVT::i1) {
50385 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
50386 StoredVal = DAG.getBitcast(NewVT, StoredVal);
50388 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
50389 St->getPointerInfo(), St->getOriginalAlign(),
50390 St->getMemOperand()->getFlags());
50393 // If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
50394 // This will avoid a copy to k-register.
50395 if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
50396 StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
50397 StoredVal.getOperand(0).getValueType() == MVT::i8) {
50398 SDValue Val = StoredVal.getOperand(0);
50399 // We must store zeros to the unused bits.
50400 Val = DAG.getZeroExtendInReg(Val, dl, MVT::i1);
50401 return DAG.getStore(St->getChain(), dl, Val,
50402 St->getBasePtr(), St->getPointerInfo(),
50403 St->getOriginalAlign(),
50404 St->getMemOperand()->getFlags());
50407 // Widen v1i1/v2i1/v4i1 stores to v8i1.
50408 if ((VT == MVT::v1i1 || VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
50409 Subtarget.hasAVX512()) {
50410 unsigned NumConcats = 8 / VT.getVectorNumElements();
50411 // We must store zeros to the unused bits.
50412 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, dl, VT));
50413 Ops[0] = StoredVal;
50414 StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
50415 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
50416 St->getPointerInfo(), St->getOriginalAlign(),
50417 St->getMemOperand()->getFlags());
50420 // Turn vXi1 stores of constants into scalar stores.
50421 if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
50422 VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
50423 ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
50424 // If it's a v64i1 store without 64-bit support, we need two stores.
50425 if (!DCI.isBeforeLegalize() && VT == MVT::v64i1 && !Subtarget.is64Bit()) {
50426 SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
50427 StoredVal->ops().slice(0, 32));
50428 Lo = combinevXi1ConstantToInteger(Lo, DAG);
50429 SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
50430 StoredVal->ops().slice(32, 32));
50431 Hi = combinevXi1ConstantToInteger(Hi, DAG);
50433 SDValue Ptr0 = St->getBasePtr();
50434 SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, TypeSize::getFixed(4), dl);
50436 SDValue Ch0 =
50437 DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
50438 St->getOriginalAlign(),
50439 St->getMemOperand()->getFlags());
50440 SDValue Ch1 =
50441 DAG.getStore(St->getChain(), dl, Hi, Ptr1,
50442 St->getPointerInfo().getWithOffset(4),
50443 St->getOriginalAlign(),
50444 St->getMemOperand()->getFlags());
50445 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
50448 StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
50449 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
50450 St->getPointerInfo(), St->getOriginalAlign(),
50451 St->getMemOperand()->getFlags());
50454 // If we are saving a 32-byte vector and 32-byte stores are slow, such as on
50455 // Sandy Bridge, perform two 16-byte stores.
50456 unsigned Fast;
50457 if (VT.is256BitVector() && StVT == VT &&
50458 TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
50459 *St->getMemOperand(), &Fast) &&
50460 !Fast) {
50461 unsigned NumElems = VT.getVectorNumElements();
50462 if (NumElems < 2)
50463 return SDValue();
50465 return splitVectorStore(St, DAG);
50468 // Split under-aligned vector non-temporal stores.
50469 if (St->isNonTemporal() && StVT == VT &&
50470 St->getAlign().value() < VT.getStoreSize()) {
50471 // ZMM/YMM nt-stores - either it can be stored as a series of shorter
50472 // vectors or the legalizer can scalarize it to use MOVNTI.
50473 if (VT.is256BitVector() || VT.is512BitVector()) {
50474 unsigned NumElems = VT.getVectorNumElements();
50475 if (NumElems < 2)
50476 return SDValue();
50477 return splitVectorStore(St, DAG);
50480 // XMM nt-stores - scalarize this to f64 nt-stores on SSE4A, else i32/i64
50481 // to use MOVNTI.
50482 if (VT.is128BitVector() && Subtarget.hasSSE2()) {
50483 MVT NTVT = Subtarget.hasSSE4A()
50484 ? MVT::v2f64
50485 : (TLI.isTypeLegal(MVT::i64) ? MVT::v2i64 : MVT::v4i32);
50486 return scalarizeVectorStore(St, NTVT, DAG);
50490 // Try to optimize v16i16->v16i8 truncating stores when BWI is not
50491 // supported, but AVX512F is, by extending to v16i32 and truncating.
50492 if (!St->isTruncatingStore() && VT == MVT::v16i8 && !Subtarget.hasBWI() &&
50493 St->getValue().getOpcode() == ISD::TRUNCATE &&
50494 St->getValue().getOperand(0).getValueType() == MVT::v16i16 &&
50495 TLI.isTruncStoreLegal(MVT::v16i32, MVT::v16i8) &&
50496 St->getValue().hasOneUse() && !DCI.isBeforeLegalizeOps()) {
50497 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v16i32,
50498 St->getValue().getOperand(0));
50499 return DAG.getTruncStore(St->getChain(), dl, Ext, St->getBasePtr(),
50500 MVT::v16i8, St->getMemOperand());
50503 // Try to fold a VTRUNCUS or VTRUNCS into a truncating store.
50504 if (!St->isTruncatingStore() &&
50505 (StoredVal.getOpcode() == X86ISD::VTRUNCUS ||
50506 StoredVal.getOpcode() == X86ISD::VTRUNCS) &&
50507 StoredVal.hasOneUse() &&
50508 TLI.isTruncStoreLegal(StoredVal.getOperand(0).getValueType(), VT)) {
50509 bool IsSigned = StoredVal.getOpcode() == X86ISD::VTRUNCS;
50510 return EmitTruncSStore(IsSigned, St->getChain(),
50511 dl, StoredVal.getOperand(0), St->getBasePtr(),
50512 VT, St->getMemOperand(), DAG);
50515 // Try to fold an extract_element(VTRUNC) pattern into a truncating store.
50516 if (!St->isTruncatingStore()) {
50517 auto IsExtractedElement = [](SDValue V) {
50518 if (V.getOpcode() == ISD::TRUNCATE && V.hasOneUse())
50519 V = V.getOperand(0);
50520 unsigned Opc = V.getOpcode();
50521 if ((Opc == ISD::EXTRACT_VECTOR_ELT || Opc == X86ISD::PEXTRW) &&
50522 isNullConstant(V.getOperand(1)) && V.hasOneUse() &&
50523 V.getOperand(0).hasOneUse())
50524 return V.getOperand(0);
50525 return SDValue();
50527 if (SDValue Extract = IsExtractedElement(StoredVal)) {
50528 SDValue Trunc = peekThroughOneUseBitcasts(Extract);
50529 if (Trunc.getOpcode() == X86ISD::VTRUNC) {
50530 SDValue Src = Trunc.getOperand(0);
50531 MVT DstVT = Trunc.getSimpleValueType();
50532 MVT SrcVT = Src.getSimpleValueType();
50533 unsigned NumSrcElts = SrcVT.getVectorNumElements();
50534 unsigned NumTruncBits = DstVT.getScalarSizeInBits() * NumSrcElts;
50535 MVT TruncVT = MVT::getVectorVT(DstVT.getScalarType(), NumSrcElts);
50536 if (NumTruncBits == VT.getSizeInBits() &&
50537 TLI.isTruncStoreLegal(SrcVT, TruncVT)) {
50538 return DAG.getTruncStore(St->getChain(), dl, Src, St->getBasePtr(),
50539 TruncVT, St->getMemOperand());
50545 // Optimize trunc store (of multiple scalars) to shuffle and store.
50546 // First, pack all of the elements in one place. Next, store to memory
50547 // in fewer chunks.
50548 if (St->isTruncatingStore() && VT.isVector()) {
50549 // Check if we can detect an AVG pattern from the truncation. If yes,
50550 // replace the trunc store with a normal store of the result of the
50551 // X86ISD::AVG instruction.
50552 if (DCI.isBeforeLegalize() || TLI.isTypeLegal(St->getMemoryVT()))
50553 if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
50554 Subtarget, dl))
50555 return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
50556 St->getPointerInfo(), St->getOriginalAlign(),
50557 St->getMemOperand()->getFlags());
50559 if (TLI.isTruncStoreLegal(VT, StVT)) {
50560 if (SDValue Val = detectSSatPattern(St->getValue(), St->getMemoryVT()))
50561 return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
50562 dl, Val, St->getBasePtr(),
50563 St->getMemoryVT(), St->getMemOperand(), DAG);
50564 if (SDValue Val = detectUSatPattern(St->getValue(), St->getMemoryVT(),
50565 DAG, dl))
50566 return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
50567 dl, Val, St->getBasePtr(),
50568 St->getMemoryVT(), St->getMemOperand(), DAG);
50571 return SDValue();
50574 // Cast ptr32 and ptr64 pointers to the default address space before a store.
50575 unsigned AddrSpace = St->getAddressSpace();
50576 if (AddrSpace == X86AS::PTR64 || AddrSpace == X86AS::PTR32_SPTR ||
50577 AddrSpace == X86AS::PTR32_UPTR) {
50578 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
50579 if (PtrVT != St->getBasePtr().getSimpleValueType()) {
50580 SDValue Cast =
50581 DAG.getAddrSpaceCast(dl, PtrVT, St->getBasePtr(), AddrSpace, 0);
50582 return DAG.getTruncStore(
50583 St->getChain(), dl, StoredVal, Cast, St->getPointerInfo(), StVT,
50584 St->getOriginalAlign(), St->getMemOperand()->getFlags(),
50585 St->getAAInfo());
50589 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
50590 // the FP state in cases where an emms may be missing.
50591 // A preferable solution to the general problem is to figure out the right
50592 // places to insert EMMS. This qualifies as a quick hack.
50594 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
50595 if (VT.getSizeInBits() != 64)
50596 return SDValue();
50598 const Function &F = DAG.getMachineFunction().getFunction();
50599 bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
50600 bool F64IsLegal =
50601 !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
50603 if (!F64IsLegal || Subtarget.is64Bit())
50604 return SDValue();
50606 if (VT == MVT::i64 && isa<LoadSDNode>(St->getValue()) &&
50607 cast<LoadSDNode>(St->getValue())->isSimple() &&
50608 St->getChain().hasOneUse() && St->isSimple()) {
50609 auto *Ld = cast<LoadSDNode>(St->getValue());
50611 if (!ISD::isNormalLoad(Ld))
50612 return SDValue();
50614 // Avoid the transformation if there are multiple uses of the loaded value.
50615 if (!Ld->hasNUsesOfValue(1, 0))
50616 return SDValue();
50618 SDLoc LdDL(Ld);
50619 SDLoc StDL(N);
50620 // Lower to a single movq load/store pair.
50621 SDValue NewLd = DAG.getLoad(MVT::f64, LdDL, Ld->getChain(),
50622 Ld->getBasePtr(), Ld->getMemOperand());
50624 // Make sure new load is placed in same chain order.
50625 DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
50626 return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
50627 St->getMemOperand());
50630 // This is similar to the above case, but here we handle a scalar 64-bit
50631 // integer store that is extracted from a vector on a 32-bit target.
50632 // If we have SSE2, then we can treat it like a floating-point double
50633 // to get past legalization. The execution dependencies fixup pass will
50634 // choose the optimal machine instruction for the store if this really is
50635 // an integer or v2f32 rather than an f64.
50636 if (VT == MVT::i64 &&
50637 St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
50638 SDValue OldExtract = St->getOperand(1);
50639 SDValue ExtOp0 = OldExtract.getOperand(0);
50640 unsigned VecSize = ExtOp0.getValueSizeInBits();
50641 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
50642 SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
50643 SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
50644 BitCast, OldExtract.getOperand(1));
50645 return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
50646 St->getPointerInfo(), St->getOriginalAlign(),
50647 St->getMemOperand()->getFlags());
50650 return SDValue();
50653 static SDValue combineVEXTRACT_STORE(SDNode *N, SelectionDAG &DAG,
50654 TargetLowering::DAGCombinerInfo &DCI,
50655 const X86Subtarget &Subtarget) {
50656 auto *St = cast<MemIntrinsicSDNode>(N);
50658 SDValue StoredVal = N->getOperand(1);
50659 MVT VT = StoredVal.getSimpleValueType();
50660 EVT MemVT = St->getMemoryVT();
50662 // Figure out which elements we demand.
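// For example, if MemVT is 64 bits and the stored vector has i32 elements,
// only the low two elements are demanded.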
50663 unsigned StElts = MemVT.getSizeInBits() / VT.getScalarSizeInBits();
50664 APInt DemandedElts = APInt::getLowBitsSet(VT.getVectorNumElements(), StElts);
50666 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50667 if (TLI.SimplifyDemandedVectorElts(StoredVal, DemandedElts, DCI)) {
50668 if (N->getOpcode() != ISD::DELETED_NODE)
50669 DCI.AddToWorklist(N);
50670 return SDValue(N, 0);
50673 return SDValue();
50676 /// Return 'true' if this vector operation is "horizontal"
50677 /// and return the operands for the horizontal operation in LHS and RHS. A
50678 /// horizontal operation performs the binary operation on successive elements
50679 /// of its first operand, then on successive elements of its second operand,
50680 /// returning the resulting values in a vector. For example, if
50681 /// A = < float a0, float a1, float a2, float a3 >
50682 /// and
50683 /// B = < float b0, float b1, float b2, float b3 >
50684 /// then the result of doing a horizontal operation on A and B is
50685 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
50686 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
50687 /// A horizontal-op B, for some already available A and B, and if so then LHS is
50688 /// set to A, RHS to B, and the routine returns 'true'.
50689 static bool isHorizontalBinOp(unsigned HOpcode, SDValue &LHS, SDValue &RHS,
50690 SelectionDAG &DAG, const X86Subtarget &Subtarget,
50691 bool IsCommutative,
50692 SmallVectorImpl<int> &PostShuffleMask) {
50693 // If either operand is undef, bail out. The binop should be simplified.
50694 if (LHS.isUndef() || RHS.isUndef())
50695 return false;
50697 // Look for the following pattern:
50698 // A = < float a0, float a1, float a2, float a3 >
50699 // B = < float b0, float b1, float b2, float b3 >
50700 // and
50701 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
50702 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
50703 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
50704 // which is A horizontal-op B.
50706 MVT VT = LHS.getSimpleValueType();
50707 assert((VT.is128BitVector() || VT.is256BitVector()) &&
50708 "Unsupported vector type for horizontal add/sub");
50709 unsigned NumElts = VT.getVectorNumElements();
50711 auto GetShuffle = [&](SDValue Op, SDValue &N0, SDValue &N1,
50712 SmallVectorImpl<int> &ShuffleMask) {
50713 bool UseSubVector = false;
50714 if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
50715 Op.getOperand(0).getValueType().is256BitVector() &&
50716 llvm::isNullConstant(Op.getOperand(1))) {
50717 Op = Op.getOperand(0);
50718 UseSubVector = true;
50720 SmallVector<SDValue, 2> SrcOps;
50721 SmallVector<int, 16> SrcMask, ScaledMask;
50722 SDValue BC = peekThroughBitcasts(Op);
50723 if (getTargetShuffleInputs(BC, SrcOps, SrcMask, DAG) &&
50724 !isAnyZero(SrcMask) && all_of(SrcOps, [BC](SDValue Op) {
50725 return Op.getValueSizeInBits() == BC.getValueSizeInBits();
50726 })) {
50727 resolveTargetShuffleInputsAndMask(SrcOps, SrcMask);
50728 if (!UseSubVector && SrcOps.size() <= 2 &&
50729 scaleShuffleElements(SrcMask, NumElts, ScaledMask)) {
50730 N0 = !SrcOps.empty() ? SrcOps[0] : SDValue();
50731 N1 = SrcOps.size() > 1 ? SrcOps[1] : SDValue();
50732 ShuffleMask.assign(ScaledMask.begin(), ScaledMask.end());
50734 if (UseSubVector && SrcOps.size() == 1 &&
50735 scaleShuffleElements(SrcMask, 2 * NumElts, ScaledMask)) {
50736 std::tie(N0, N1) = DAG.SplitVector(SrcOps[0], SDLoc(Op));
50737 ArrayRef<int> Mask = ArrayRef<int>(ScaledMask).slice(0, NumElts);
50738 ShuffleMask.assign(Mask.begin(), Mask.end());
50743 // View LHS in the form
50744 // LHS = VECTOR_SHUFFLE A, B, LMask
50745 // If LHS is not a shuffle, then pretend it is the identity shuffle:
50746 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
50747 // NOTE: A default initialized SDValue represents an UNDEF of type VT.
50748 SDValue A, B;
50749 SmallVector<int, 16> LMask;
50750 GetShuffle(LHS, A, B, LMask);
50752 // Likewise, view RHS in the form
50753 // RHS = VECTOR_SHUFFLE C, D, RMask
50754 SDValue C, D;
50755 SmallVector<int, 16> RMask;
50756 GetShuffle(RHS, C, D, RMask);
50758 // At least one of the operands should be a vector shuffle.
50759 unsigned NumShuffles = (LMask.empty() ? 0 : 1) + (RMask.empty() ? 0 : 1);
50760 if (NumShuffles == 0)
50761 return false;
50763 if (LMask.empty()) {
50764 A = LHS;
50765 for (unsigned i = 0; i != NumElts; ++i)
50766 LMask.push_back(i);
50769 if (RMask.empty()) {
50770 C = RHS;
50771 for (unsigned i = 0; i != NumElts; ++i)
50772 RMask.push_back(i);
50775 // If we have a unary mask, ensure the other op is set to null.
50776 if (isUndefOrInRange(LMask, 0, NumElts))
50777 B = SDValue();
50778 else if (isUndefOrInRange(LMask, NumElts, NumElts * 2))
50779 A = SDValue();
50781 if (isUndefOrInRange(RMask, 0, NumElts))
50782 D = SDValue();
50783 else if (isUndefOrInRange(RMask, NumElts, NumElts * 2))
50784 C = SDValue();
50786 // If A and B occur in reverse order in RHS, then canonicalize by commuting
50787 // RHS operands and shuffle mask.
50788 if (A != C) {
50789 std::swap(C, D);
50790 ShuffleVectorSDNode::commuteMask(RMask);
50792 // Check that the shuffles are both shuffling the same vectors.
50793 if (!(A == C && B == D))
50794 return false;
50796 PostShuffleMask.clear();
50797 PostShuffleMask.append(NumElts, SM_SentinelUndef);
50799 // LHS and RHS are now:
50800 // LHS = shuffle A, B, LMask
50801 // RHS = shuffle A, B, RMask
50802 // Check that the masks correspond to performing a horizontal operation.
50803 // AVX defines horizontal add/sub to operate independently on 128-bit lanes,
50804 // so we just repeat the inner loop if this is a 256-bit op.
50805 unsigned Num128BitChunks = VT.getSizeInBits() / 128;
50806 unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
50807 unsigned NumEltsPer64BitChunk = NumEltsPer128BitChunk / 2;
50808 assert((NumEltsPer128BitChunk % 2 == 0) &&
50809 "Vector type should have an even number of elements in each lane");
50810 for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
50811 for (unsigned i = 0; i != NumEltsPer128BitChunk; ++i) {
50812 // Ignore undefined components.
50813 int LIdx = LMask[i + j], RIdx = RMask[i + j];
50814 if (LIdx < 0 || RIdx < 0 ||
50815 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
50816 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
50817 continue;
50819 // Check that successive odd/even elements are being operated on. If not,
50820 // this is not a horizontal operation.
50821 if (!((RIdx & 1) == 1 && (LIdx + 1) == RIdx) &&
50822 !((LIdx & 1) == 1 && (RIdx + 1) == LIdx && IsCommutative))
50823 return false;
50825 // Compute the post-shuffle mask index based on where the element
50826 // is stored in the HOP result, and where it needs to be moved to.
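// For example, for v8f32 HADD (NumElts = 8, NumEltsPer128BitChunk = 4) the
// result layout is <a0+a1, a2+a3, b0+b1, b2+b3, a4+a5, a6+a7, b4+b5, b6+b7>,
// so LIdx = 8 (b0) gives Base = 8 and Index = 0 + 0 = 0, then +2 below because
// Base >= NumElts, landing b0+b1 in slot 2.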
50827 int Base = LIdx & ~1u;
50828 int Index = ((Base % NumEltsPer128BitChunk) / 2) +
50829 ((Base % NumElts) & ~(NumEltsPer128BitChunk - 1));
50831 // The low half of the 128-bit result must choose from A.
50832 // The high half of the 128-bit result must choose from B,
50833 // unless B is undef. In that case, we are always choosing from A.
50834 if ((B && Base >= (int)NumElts) || (!B && i >= NumEltsPer64BitChunk))
50835 Index += NumEltsPer64BitChunk;
50836 PostShuffleMask[i + j] = Index;
50840 SDValue NewLHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
50841 SDValue NewRHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
50843 bool IsIdentityPostShuffle =
50844 isSequentialOrUndefInRange(PostShuffleMask, 0, NumElts, 0);
50845 if (IsIdentityPostShuffle)
50846 PostShuffleMask.clear();
50848 // Avoid 128-bit multi lane shuffles if pre-AVX2 and FP (integer will split).
50849 if (!IsIdentityPostShuffle && !Subtarget.hasAVX2() && VT.isFloatingPoint() &&
50850 isMultiLaneShuffleMask(128, VT.getScalarSizeInBits(), PostShuffleMask))
50851 return false;
50853 // If the source nodes are already used in HorizOps then always accept this.
50854 // Shuffle folding should merge these back together.
50855 bool FoundHorizLHS = llvm::any_of(NewLHS->uses(), [&](SDNode *User) {
50856 return User->getOpcode() == HOpcode && User->getValueType(0) == VT;
50858 bool FoundHorizRHS = llvm::any_of(NewRHS->uses(), [&](SDNode *User) {
50859 return User->getOpcode() == HOpcode && User->getValueType(0) == VT;
50861 bool ForceHorizOp = FoundHorizLHS && FoundHorizRHS;
50863 // Assume a SingleSource HOP if we only shuffle one input and don't need to
50864 // shuffle the result.
50865 if (!ForceHorizOp &&
50866 !shouldUseHorizontalOp(NewLHS == NewRHS &&
50867 (NumShuffles < 2 || !IsIdentityPostShuffle),
50868 DAG, Subtarget))
50869 return false;
50871 LHS = DAG.getBitcast(VT, NewLHS);
50872 RHS = DAG.getBitcast(VT, NewRHS);
50873 return true;
50876 // Try to synthesize horizontal (f)hadd/hsub from (f)adds/subs of shuffles.
50877 static SDValue combineToHorizontalAddSub(SDNode *N, SelectionDAG &DAG,
50878 const X86Subtarget &Subtarget) {
50879 EVT VT = N->getValueType(0);
50880 unsigned Opcode = N->getOpcode();
50881 bool IsAdd = (Opcode == ISD::FADD) || (Opcode == ISD::ADD);
50882 SmallVector<int, 8> PostShuffleMask;
50884 switch (Opcode) {
50885 case ISD::FADD:
50886 case ISD::FSUB:
50887 if ((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
50888 (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
50889 SDValue LHS = N->getOperand(0);
50890 SDValue RHS = N->getOperand(1);
50891 auto HorizOpcode = IsAdd ? X86ISD::FHADD : X86ISD::FHSUB;
50892 if (isHorizontalBinOp(HorizOpcode, LHS, RHS, DAG, Subtarget, IsAdd,
50893 PostShuffleMask)) {
50894 SDValue HorizBinOp = DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);
50895 if (!PostShuffleMask.empty())
50896 HorizBinOp = DAG.getVectorShuffle(VT, SDLoc(HorizBinOp), HorizBinOp,
50897 DAG.getUNDEF(VT), PostShuffleMask);
50898 return HorizBinOp;
50901 break;
50902 case ISD::ADD:
50903 case ISD::SUB:
50904 if (Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
50905 VT == MVT::v16i16 || VT == MVT::v8i32)) {
50906 SDValue LHS = N->getOperand(0);
50907 SDValue RHS = N->getOperand(1);
50908 auto HorizOpcode = IsAdd ? X86ISD::HADD : X86ISD::HSUB;
50909 if (isHorizontalBinOp(HorizOpcode, LHS, RHS, DAG, Subtarget, IsAdd,
50910 PostShuffleMask)) {
50911 auto HOpBuilder = [HorizOpcode](SelectionDAG &DAG, const SDLoc &DL,
50912 ArrayRef<SDValue> Ops) {
50913 return DAG.getNode(HorizOpcode, DL, Ops[0].getValueType(), Ops);
50915 SDValue HorizBinOp = SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
50916 {LHS, RHS}, HOpBuilder);
50917 if (!PostShuffleMask.empty())
50918 HorizBinOp = DAG.getVectorShuffle(VT, SDLoc(HorizBinOp), HorizBinOp,
50919 DAG.getUNDEF(VT), PostShuffleMask);
50920 return HorizBinOp;
50923 break;
50926 return SDValue();
50929 // Try to combine the following nodes
50930 // t29: i64 = X86ISD::Wrapper TargetConstantPool:i64
50931 // <i32 -2147483648[float -0.000000e+00]> 0
50932 // t27: v16i32[v16f32],ch = X86ISD::VBROADCAST_LOAD
50933 // <(load 4 from constant-pool)> t0, t29
50934 // [t30: v16i32 = bitcast t27]
50935 // t6: v16i32 = xor t7, t27[t30]
50936 // t11: v16f32 = bitcast t6
50937 // t21: v16f32 = X86ISD::VFMULC[X86ISD::VCFMULC] t11, t8
50938 // into X86ISD::VFCMULC[X86ISD::VFMULC] if possible:
50939 // t22: v16f32 = bitcast t7
50940 // t23: v16f32 = X86ISD::VFCMULC[X86ISD::VFMULC] t8, t22
50941 // t24: v32f16 = bitcast t23
50942 static SDValue combineFMulcFCMulc(SDNode *N, SelectionDAG &DAG,
50943 const X86Subtarget &Subtarget) {
50944 EVT VT = N->getValueType(0);
50945 SDValue LHS = N->getOperand(0);
50946 SDValue RHS = N->getOperand(1);
50947 int CombineOpcode =
50948 N->getOpcode() == X86ISD::VFCMULC ? X86ISD::VFMULC : X86ISD::VFCMULC;
50949 auto combineConjugation = [&](SDValue &r) {
50950 if (LHS->getOpcode() == ISD::BITCAST && RHS.hasOneUse()) {
50951 SDValue XOR = LHS.getOperand(0);
50952 if (XOR->getOpcode() == ISD::XOR && XOR.hasOneUse()) {
50953 KnownBits XORRHS = DAG.computeKnownBits(XOR.getOperand(1));
50954 if (XORRHS.isConstant()) {
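// A per-lane XOR with 0x80000000 flips only the sign bit of the upper f16
// half of each packed complex value, i.e. it negates the imaginary part,
// which is a complex conjugation.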
50955 APInt ConjugationInt32 = APInt(32, 0x80000000, true);
50956 APInt ConjugationInt64 = APInt(64, 0x8000000080000000ULL, true);
50957 if ((XORRHS.getBitWidth() == 32 &&
50958 XORRHS.getConstant() == ConjugationInt32) ||
50959 (XORRHS.getBitWidth() == 64 &&
50960 XORRHS.getConstant() == ConjugationInt64)) {
50961 SelectionDAG::FlagInserter FlagsInserter(DAG, N);
50962 SDValue I2F = DAG.getBitcast(VT, LHS.getOperand(0).getOperand(0));
50963 SDValue FCMulC = DAG.getNode(CombineOpcode, SDLoc(N), VT, RHS, I2F);
50964 r = DAG.getBitcast(VT, FCMulC);
50965 return true;
50970 return false;
50972 SDValue Res;
50973 if (combineConjugation(Res))
50974 return Res;
50975 std::swap(LHS, RHS);
50976 if (combineConjugation(Res))
50977 return Res;
50978 return Res;
50981 // Try to combine the following nodes:
50982 // FADD(A, FMA(B, C, 0)) and FADD(A, FMUL(B, C)) to FMA(B, C, A)
50983 static SDValue combineFaddCFmul(SDNode *N, SelectionDAG &DAG,
50984 const X86Subtarget &Subtarget) {
50985 auto AllowContract = [&DAG](const SDNodeFlags &Flags) {
50986 return DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast ||
50987 Flags.hasAllowContract();
50990 auto HasNoSignedZero = [&DAG](const SDNodeFlags &Flags) {
50991 return DAG.getTarget().Options.NoSignedZerosFPMath ||
50992 Flags.hasNoSignedZeros();
50994 auto IsVectorAllNegativeZero = [&DAG](SDValue Op) {
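// 0x80008000 is two IEEE half-precision negative zeros packed into one
// 32-bit lane, i.e. a splat of -0.0 halves.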
50995 APInt AI = APInt(32, 0x80008000, true);
50996 KnownBits Bits = DAG.computeKnownBits(Op);
50997 return Bits.getBitWidth() == 32 && Bits.isConstant() &&
50998 Bits.getConstant() == AI;
51001 if (N->getOpcode() != ISD::FADD || !Subtarget.hasFP16() ||
51002 !AllowContract(N->getFlags()))
51003 return SDValue();
51005 EVT VT = N->getValueType(0);
51006 if (VT != MVT::v8f16 && VT != MVT::v16f16 && VT != MVT::v32f16)
51007 return SDValue();
51009 SDValue LHS = N->getOperand(0);
51010 SDValue RHS = N->getOperand(1);
51011 bool IsConj;
51012 SDValue FAddOp1, MulOp0, MulOp1;
51013 auto GetCFmulFrom = [&MulOp0, &MulOp1, &IsConj, &AllowContract,
51014 &IsVectorAllNegativeZero,
51015 &HasNoSignedZero](SDValue N) -> bool {
51016 if (!N.hasOneUse() || N.getOpcode() != ISD::BITCAST)
51017 return false;
51018 SDValue Op0 = N.getOperand(0);
51019 unsigned Opcode = Op0.getOpcode();
51020 if (Op0.hasOneUse() && AllowContract(Op0->getFlags())) {
51021 if ((Opcode == X86ISD::VFMULC || Opcode == X86ISD::VFCMULC)) {
51022 MulOp0 = Op0.getOperand(0);
51023 MulOp1 = Op0.getOperand(1);
51024 IsConj = Opcode == X86ISD::VFCMULC;
51025 return true;
51027 if ((Opcode == X86ISD::VFMADDC || Opcode == X86ISD::VFCMADDC) &&
51028 ((ISD::isBuildVectorAllZeros(Op0->getOperand(2).getNode()) &&
51029 HasNoSignedZero(Op0->getFlags())) ||
51030 IsVectorAllNegativeZero(Op0->getOperand(2)))) {
51031 MulOp0 = Op0.getOperand(0);
51032 MulOp1 = Op0.getOperand(1);
51033 IsConj = Opcode == X86ISD::VFCMADDC;
51034 return true;
51037 return false;
51040 if (GetCFmulFrom(LHS))
51041 FAddOp1 = RHS;
51042 else if (GetCFmulFrom(RHS))
51043 FAddOp1 = LHS;
51044 else
51045 return SDValue();
51047 MVT CVT = MVT::getVectorVT(MVT::f32, VT.getVectorNumElements() / 2);
51048 FAddOp1 = DAG.getBitcast(CVT, FAddOp1);
51049 unsigned NewOp = IsConj ? X86ISD::VFCMADDC : X86ISD::VFMADDC;
51050 // FIXME: How do we handle when fast math flags of FADD are different from
51051 // CFMUL's?
51052 SDValue CFmul =
51053 DAG.getNode(NewOp, SDLoc(N), CVT, MulOp0, MulOp1, FAddOp1, N->getFlags());
51054 return DAG.getBitcast(VT, CFmul);
51057 /// Do target-specific dag combines on floating-point adds/subs.
51058 static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
51059 const X86Subtarget &Subtarget) {
51060 if (SDValue HOp = combineToHorizontalAddSub(N, DAG, Subtarget))
51061 return HOp;
51063 if (SDValue COp = combineFaddCFmul(N, DAG, Subtarget))
51064 return COp;
51066 return SDValue();
51069 /// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
51070 /// the codegen.
51071 /// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
51072 /// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
51073 /// anything that is guaranteed to be transformed by DAGCombiner.
51074 static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
51075 const X86Subtarget &Subtarget,
51076 const SDLoc &DL) {
51077 assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
51078 SDValue Src = N->getOperand(0);
51079 unsigned SrcOpcode = Src.getOpcode();
51080 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51082 EVT VT = N->getValueType(0);
51083 EVT SrcVT = Src.getValueType();
51085 auto IsFreeTruncation = [VT](SDValue Op) {
51086 unsigned TruncSizeInBits = VT.getScalarSizeInBits();
51088 // See if this has been extended from a smaller/equal size to
51089 // the truncation size, allowing a truncation to combine with the extend.
51090 unsigned Opcode = Op.getOpcode();
51091 if ((Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND ||
51092 Opcode == ISD::ZERO_EXTEND) &&
51093 Op.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
51094 return true;
51096 // See if this is a single-use constant which can be constant folded.
51097 // NOTE: We don't peek through bitcasts here because there is currently
51098 // no support for constant folding truncate+bitcast+vector_of_constants, so
51099 // we'd just end up with a truncate on both operands, which would get
51100 // turned back into (truncate (binop)), causing an infinite loop.
51101 return ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
51104 auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
51105 SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
51106 SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
51107 return DAG.getNode(SrcOpcode, DL, VT, Trunc0, Trunc1);
51110 // Don't combine if the operation has other uses.
51111 if (!Src.hasOneUse())
51112 return SDValue();
51114 // Only support vector truncation for now.
51115 // TODO: i64 scalar math would benefit as well.
51116 if (!VT.isVector())
51117 return SDValue();
51119 // In most cases it's only worth pre-truncating if we're only facing the cost
51120 // of one truncation.
51121 // i.e. if one of the inputs will constant fold or the input is repeated.
51122 switch (SrcOpcode) {
51123 case ISD::MUL:
51124 // X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) - it's
51125 // better to truncate if we have the chance.
51126 if (SrcVT.getScalarType() == MVT::i64 &&
51127 TLI.isOperationLegal(SrcOpcode, VT) &&
51128 !TLI.isOperationLegal(SrcOpcode, SrcVT))
51129 return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
51130 [[fallthrough]];
51131 case ISD::AND:
51132 case ISD::XOR:
51133 case ISD::OR:
51134 case ISD::ADD:
51135 case ISD::SUB: {
51136 SDValue Op0 = Src.getOperand(0);
51137 SDValue Op1 = Src.getOperand(1);
51138 if (TLI.isOperationLegal(SrcOpcode, VT) &&
51139 (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
51140 return TruncateArithmetic(Op0, Op1);
51141 break;
51145 return SDValue();
51148 // Try to form a MULHU or MULHS node by looking for
51149 // (trunc (srl (mul ext, ext), 16))
51150 // TODO: This is X86 specific because we want to be able to handle wide types
51151 // before type legalization. But we can only do it if the vector will be
51152 // legalized via widening/splitting. Type legalization can't handle promotion
51153 // of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
51154 // combiner.
51155 static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
51156 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
51157 // First instruction should be a right shift of a multiply.
51158 if (Src.getOpcode() != ISD::SRL ||
51159 Src.getOperand(0).getOpcode() != ISD::MUL)
51160 return SDValue();
51162 if (!Subtarget.hasSSE2())
51163 return SDValue();
51165 // Only handle vXi16 types that are at least 128-bits unless they will be
51166 // widened.
51167 if (!VT.isVector() || VT.getVectorElementType() != MVT::i16)
51168 return SDValue();
51170 // Input type should be at least vXi32.
51171 EVT InVT = Src.getValueType();
51172 if (InVT.getVectorElementType().getSizeInBits() < 32)
51173 return SDValue();
51175 // Need a shift by 16.
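// Shifting each product right by exactly 16 keeps the high half of the
// 16x16 multiply, which is what PMULHW/PMULHUW compute directly.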
51176 APInt ShiftAmt;
51177 if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
51178 ShiftAmt != 16)
51179 return SDValue();
51181 SDValue LHS = Src.getOperand(0).getOperand(0);
51182 SDValue RHS = Src.getOperand(0).getOperand(1);
51184 // Count leading sign/zero bits on both inputs - if there are enough then
51185 // truncation back to vXi16 will be cheap - either as a pack/shuffle
51186 // sequence or using AVX512 truncations. If the inputs are sext/zext then the
51187 // truncations may actually be free by peeking through to the ext source.
51188 auto IsSext = [&DAG](SDValue V) {
51189 return DAG.ComputeMaxSignificantBits(V) <= 16;
51191 auto IsZext = [&DAG](SDValue V) {
51192 return DAG.computeKnownBits(V).countMaxActiveBits() <= 16;
51195 bool IsSigned = IsSext(LHS) && IsSext(RHS);
51196 bool IsUnsigned = IsZext(LHS) && IsZext(RHS);
51197 if (!IsSigned && !IsUnsigned)
51198 return SDValue();
51200 // Check if both inputs are extensions, which will be removed by truncation.
51201 bool IsTruncateFree = (LHS.getOpcode() == ISD::SIGN_EXTEND ||
51202 LHS.getOpcode() == ISD::ZERO_EXTEND) &&
51203 (RHS.getOpcode() == ISD::SIGN_EXTEND ||
51204 RHS.getOpcode() == ISD::ZERO_EXTEND) &&
51205 LHS.getOperand(0).getScalarValueSizeInBits() <= 16 &&
51206 RHS.getOperand(0).getScalarValueSizeInBits() <= 16;
51208 // For AVX2+ targets, with the upper bits known zero, we can perform MULHU on
51209 // the (bitcasted) inputs directly, and then cheaply pack/truncate the result
51210 // (upper elts will be zero). Don't attempt this with just AVX512F as MULHU
51211 // will have to split anyway.
51212 unsigned InSizeInBits = InVT.getSizeInBits();
51213 if (IsUnsigned && !IsTruncateFree && Subtarget.hasInt256() &&
51214 !(Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.is256BitVector()) &&
51215 (InSizeInBits % 16) == 0) {
51216 EVT BCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
51217 InVT.getSizeInBits() / 16);
51218 SDValue Res = DAG.getNode(ISD::MULHU, DL, BCVT, DAG.getBitcast(BCVT, LHS),
51219 DAG.getBitcast(BCVT, RHS));
51220 return DAG.getNode(ISD::TRUNCATE, DL, VT, DAG.getBitcast(InVT, Res));
51223 // Truncate back to source type.
51224 LHS = DAG.getNode(ISD::TRUNCATE, DL, VT, LHS);
51225 RHS = DAG.getNode(ISD::TRUNCATE, DL, VT, RHS);
51227 unsigned Opc = IsSigned ? ISD::MULHS : ISD::MULHU;
51228 return DAG.getNode(Opc, DL, VT, LHS, RHS);
51231 // Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
51232 // from one vector with signed bytes from another vector, adds together
51233 // adjacent pairs of 16-bit products, and signed-saturates the sums to
51234 // 16 bits.
51236 // Which looks something like this:
51237 // (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
51238 // (mul (zext (odd elts (i8 A)), (sext (odd elts (i8 B))))))))
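// Note that the X86ISD::VPMADDUBSW node built below takes the zero-extended
// (unsigned) input as operand 0 and the sign-extended (signed) input as
// operand 1, matching the PMADDUBSW instruction's operand order.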
51239 static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
51240 const X86Subtarget &Subtarget,
51241 const SDLoc &DL) {
51242 if (!VT.isVector() || !Subtarget.hasSSSE3())
51243 return SDValue();
51245 unsigned NumElems = VT.getVectorNumElements();
51246 EVT ScalarVT = VT.getVectorElementType();
51247 if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
51248 return SDValue();
51250 SDValue SSatVal = detectSSatPattern(In, VT);
51251 if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
51252 return SDValue();
51254 // Ok this is a signed saturation of an ADD. See if this ADD is adding pairs
51255 // of multiplies from even/odd elements.
51256 SDValue N0 = SSatVal.getOperand(0);
51257 SDValue N1 = SSatVal.getOperand(1);
51259 if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
51260 return SDValue();
51262 SDValue N00 = N0.getOperand(0);
51263 SDValue N01 = N0.getOperand(1);
51264 SDValue N10 = N1.getOperand(0);
51265 SDValue N11 = N1.getOperand(1);
51267 // TODO: Handle constant vectors and use knownbits/computenumsignbits?
51268 // Canonicalize zero_extend to LHS.
51269 if (N01.getOpcode() == ISD::ZERO_EXTEND)
51270 std::swap(N00, N01);
51271 if (N11.getOpcode() == ISD::ZERO_EXTEND)
51272 std::swap(N10, N11);
51274 // Ensure we have a zero_extend and a sign_extend.
51275 if (N00.getOpcode() != ISD::ZERO_EXTEND ||
51276 N01.getOpcode() != ISD::SIGN_EXTEND ||
51277 N10.getOpcode() != ISD::ZERO_EXTEND ||
51278 N11.getOpcode() != ISD::SIGN_EXTEND)
51279 return SDValue();
51281 // Peek through the extends.
51282 N00 = N00.getOperand(0);
51283 N01 = N01.getOperand(0);
51284 N10 = N10.getOperand(0);
51285 N11 = N11.getOperand(0);
51287 // Ensure the extend is from vXi8.
51288 if (N00.getValueType().getVectorElementType() != MVT::i8 ||
51289 N01.getValueType().getVectorElementType() != MVT::i8 ||
51290 N10.getValueType().getVectorElementType() != MVT::i8 ||
51291 N11.getValueType().getVectorElementType() != MVT::i8)
51292 return SDValue();
51294 // All inputs should be build_vectors.
51295 if (N00.getOpcode() != ISD::BUILD_VECTOR ||
51296 N01.getOpcode() != ISD::BUILD_VECTOR ||
51297 N10.getOpcode() != ISD::BUILD_VECTOR ||
51298 N11.getOpcode() != ISD::BUILD_VECTOR)
51299 return SDValue();
51301 // N00/N10 are zero extended. N01/N11 are sign extended.
51303 // For each element, we need to ensure we have an odd element from one vector
51304 // multiplied by the odd element of another vector and the even element from
51305 // one of the same vectors being multiplied by the even element from the
51306 // other vector. So we need to make sure that, for each element i, this
51307 // operation is performed:
51308 // A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
51309 SDValue ZExtIn, SExtIn;
51310 for (unsigned i = 0; i != NumElems; ++i) {
51311 SDValue N00Elt = N00.getOperand(i);
51312 SDValue N01Elt = N01.getOperand(i);
51313 SDValue N10Elt = N10.getOperand(i);
51314 SDValue N11Elt = N11.getOperand(i);
51315 // TODO: Be more tolerant to undefs.
51316 if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
51317 N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
51318 N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
51319 N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
51320 return SDValue();
51321 auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
51322 auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
51323 auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
51324 auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
51325 if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
51326 return SDValue();
51327 unsigned IdxN00 = ConstN00Elt->getZExtValue();
51328 unsigned IdxN01 = ConstN01Elt->getZExtValue();
51329 unsigned IdxN10 = ConstN10Elt->getZExtValue();
51330 unsigned IdxN11 = ConstN11Elt->getZExtValue();
51331 // Add is commutative so indices can be reordered.
51332 if (IdxN00 > IdxN10) {
51333 std::swap(IdxN00, IdxN10);
51334 std::swap(IdxN01, IdxN11);
51336 // N0 indices must be the even element. N1 indices must be the next odd element.
51337 if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
51338 IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
51339 return SDValue();
51340 SDValue N00In = N00Elt.getOperand(0);
51341 SDValue N01In = N01Elt.getOperand(0);
51342 SDValue N10In = N10Elt.getOperand(0);
51343 SDValue N11In = N11Elt.getOperand(0);
51344 // The first time we find an input, capture it.
51345 if (!ZExtIn) {
51346 ZExtIn = N00In;
51347 SExtIn = N01In;
51349 if (ZExtIn != N00In || SExtIn != N01In ||
51350 ZExtIn != N10In || SExtIn != N11In)
51351 return SDValue();
51354 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
51355 ArrayRef<SDValue> Ops) {
51356 // Shrink by adding truncate nodes and let DAGCombine fold with the
51357 // sources.
51358 EVT InVT = Ops[0].getValueType();
51359 assert(InVT.getScalarType() == MVT::i8 &&
51360 "Unexpected scalar element type");
51361 assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
51362 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
51363 InVT.getVectorNumElements() / 2);
51364 return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
51366 return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
51367 PMADDBuilder);
51370 static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
51371 const X86Subtarget &Subtarget) {
51372 EVT VT = N->getValueType(0);
51373 SDValue Src = N->getOperand(0);
51374 SDLoc DL(N);
51376 // Attempt to pre-truncate inputs to arithmetic ops instead.
51377 if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
51378 return V;
51380 // Try to detect AVG pattern first.
51381 if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
51382 return Avg;
51384 // Try to detect a PMADDUBSW pattern.
51385 if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
51386 return PMAdd;
51388 // Try to combine truncation with signed/unsigned saturation.
51389 if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
51390 return Val;
51392 // Try to combine PMULHUW/PMULHW for vXi16.
51393 if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
51394 return V;
51396 // Detect a truncation to i32 of a bitcast whose source is a direct MMX
51397 // result (x86mmx) and lower it to MMX_MOVD2W.
51398 if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
51399 SDValue BCSrc = Src.getOperand(0);
51400 if (BCSrc.getValueType() == MVT::x86mmx)
51401 return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
51404 return SDValue();
51407 static SDValue combineVTRUNC(SDNode *N, SelectionDAG &DAG,
51408 TargetLowering::DAGCombinerInfo &DCI) {
51409 EVT VT = N->getValueType(0);
51410 SDValue In = N->getOperand(0);
51411 SDLoc DL(N);
51413 if (SDValue SSatVal = detectSSatPattern(In, VT))
51414 return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
51415 if (SDValue USatVal = detectUSatPattern(In, VT, DAG, DL))
51416 return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
51418 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51419 APInt DemandedMask(APInt::getAllOnes(VT.getScalarSizeInBits()));
51420 if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
51421 return SDValue(N, 0);
51423 return SDValue();
51426 /// Returns the negated value if the node \p N flips the sign of an FP value.
51428 /// FP-negation node may have different forms: FNEG(x), FXOR (x, 0x80000000)
51429 /// or FSUB(0, x)
51430 /// AVX512F does not have FXOR, so FNEG is lowered as
51431 /// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
51432 /// In this case we go through all bitcasts.
51433 /// This also recognizes splat of a negated value and returns the splat of that
51434 /// value.
51435 static SDValue isFNEG(SelectionDAG &DAG, SDNode *N, unsigned Depth = 0) {
51436 if (N->getOpcode() == ISD::FNEG)
51437 return N->getOperand(0);
51439 // Don't recurse exponentially.
51440 if (Depth > SelectionDAG::MaxRecursionDepth)
51441 return SDValue();
51443 unsigned ScalarSize = N->getValueType(0).getScalarSizeInBits();
51445 SDValue Op = peekThroughBitcasts(SDValue(N, 0));
51446 EVT VT = Op->getValueType(0);
51448 // Make sure the element size doesn't change.
51449 if (VT.getScalarSizeInBits() != ScalarSize)
51450 return SDValue();
51452 unsigned Opc = Op.getOpcode();
51453 switch (Opc) {
51454 case ISD::VECTOR_SHUFFLE: {
51455 // For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
51456 // of this is VECTOR_SHUFFLE(-VEC1, UNDEF). The mask can be anything here.
51457 if (!Op.getOperand(1).isUndef())
51458 return SDValue();
51459 if (SDValue NegOp0 = isFNEG(DAG, Op.getOperand(0).getNode(), Depth + 1))
51460 if (NegOp0.getValueType() == VT) // FIXME: Can we do better?
51461 return DAG.getVectorShuffle(VT, SDLoc(Op), NegOp0, DAG.getUNDEF(VT),
51462 cast<ShuffleVectorSDNode>(Op)->getMask());
51463 break;
51465 case ISD::INSERT_VECTOR_ELT: {
51466 // Negate of INSERT_VECTOR_ELT(UNDEF, V, INDEX) is INSERT_VECTOR_ELT(UNDEF,
51467 // -V, INDEX).
51468 SDValue InsVector = Op.getOperand(0);
51469 SDValue InsVal = Op.getOperand(1);
51470 if (!InsVector.isUndef())
51471 return SDValue();
51472 if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode(), Depth + 1))
51473 if (NegInsVal.getValueType() == VT.getVectorElementType()) // FIXME
51474 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), VT, InsVector,
51475 NegInsVal, Op.getOperand(2));
51476 break;
51478 case ISD::FSUB:
51479 case ISD::XOR:
51480 case X86ISD::FXOR: {
51481 SDValue Op1 = Op.getOperand(1);
51482 SDValue Op0 = Op.getOperand(0);
51484 // For XOR and FXOR, we want to check if constant
51485 // bits of Op1 are sign bit masks. For FSUB, we
51486 // have to check if constant bits of Op0 are sign
51487 // bit masks and hence we swap the operands.
51488 if (Opc == ISD::FSUB)
51489 std::swap(Op0, Op1);
51491 APInt UndefElts;
51492 SmallVector<APInt, 16> EltBits;
51493 // Extract constant bits and see if they are all
51494 // sign bit masks. Ignore the undef elements.
51495 if (getTargetConstantBitsFromNode(Op1, ScalarSize, UndefElts, EltBits,
51496 /* AllowWholeUndefs */ true,
51497 /* AllowPartialUndefs */ false)) {
51498 for (unsigned I = 0, E = EltBits.size(); I < E; I++)
51499 if (!UndefElts[I] && !EltBits[I].isSignMask())
51500 return SDValue();
51502 // Only allow bitcast from correctly-sized constant.
51503 Op0 = peekThroughBitcasts(Op0);
51504 if (Op0.getScalarValueSizeInBits() == ScalarSize)
51505 return Op0;
51507 break;
51508 } // case
51509 } // switch
51511 return SDValue();
51514 static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc,
51515 bool NegRes) {
51516 if (NegMul) {
51517 switch (Opcode) {
51518 default: llvm_unreachable("Unexpected opcode");
51519 case ISD::FMA: Opcode = X86ISD::FNMADD; break;
51520 case ISD::STRICT_FMA: Opcode = X86ISD::STRICT_FNMADD; break;
51521 case X86ISD::FMADD_RND: Opcode = X86ISD::FNMADD_RND; break;
51522 case X86ISD::FMSUB: Opcode = X86ISD::FNMSUB; break;
51523 case X86ISD::STRICT_FMSUB: Opcode = X86ISD::STRICT_FNMSUB; break;
51524 case X86ISD::FMSUB_RND: Opcode = X86ISD::FNMSUB_RND; break;
51525 case X86ISD::FNMADD: Opcode = ISD::FMA; break;
51526 case X86ISD::STRICT_FNMADD: Opcode = ISD::STRICT_FMA; break;
51527 case X86ISD::FNMADD_RND: Opcode = X86ISD::FMADD_RND; break;
51528 case X86ISD::FNMSUB: Opcode = X86ISD::FMSUB; break;
51529 case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FMSUB; break;
51530 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FMSUB_RND; break;
51534 if (NegAcc) {
51535 switch (Opcode) {
51536 default: llvm_unreachable("Unexpected opcode");
51537 case ISD::FMA: Opcode = X86ISD::FMSUB; break;
51538 case ISD::STRICT_FMA: Opcode = X86ISD::STRICT_FMSUB; break;
51539 case X86ISD::FMADD_RND: Opcode = X86ISD::FMSUB_RND; break;
51540 case X86ISD::FMSUB: Opcode = ISD::FMA; break;
51541 case X86ISD::STRICT_FMSUB: Opcode = ISD::STRICT_FMA; break;
51542 case X86ISD::FMSUB_RND: Opcode = X86ISD::FMADD_RND; break;
51543 case X86ISD::FNMADD: Opcode = X86ISD::FNMSUB; break;
51544 case X86ISD::STRICT_FNMADD: Opcode = X86ISD::STRICT_FNMSUB; break;
51545 case X86ISD::FNMADD_RND: Opcode = X86ISD::FNMSUB_RND; break;
51546 case X86ISD::FNMSUB: Opcode = X86ISD::FNMADD; break;
51547 case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FNMADD; break;
51548 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FNMADD_RND; break;
51549 case X86ISD::FMADDSUB: Opcode = X86ISD::FMSUBADD; break;
51550 case X86ISD::FMADDSUB_RND: Opcode = X86ISD::FMSUBADD_RND; break;
51551 case X86ISD::FMSUBADD: Opcode = X86ISD::FMADDSUB; break;
51552 case X86ISD::FMSUBADD_RND: Opcode = X86ISD::FMADDSUB_RND; break;
51556 if (NegRes) {
51557 switch (Opcode) {
51558 // For accuracy reasons, we never combine fneg and fma under strict FP.
51559 default: llvm_unreachable("Unexpected opcode");
51560 case ISD::FMA: Opcode = X86ISD::FNMSUB; break;
51561 case X86ISD::FMADD_RND: Opcode = X86ISD::FNMSUB_RND; break;
51562 case X86ISD::FMSUB: Opcode = X86ISD::FNMADD; break;
51563 case X86ISD::FMSUB_RND: Opcode = X86ISD::FNMADD_RND; break;
51564 case X86ISD::FNMADD: Opcode = X86ISD::FMSUB; break;
51565 case X86ISD::FNMADD_RND: Opcode = X86ISD::FMSUB_RND; break;
51566 case X86ISD::FNMSUB: Opcode = ISD::FMA; break;
51567 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FMADD_RND; break;
51571 return Opcode;
51574 /// Do target-specific dag combines on floating point negations.
51575 static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
51576 TargetLowering::DAGCombinerInfo &DCI,
51577 const X86Subtarget &Subtarget) {
51578 EVT OrigVT = N->getValueType(0);
51579 SDValue Arg = isFNEG(DAG, N);
51580 if (!Arg)
51581 return SDValue();
51583 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51584 EVT VT = Arg.getValueType();
51585 EVT SVT = VT.getScalarType();
51586 SDLoc DL(N);
51588 // Let legalize expand this if it isn't a legal type yet.
51589 if (!TLI.isTypeLegal(VT))
51590 return SDValue();
51592 // If we're negating a FMUL node on a target with FMA, then we can avoid the
51593 // use of a constant by performing (-0 - A*B) instead.
51594 // FIXME: Check rounding control flags as well once it becomes available.
51595 if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
51596 Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
51597 SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
51598 SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
51599 Arg.getOperand(1), Zero);
51600 return DAG.getBitcast(OrigVT, NewNode);
51603 bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
51604 bool LegalOperations = !DCI.isBeforeLegalizeOps();
51605 if (SDValue NegArg =
51606 TLI.getNegatedExpression(Arg, DAG, LegalOperations, CodeSize))
51607 return DAG.getBitcast(OrigVT, NegArg);
51609 return SDValue();
51612 SDValue X86TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
51613 bool LegalOperations,
51614 bool ForCodeSize,
51615 NegatibleCost &Cost,
51616 unsigned Depth) const {
51617 // fneg patterns are removable even if they have multiple uses.
51618 if (SDValue Arg = isFNEG(DAG, Op.getNode(), Depth)) {
51619 Cost = NegatibleCost::Cheaper;
51620 return DAG.getBitcast(Op.getValueType(), Arg);
51623 EVT VT = Op.getValueType();
51624 EVT SVT = VT.getScalarType();
51625 unsigned Opc = Op.getOpcode();
51626 SDNodeFlags Flags = Op.getNode()->getFlags();
51627 switch (Opc) {
51628 case ISD::FMA:
51629 case X86ISD::FMSUB:
51630 case X86ISD::FNMADD:
51631 case X86ISD::FNMSUB:
51632 case X86ISD::FMADD_RND:
51633 case X86ISD::FMSUB_RND:
51634 case X86ISD::FNMADD_RND:
51635 case X86ISD::FNMSUB_RND: {
51636 if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
51637 !(SVT == MVT::f32 || SVT == MVT::f64) ||
51638 !isOperationLegal(ISD::FMA, VT))
51639 break;
51641 // Don't fold (fneg (fma (fneg x), y, (fneg z))) to (fma x, y, z)
51642 // if it may have signed zeros.
51643 if (!Flags.hasNoSignedZeros())
51644 break;
51646 // This is always negatible for free but we might be able to remove some
51647 // extra operand negations as well.
51648 SmallVector<SDValue, 4> NewOps(Op.getNumOperands(), SDValue());
51649 for (int i = 0; i != 3; ++i)
51650 NewOps[i] = getCheaperNegatedExpression(
51651 Op.getOperand(i), DAG, LegalOperations, ForCodeSize, Depth + 1);
51653 bool NegA = !!NewOps[0];
51654 bool NegB = !!NewOps[1];
51655 bool NegC = !!NewOps[2];
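// Negating exactly one of the two multiplicands flips the sign of the
// product, while negating both cancels out, hence NegA != NegB.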
51656 unsigned NewOpc = negateFMAOpcode(Opc, NegA != NegB, NegC, true);
51658 Cost = (NegA || NegB || NegC) ? NegatibleCost::Cheaper
51659 : NegatibleCost::Neutral;
51661 // Fill in the non-negated ops with the original values.
51662 for (int i = 0, e = Op.getNumOperands(); i != e; ++i)
51663 if (!NewOps[i])
51664 NewOps[i] = Op.getOperand(i);
51665 return DAG.getNode(NewOpc, SDLoc(Op), VT, NewOps);
51667 case X86ISD::FRCP:
51668 if (SDValue NegOp0 =
51669 getNegatedExpression(Op.getOperand(0), DAG, LegalOperations,
51670 ForCodeSize, Cost, Depth + 1))
51671 return DAG.getNode(Opc, SDLoc(Op), VT, NegOp0);
51672 break;
51675 return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
51676 ForCodeSize, Cost, Depth);
51679 static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
51680 const X86Subtarget &Subtarget) {
51681 MVT VT = N->getSimpleValueType(0);
51682 // If we have integer vector types available, use the integer opcodes.
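// e.g. (v4f32 FXOR x, y) becomes bitcast(xor(bitcast<v4i32>(x),
// bitcast<v4i32>(y))), letting the integer logic combines apply.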
51683 if (!VT.isVector() || !Subtarget.hasSSE2())
51684 return SDValue();
51686 SDLoc dl(N);
51688 unsigned IntBits = VT.getScalarSizeInBits();
51689 MVT IntSVT = MVT::getIntegerVT(IntBits);
51690 MVT IntVT = MVT::getVectorVT(IntSVT, VT.getSizeInBits() / IntBits);
51692 SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
51693 SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
51694 unsigned IntOpcode;
51695 switch (N->getOpcode()) {
51696 default: llvm_unreachable("Unexpected FP logic op");
51697 case X86ISD::FOR: IntOpcode = ISD::OR; break;
51698 case X86ISD::FXOR: IntOpcode = ISD::XOR; break;
51699 case X86ISD::FAND: IntOpcode = ISD::AND; break;
51700 case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
51702 SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
51703 return DAG.getBitcast(VT, IntOp);
51707 /// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
51708 static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
51709 if (N->getOpcode() != ISD::XOR)
51710 return SDValue();
51712 SDValue LHS = N->getOperand(0);
51713 if (!isOneConstant(N->getOperand(1)) || LHS->getOpcode() != X86ISD::SETCC)
51714 return SDValue();
51716 X86::CondCode NewCC = X86::GetOppositeBranchCondition(
51717 X86::CondCode(LHS->getConstantOperandVal(0)));
51718 SDLoc DL(N);
51719 return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
51722 static SDValue combineXorSubCTLZ(SDNode *N, SelectionDAG &DAG,
51723 const X86Subtarget &Subtarget) {
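// This matches (bitwidth-1) - ctlz_zero_undef(x) and
// ctlz_zero_undef(x) ^ (bitwidth-1), both of which equal the index of the
// highest set bit, i.e. a single BSR. A scalar sketch of the identity for
// 32 bits (x != 0): 31 - clz(x) == clz(x) ^ 31 == bsr(x).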
51724 assert((N->getOpcode() == ISD::XOR || N->getOpcode() == ISD::SUB) &&
51725 "Invalid opcode for combing with CTLZ");
51726 if (Subtarget.hasFastLZCNT())
51727 return SDValue();
51729 EVT VT = N->getValueType(0);
51730 if (VT != MVT::i8 && VT != MVT::i16 && VT != MVT::i32 &&
51731 (VT != MVT::i64 || !Subtarget.is64Bit()))
51732 return SDValue();
51734 SDValue N0 = N->getOperand(0);
51735 SDValue N1 = N->getOperand(1);
51737 if (N0.getOpcode() != ISD::CTLZ_ZERO_UNDEF &&
51738 N1.getOpcode() != ISD::CTLZ_ZERO_UNDEF)
51739 return SDValue();
51741 SDValue OpCTLZ;
51742 SDValue OpSizeTM1;
51744 if (N1.getOpcode() == ISD::CTLZ_ZERO_UNDEF) {
51745 OpCTLZ = N1;
51746 OpSizeTM1 = N0;
51747 } else if (N->getOpcode() == ISD::SUB) {
51748 return SDValue();
51749 } else {
51750 OpCTLZ = N0;
51751 OpSizeTM1 = N1;
51754 if (!OpCTLZ.hasOneUse())
51755 return SDValue();
51756 auto *C = dyn_cast<ConstantSDNode>(OpSizeTM1);
51757 if (!C)
51758 return SDValue();
51760 if (C->getZExtValue() != uint64_t(OpCTLZ.getValueSizeInBits() - 1))
51761 return SDValue();
51762 SDLoc DL(N);
51763 EVT OpVT = VT;
51764 SDValue Op = OpCTLZ.getOperand(0);
51765 if (VT == MVT::i8) {
51766 // Zero extend to i32 since there is no i8 BSR.
51767 OpVT = MVT::i32;
51768 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, OpVT, Op);
51771 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
51772 Op = DAG.getNode(X86ISD::BSR, DL, VTs, Op);
51773 if (VT == MVT::i8)
51774 Op = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Op);
51776 return Op;
51779 static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
51780 TargetLowering::DAGCombinerInfo &DCI,
51781 const X86Subtarget &Subtarget) {
51782 SDValue N0 = N->getOperand(0);
51783 SDValue N1 = N->getOperand(1);
51784 EVT VT = N->getValueType(0);
51786 // If this is SSE1 only convert to FXOR to avoid scalarization.
51787 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
51788 return DAG.getBitcast(MVT::v4i32,
51789 DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32,
51790 DAG.getBitcast(MVT::v4f32, N0),
51791 DAG.getBitcast(MVT::v4f32, N1)));
51794 if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
51795 return Cmp;
51797 if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
51798 return R;
51800 if (SDValue R = combineBitOpWithShift(N, DAG))
51801 return R;
51803 if (SDValue R = combineBitOpWithPACK(N, DAG))
51804 return R;
51806 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
51807 return FPLogic;
51809 if (SDValue R = combineXorSubCTLZ(N, DAG, Subtarget))
51810 return R;
51812 if (DCI.isBeforeLegalizeOps())
51813 return SDValue();
51815 if (SDValue SetCC = foldXor1SetCC(N, DAG))
51816 return SetCC;
51818 if (SDValue R = combineOrXorWithSETCC(N, N0, N1, DAG))
51819 return R;
51821 if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
51822 return RV;
51824 // Fold not(iX bitcast(vXi1)) -> (iX bitcast(not(vec))) for legal boolvecs.
51825 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51826 if (llvm::isAllOnesConstant(N1) && N0.getOpcode() == ISD::BITCAST &&
51827 N0.getOperand(0).getValueType().isVector() &&
51828 N0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
51829 TLI.isTypeLegal(N0.getOperand(0).getValueType()) && N0.hasOneUse()) {
51830 return DAG.getBitcast(VT, DAG.getNOT(SDLoc(N), N0.getOperand(0),
51831 N0.getOperand(0).getValueType()));
51834 // Handle AVX512 mask widening.
51835 // Fold not(insert_subvector(undef,sub)) -> insert_subvector(undef,not(sub))
51836 if (ISD::isBuildVectorAllOnes(N1.getNode()) && VT.isVector() &&
51837 VT.getVectorElementType() == MVT::i1 &&
51838 N0.getOpcode() == ISD::INSERT_SUBVECTOR && N0.getOperand(0).isUndef() &&
51839 TLI.isTypeLegal(N0.getOperand(1).getValueType())) {
51840 return DAG.getNode(
51841 ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0.getOperand(0),
51842 DAG.getNOT(SDLoc(N), N0.getOperand(1), N0.getOperand(1).getValueType()),
51843 N0.getOperand(2));
51846 // Fold xor(zext(xor(x,c1)),c2) -> xor(zext(x),xor(zext(c1),c2))
51847 // Fold xor(truncate(xor(x,c1)),c2) -> xor(truncate(x),xor(truncate(c1),c2))
51848 // TODO: Under what circumstances could this be performed in DAGCombine?
51849 if ((N0.getOpcode() == ISD::TRUNCATE || N0.getOpcode() == ISD::ZERO_EXTEND) &&
51850 N0.getOperand(0).getOpcode() == N->getOpcode()) {
51851 SDValue TruncExtSrc = N0.getOperand(0);
51852 auto *N1C = dyn_cast<ConstantSDNode>(N1);
51853 auto *N001C = dyn_cast<ConstantSDNode>(TruncExtSrc.getOperand(1));
51854 if (N1C && !N1C->isOpaque() && N001C && !N001C->isOpaque()) {
51855 SDLoc DL(N);
51856 SDValue LHS = DAG.getZExtOrTrunc(TruncExtSrc.getOperand(0), DL, VT);
51857 SDValue RHS = DAG.getZExtOrTrunc(TruncExtSrc.getOperand(1), DL, VT);
51858 return DAG.getNode(ISD::XOR, DL, VT, LHS,
51859 DAG.getNode(ISD::XOR, DL, VT, RHS, N1));
51863 if (SDValue R = combineBMILogicOp(N, DAG, Subtarget))
51864 return R;
51866 return combineFneg(N, DAG, DCI, Subtarget);
51869 static SDValue combineBITREVERSE(SDNode *N, SelectionDAG &DAG,
51870 TargetLowering::DAGCombinerInfo &DCI,
51871 const X86Subtarget &Subtarget) {
51872 SDValue N0 = N->getOperand(0);
51873 EVT VT = N->getValueType(0);
51875 // Convert a (iX bitreverse(bitcast(vXi1 X))) -> (iX bitcast(shuffle(X)))
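// e.g. (i16 bitreverse (bitcast (v16i1 X))) -> bitcast of a shuffle of X
// with mask <15,14,...,1,0>, since reversing the bits of the scalar is the
// same as reversing the order of the i1 elements.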
51876 if (VT.isInteger() && N0.getOpcode() == ISD::BITCAST && N0.hasOneUse()) {
51877 SDValue Src = N0.getOperand(0);
51878 EVT SrcVT = Src.getValueType();
51879 if (SrcVT.isVector() && SrcVT.getScalarType() == MVT::i1 &&
51880 (DCI.isBeforeLegalize() ||
51881 DAG.getTargetLoweringInfo().isTypeLegal(SrcVT)) &&
51882 Subtarget.hasSSSE3()) {
51883 unsigned NumElts = SrcVT.getVectorNumElements();
51884 SmallVector<int, 32> ReverseMask(NumElts);
51885 for (unsigned I = 0; I != NumElts; ++I)
51886 ReverseMask[I] = (NumElts - 1) - I;
51887 SDValue Rev =
51888 DAG.getVectorShuffle(SrcVT, SDLoc(N), Src, Src, ReverseMask);
51889 return DAG.getBitcast(VT, Rev);
51893 return SDValue();
51896 static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
51897 TargetLowering::DAGCombinerInfo &DCI,
51898 const X86Subtarget &Subtarget) {
51899 EVT VT = N->getValueType(0);
51900 unsigned NumBits = VT.getSizeInBits();
51902 // TODO - Constant Folding.
51904 // Simplify the inputs.
51905 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51906 APInt DemandedMask(APInt::getAllOnes(NumBits));
51907 if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
51908 return SDValue(N, 0);
51910 return SDValue();
51913 static bool isNullFPScalarOrVectorConst(SDValue V) {
51914 return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
51917 /// If a value is a scalar FP zero or a vector FP zero (potentially including
51918 /// undefined elements), return a zero constant that may be used to fold away
51919 /// that value. In the case of a vector, the returned constant will not contain
51920 /// undefined elements even if the input parameter does. This makes it suitable
51921 /// to be used as a replacement operand with operations (eg, bitwise-and) where
51922 /// an undef should not propagate.
51923 static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
51924 const X86Subtarget &Subtarget) {
51925 if (!isNullFPScalarOrVectorConst(V))
51926 return SDValue();
51928 if (V.getValueType().isVector())
51929 return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));
51931 return V;
51934 static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
51935 const X86Subtarget &Subtarget) {
51936 SDValue N0 = N->getOperand(0);
51937 SDValue N1 = N->getOperand(1);
51938 EVT VT = N->getValueType(0);
51939 SDLoc DL(N);
51941 // Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
51942 if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
51943 (VT == MVT::f64 && Subtarget.hasSSE2()) ||
51944 (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2())))
51945 return SDValue();
51947 auto isAllOnesConstantFP = [](SDValue V) {
51948 if (V.getSimpleValueType().isVector())
51949 return ISD::isBuildVectorAllOnes(V.getNode());
51950 auto *C = dyn_cast<ConstantFPSDNode>(V);
51951 return C && C->getConstantFPValue()->isAllOnesValue();
51954 // fand (fxor X, -1), Y --> fandn X, Y
51955 if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
51956 return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);
51958 // fand X, (fxor Y, -1) --> fandn Y, X
51959 if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
51960 return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);
51962 return SDValue();
51965 /// Do target-specific dag combines on X86ISD::FAND nodes.
51966 static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
51967 const X86Subtarget &Subtarget) {
51968 // FAND(0.0, x) -> 0.0
51969 if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
51970 return V;
51972 // FAND(x, 0.0) -> 0.0
51973 if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
51974 return V;
51976 if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
51977 return V;
51979 return lowerX86FPLogicOp(N, DAG, Subtarget);
51982 /// Do target-specific dag combines on X86ISD::FANDN nodes.
51983 static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
51984 const X86Subtarget &Subtarget) {
51985 // FANDN(0.0, x) -> x
51986 if (isNullFPScalarOrVectorConst(N->getOperand(0)))
51987 return N->getOperand(1);
51989 // FANDN(x, 0.0) -> 0.0
51990 if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
51991 return V;
51993 return lowerX86FPLogicOp(N, DAG, Subtarget);
51996 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
51997 static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
51998 TargetLowering::DAGCombinerInfo &DCI,
51999 const X86Subtarget &Subtarget) {
52000 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
52002 // F[X]OR(0.0, x) -> x
52003 if (isNullFPScalarOrVectorConst(N->getOperand(0)))
52004 return N->getOperand(1);
52006 // F[X]OR(x, 0.0) -> x
52007 if (isNullFPScalarOrVectorConst(N->getOperand(1)))
52008 return N->getOperand(0);
52010 if (SDValue NewVal = combineFneg(N, DAG, DCI, Subtarget))
52011 return NewVal;
52013 return lowerX86FPLogicOp(N, DAG, Subtarget);
52016 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
52017 static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
52018 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
52020 // FMIN/FMAX are commutative if no NaNs and no negative zeros are allowed.
52021 if (!DAG.getTarget().Options.NoNaNsFPMath ||
52022 !DAG.getTarget().Options.NoSignedZerosFPMath)
52023 return SDValue();
52025 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
52026 // into FMINC and FMAXC, which are commutative operations.
52027 unsigned NewOp = 0;
52028 switch (N->getOpcode()) {
52029 default: llvm_unreachable("unknown opcode");
52030 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
52031 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
52034 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
52035 N->getOperand(0), N->getOperand(1));
52038 static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
52039 const X86Subtarget &Subtarget) {
52040 EVT VT = N->getValueType(0);
52041 if (Subtarget.useSoftFloat() || isSoftF16(VT, Subtarget))
52042 return SDValue();
52044 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52046 if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
52047 (Subtarget.hasSSE2() && VT == MVT::f64) ||
52048 (Subtarget.hasFP16() && VT == MVT::f16) ||
52049 (VT.isVector() && TLI.isTypeLegal(VT))))
52050 return SDValue();
52052 SDValue Op0 = N->getOperand(0);
52053 SDValue Op1 = N->getOperand(1);
52054 SDLoc DL(N);
52055 auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;
52057 // If we don't have to respect NaN inputs, this is a direct translation to x86
52058 // min/max instructions.
52059 if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs())
52060 return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
52062 // If one of the operands is known non-NaN use the native min/max instructions
52063 // with the non-NaN input as second operand.
52064 if (DAG.isKnownNeverNaN(Op1))
52065 return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
52066 if (DAG.isKnownNeverNaN(Op0))
52067 return DAG.getNode(MinMaxOp, DL, VT, Op1, Op0, N->getFlags());
52069 // If we have to respect NaN inputs, this takes at least 3 instructions.
52070 // Favor a library call when operating on a scalar and minimizing code size.
52071 if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize())
52072 return SDValue();
52074 EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
52075 VT);
52077 // There are 4 possibilities involving NaN inputs, and these are the required
52078 // outputs:
52079 // Op1
52080 // Num NaN
52081 // ----------------
52082 // Num | Max | Op0 |
52083 // Op0 ----------------
52084 // NaN | Op1 | NaN |
52085 // ----------------
52087 // The SSE FP max/min instructions were not designed for this case, but rather
52088 // to implement:
52089 // Min = Op1 < Op0 ? Op1 : Op0
52090 // Max = Op1 > Op0 ? Op1 : Op0
52092 // So they always return Op0 if either input is a NaN. However, we can still
52093 // use those instructions for fmaxnum by selecting away a NaN input.
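// e.g. for v4f32 fmaxnum this roughly becomes: MAXPS(Op1, Op0),
// CMPUNORDPS(Op0, Op0) as the NaN test, and a vector select of Op1 vs the
// max (BLENDVPS on SSE4.1, otherwise an AND/ANDN/OR sequence).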
52095 // If either operand is NaN, the 2nd source operand (Op0) is passed through.
52096 SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
52097 SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);
52099 // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
52100 // are NaN, the NaN value of Op1 is the result.
52101 return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
52104 static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
52105 TargetLowering::DAGCombinerInfo &DCI) {
52106 EVT VT = N->getValueType(0);
52107 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52109 APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
52110 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
52111 return SDValue(N, 0);
52113 // Convert a full vector load into vzload when not all bits are needed.
52114 SDValue In = N->getOperand(0);
52115 MVT InVT = In.getSimpleValueType();
52116 if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
52117 ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
52118 assert(InVT.is128BitVector() && "Expected 128-bit input vector");
52119 LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
52120 unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
52121 MVT MemVT = MVT::getIntegerVT(NumBits);
52122 MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
52123 if (SDValue VZLoad = narrowLoadToVZLoad(LN, MemVT, LoadVT, DAG)) {
52124 SDLoc dl(N);
52125 SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
52126 DAG.getBitcast(InVT, VZLoad));
52127 DCI.CombineTo(N, Convert);
52128 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
52129 DCI.recursivelyDeleteUnusedNodes(LN);
52130 return SDValue(N, 0);
52134 return SDValue();
52137 static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG,
52138 TargetLowering::DAGCombinerInfo &DCI) {
52139 bool IsStrict = N->isTargetStrictFPOpcode();
52140 EVT VT = N->getValueType(0);
52142 // Convert a full vector load into vzload when not all bits are needed.
52143 SDValue In = N->getOperand(IsStrict ? 1 : 0);
52144 MVT InVT = In.getSimpleValueType();
52145 if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
52146 ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
52147 assert(InVT.is128BitVector() && "Expected 128-bit input vector");
52148 LoadSDNode *LN = cast<LoadSDNode>(In);
52149 unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
52150 MVT MemVT = MVT::getFloatingPointVT(NumBits);
52151 MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
52152 if (SDValue VZLoad = narrowLoadToVZLoad(LN, MemVT, LoadVT, DAG)) {
52153 SDLoc dl(N);
52154 if (IsStrict) {
52155 SDValue Convert =
52156 DAG.getNode(N->getOpcode(), dl, {VT, MVT::Other},
52157 {N->getOperand(0), DAG.getBitcast(InVT, VZLoad)});
52158 DCI.CombineTo(N, Convert, Convert.getValue(1));
52159 } else {
52160 SDValue Convert =
52161 DAG.getNode(N->getOpcode(), dl, VT, DAG.getBitcast(InVT, VZLoad));
52162 DCI.CombineTo(N, Convert);
52164 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
52165 DCI.recursivelyDeleteUnusedNodes(LN);
52166 return SDValue(N, 0);
52170 return SDValue();
52173 /// Do target-specific dag combines on X86ISD::ANDNP nodes.
52174 static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
52175 TargetLowering::DAGCombinerInfo &DCI,
52176 const X86Subtarget &Subtarget) {
52177 SDValue N0 = N->getOperand(0);
52178 SDValue N1 = N->getOperand(1);
52179 MVT VT = N->getSimpleValueType(0);
52180 int NumElts = VT.getVectorNumElements();
52181 unsigned EltSizeInBits = VT.getScalarSizeInBits();
52182 SDLoc DL(N);
52184 // ANDNP(undef, x) -> 0
52185 // ANDNP(x, undef) -> 0
52186 if (N0.isUndef() || N1.isUndef())
52187 return DAG.getConstant(0, DL, VT);
52189 // ANDNP(0, x) -> x
52190 if (ISD::isBuildVectorAllZeros(N0.getNode()))
52191 return N1;
52193 // ANDNP(x, 0) -> 0
52194 if (ISD::isBuildVectorAllZeros(N1.getNode()))
52195 return DAG.getConstant(0, DL, VT);
52197 // ANDNP(x, -1) -> NOT(x) -> XOR(x, -1)
52198 if (ISD::isBuildVectorAllOnes(N1.getNode()))
52199 return DAG.getNOT(DL, N0, VT);
52201 // Turn ANDNP back to AND if input is inverted.
52202 if (SDValue Not = IsNOT(N0, DAG))
52203 return DAG.getNode(ISD::AND, DL, VT, DAG.getBitcast(VT, Not), N1);
52205 // Fold for better commutativity:
52206 // ANDNP(x,NOT(y)) -> AND(NOT(x),NOT(y)) -> NOT(OR(X,Y)).
52207 if (N1->hasOneUse())
52208 if (SDValue Not = IsNOT(N1, DAG))
52209 return DAG.getNOT(
52210 DL, DAG.getNode(ISD::OR, DL, VT, N0, DAG.getBitcast(VT, Not)), VT);
52212 // Constant Folding
52213 APInt Undefs0, Undefs1;
52214 SmallVector<APInt> EltBits0, EltBits1;
52215 if (getTargetConstantBitsFromNode(N0, EltSizeInBits, Undefs0, EltBits0)) {
52216 if (getTargetConstantBitsFromNode(N1, EltSizeInBits, Undefs1, EltBits1)) {
52217 SmallVector<APInt> ResultBits;
52218 for (int I = 0; I != NumElts; ++I)
52219 ResultBits.push_back(~EltBits0[I] & EltBits1[I]);
52220 return getConstVector(ResultBits, VT, DAG, DL);
52223 // Constant fold NOT(N0) to allow us to use AND.
52224 // Ensure this is only performed if we can confirm that the bitcasted source
52225 // has one use, to prevent an infinite loop with canonicalizeBitSelect.
52226 if (N0->hasOneUse()) {
52227 SDValue BC0 = peekThroughOneUseBitcasts(N0);
52228 if (BC0.getOpcode() != ISD::BITCAST) {
52229 for (APInt &Elt : EltBits0)
52230 Elt = ~Elt;
52231 SDValue Not = getConstVector(EltBits0, VT, DAG, DL);
52232 return DAG.getNode(ISD::AND, DL, VT, Not, N1);
52237 // Attempt to recursively combine a bitmask ANDNP with shuffles.
52238 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
52239 SDValue Op(N, 0);
52240 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
52241 return Res;
52243 // If either operand is a constant mask, then only the elements that aren't
52244 // zero are actually demanded by the other operand.
52245 auto GetDemandedMasks = [&](SDValue Op, bool Invert = false) {
52246 APInt UndefElts;
52247 SmallVector<APInt> EltBits;
52248 APInt DemandedBits = APInt::getAllOnes(EltSizeInBits);
52249 APInt DemandedElts = APInt::getAllOnes(NumElts);
52250 if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
52251 EltBits)) {
52252 DemandedBits.clearAllBits();
52253 DemandedElts.clearAllBits();
52254 for (int I = 0; I != NumElts; ++I) {
52255 if (UndefElts[I]) {
52256 // We can't assume an undef src element gives an undef dst - the
52257 // other src might be zero.
52258 DemandedBits.setAllBits();
52259 DemandedElts.setBit(I);
52260 } else if ((Invert && !EltBits[I].isAllOnes()) ||
52261 (!Invert && !EltBits[I].isZero())) {
52262 DemandedBits |= Invert ? ~EltBits[I] : EltBits[I];
52263 DemandedElts.setBit(I);
52267 return std::make_pair(DemandedBits, DemandedElts);
52269 APInt Bits0, Elts0;
52270 APInt Bits1, Elts1;
52271 std::tie(Bits0, Elts0) = GetDemandedMasks(N1);
52272 std::tie(Bits1, Elts1) = GetDemandedMasks(N0, true);
52274 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52275 if (TLI.SimplifyDemandedVectorElts(N0, Elts0, DCI) ||
52276 TLI.SimplifyDemandedVectorElts(N1, Elts1, DCI) ||
52277 TLI.SimplifyDemandedBits(N0, Bits0, Elts0, DCI) ||
52278 TLI.SimplifyDemandedBits(N1, Bits1, Elts1, DCI)) {
52279 if (N->getOpcode() != ISD::DELETED_NODE)
52280 DCI.AddToWorklist(N);
52281 return SDValue(N, 0);
52285 return SDValue();
52288 static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
52289 TargetLowering::DAGCombinerInfo &DCI) {
52290 SDValue N1 = N->getOperand(1);
52292 // BT ignores high bits in the bit index operand.
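// Only the low log2(bitwidth) bits of the index matter, e.g. bits [4:0] for
// a 32-bit BT, so wider index computations can be simplified away.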
52293 unsigned BitWidth = N1.getValueSizeInBits();
52294 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
52295 if (DAG.getTargetLoweringInfo().SimplifyDemandedBits(N1, DemandedMask, DCI)) {
52296 if (N->getOpcode() != ISD::DELETED_NODE)
52297 DCI.AddToWorklist(N);
52298 return SDValue(N, 0);
52301 return SDValue();
52304 static SDValue combineCVTPH2PS(SDNode *N, SelectionDAG &DAG,
52305 TargetLowering::DAGCombinerInfo &DCI) {
52306 bool IsStrict = N->getOpcode() == X86ISD::STRICT_CVTPH2PS;
52307 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
52309 if (N->getValueType(0) == MVT::v4f32 && Src.getValueType() == MVT::v8i16) {
52310 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52311 APInt DemandedElts = APInt::getLowBitsSet(8, 4);
52312 if (TLI.SimplifyDemandedVectorElts(Src, DemandedElts, DCI)) {
52313 if (N->getOpcode() != ISD::DELETED_NODE)
52314 DCI.AddToWorklist(N);
52315 return SDValue(N, 0);
52318 // Convert a full vector load into vzload when not all bits are needed.
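// e.g. (v4f32 CVTPH2PS (v8i16 load)) only reads the low 64 bits of the
// load, so it can be narrowed to a 64-bit vzext_load (scalar-to-vector
// MOVQ) as long as the load's chain users are rewired to the new node.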
52319 if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
52320 LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(IsStrict ? 1 : 0));
52321 if (SDValue VZLoad = narrowLoadToVZLoad(LN, MVT::i64, MVT::v2i64, DAG)) {
52322 SDLoc dl(N);
52323 if (IsStrict) {
52324 SDValue Convert = DAG.getNode(
52325 N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
52326 {N->getOperand(0), DAG.getBitcast(MVT::v8i16, VZLoad)});
52327 DCI.CombineTo(N, Convert, Convert.getValue(1));
52328 } else {
52329 SDValue Convert = DAG.getNode(N->getOpcode(), dl, MVT::v4f32,
52330 DAG.getBitcast(MVT::v8i16, VZLoad));
52331 DCI.CombineTo(N, Convert);
52334 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
52335 DCI.recursivelyDeleteUnusedNodes(LN);
52336 return SDValue(N, 0);
52341 return SDValue();
52344 // Try to combine sext_in_reg of a cmov of constants by extending the constants.
52345 static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {
52346 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
52348 EVT DstVT = N->getValueType(0);
52350 SDValue N0 = N->getOperand(0);
52351 SDValue N1 = N->getOperand(1);
52352 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
52354 if (ExtraVT != MVT::i8 && ExtraVT != MVT::i16)
52355 return SDValue();
52357 // Look through single use any_extends / truncs.
52358 SDValue IntermediateBitwidthOp;
52359 if ((N0.getOpcode() == ISD::ANY_EXTEND || N0.getOpcode() == ISD::TRUNCATE) &&
52360 N0.hasOneUse()) {
52361 IntermediateBitwidthOp = N0;
52362 N0 = N0.getOperand(0);
52365 // See if we have a single use cmov.
52366 if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse())
52367 return SDValue();
52369 SDValue CMovOp0 = N0.getOperand(0);
52370 SDValue CMovOp1 = N0.getOperand(1);
52372 // Make sure both operands are constants.
52373 if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
52374 !isa<ConstantSDNode>(CMovOp1.getNode()))
52375 return SDValue();
52377 SDLoc DL(N);
52379 // If we looked through an any_extend/trunc above, apply the same operation to the constants.
52380 if (IntermediateBitwidthOp) {
52381 unsigned IntermediateOpc = IntermediateBitwidthOp.getOpcode();
52382 CMovOp0 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp0);
52383 CMovOp1 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp1);
52386 CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp0, N1);
52387 CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp1, N1);
52389 EVT CMovVT = DstVT;
52390 // We do not want i16 CMOV's. Promote to i32 and truncate afterwards.
52391 if (DstVT == MVT::i16) {
52392 CMovVT = MVT::i32;
52393 CMovOp0 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp0);
52394 CMovOp1 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp1);
52397 SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, CMovVT, CMovOp0, CMovOp1,
52398 N0.getOperand(2), N0.getOperand(3));
52400 if (CMovVT != DstVT)
52401 CMov = DAG.getNode(ISD::TRUNCATE, DL, DstVT, CMov);
52403 return CMov;
52406 static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
52407 const X86Subtarget &Subtarget) {
52408 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
52410 if (SDValue V = combineSextInRegCmov(N, DAG))
52411 return V;
52413 EVT VT = N->getValueType(0);
52414 SDValue N0 = N->getOperand(0);
52415 SDValue N1 = N->getOperand(1);
52416 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
52417 SDLoc dl(N);
52419 // The SIGN_EXTEND_INREG to v4i64 is an expensive operation on
52420 // both SSE and AVX2 since there is no sign-extended shift right
52421 // operation on a vector with 64-bit elements.
52422 //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
52423 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
52424 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
52425 N0.getOpcode() == ISD::SIGN_EXTEND)) {
52426 SDValue N00 = N0.getOperand(0);
52428 // EXTLOAD has a better solution on AVX2:
52429 // it may be replaced with an X86ISD::VSEXT node.
52430 if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
52431 if (!ISD::isNormalLoad(N00.getNode()))
52432 return SDValue();
52434 // Attempt to promote any comparison mask ops before moving the
52435 // SIGN_EXTEND_INREG in the way.
52436 if (SDValue Promote = PromoteMaskArithmetic(N0.getNode(), DAG, Subtarget))
52437 return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Promote, N1);
52439 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
52440 SDValue Tmp =
52441 DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, N00, N1);
52442 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
52445 return SDValue();
52448 /// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
52449 /// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
52450 /// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
52451 /// opportunities to combine math ops, use an LEA, or use a complex addressing
52452 /// mode. This can eliminate extend, add, and shift instructions.
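/// For example, once the extend is hoisted, (shl (add (sext X), 42), 2) can
/// be matched as a single LEA (scale 4, displacement 168) rather than an
/// extend, an add, and a shift.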
52453 static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
52454 const X86Subtarget &Subtarget) {
52455 if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
52456 Ext->getOpcode() != ISD::ZERO_EXTEND)
52457 return SDValue();
52459 // TODO: This should be valid for other integer types.
52460 EVT VT = Ext->getValueType(0);
52461 if (VT != MVT::i64)
52462 return SDValue();
52464 SDValue Add = Ext->getOperand(0);
52465 if (Add.getOpcode() != ISD::ADD)
52466 return SDValue();
52468 SDValue AddOp0 = Add.getOperand(0);
52469 SDValue AddOp1 = Add.getOperand(1);
52470 bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
52471 bool NSW = Add->getFlags().hasNoSignedWrap();
52472 bool NUW = Add->getFlags().hasNoUnsignedWrap();
52473 NSW = NSW || (Sext && DAG.willNotOverflowAdd(true, AddOp0, AddOp1));
52474 NUW = NUW || (!Sext && DAG.willNotOverflowAdd(false, AddOp0, AddOp1));
52476 // We need an 'add nsw' feeding into the 'sext' or 'add nuw' feeding
52477 // into the 'zext'.
52478 if ((Sext && !NSW) || (!Sext && !NUW))
52479 return SDValue();
52481 // Having a constant operand to the 'add' ensures that we are not increasing
52482 // the instruction count because the constant is extended for free below.
52483 // A constant operand can also become the displacement field of an LEA.
52484 auto *AddOp1C = dyn_cast<ConstantSDNode>(AddOp1);
52485 if (!AddOp1C)
52486 return SDValue();
52488 // Don't make the 'add' bigger if there's no hope of combining it with some
52489 // other 'add' or 'shl' instruction.
52490 // TODO: It may be profitable to generate simpler LEA instructions in place
52491 // of single 'add' instructions, but the cost model for selecting an LEA
52492 // currently has a high threshold.
52493 bool HasLEAPotential = false;
52494 for (auto *User : Ext->uses()) {
52495 if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
52496 HasLEAPotential = true;
52497 break;
52500 if (!HasLEAPotential)
52501 return SDValue();
52503 // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
52504 int64_t AddC = Sext ? AddOp1C->getSExtValue() : AddOp1C->getZExtValue();
52505 SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
52506 SDValue NewConstant = DAG.getConstant(AddC, SDLoc(Add), VT);
52508 // The wider add is guaranteed not to wrap because both operands are
52509 // sign- or zero-extended to the wider type.
52510 SDNodeFlags Flags;
52511 Flags.setNoSignedWrap(NSW);
52512 Flags.setNoUnsignedWrap(NUW);
52513 return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
52516 // If we face {ANY,SIGN,ZERO}_EXTEND that is applied to a CMOV with constant
52517 // operands and the result of CMOV is not used anywhere else - promote CMOV
52518 // itself instead of promoting its result. This could be beneficial, because:
52519 // 1) X86TargetLowering::EmitLoweredSelect later can do merging of two
52520 // (or more) pseudo-CMOVs only when they go one-after-another and
52521 // getting rid of result extension code after CMOV will help that.
52522 // 2) Promotion of constant CMOV arguments is free, hence the
52523 // {ANY,SIGN,ZERO}_EXTEND will just be deleted.
52524 // 3) The 16-bit CMOV encoding is 4 bytes, the 32-bit CMOV is 3 bytes, so this
52525 // promotion is also good in terms of code-size.
52526 // (The 64-bit CMOV is 4 bytes, which is why we don't do 32-bit => 64-bit
52527 // promotion).
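// For example, (zext i32 (X86ISD::CMOV i16 C1, C2, cc)) becomes
// (X86ISD::CMOV i32 (zext C1), (zext C2), cc): the extended constants are
// still immediates, and the extra extend disappears.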
52528 static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
52529 SDValue CMovN = Extend->getOperand(0);
52530 if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse())
52531 return SDValue();
52533 EVT TargetVT = Extend->getValueType(0);
52534 unsigned ExtendOpcode = Extend->getOpcode();
52535 SDLoc DL(Extend);
52537 EVT VT = CMovN.getValueType();
52538 SDValue CMovOp0 = CMovN.getOperand(0);
52539 SDValue CMovOp1 = CMovN.getOperand(1);
52541 if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
52542 !isa<ConstantSDNode>(CMovOp1.getNode()))
52543 return SDValue();
52545 // Only extend to i32 or i64.
52546 if (TargetVT != MVT::i32 && TargetVT != MVT::i64)
52547 return SDValue();
52549 // Only extend from i16 unless it's a sign_extend from i32. Zext/aext from i32
52550 // are free.
52551 if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32))
52552 return SDValue();
52554 // If this is a zero extend to i64, we should only extend to i32 and use a free
52555 // zero extend to finish.
52556 EVT ExtendVT = TargetVT;
52557 if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND)
52558 ExtendVT = MVT::i32;
52560 CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0);
52561 CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1);
52563 SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1,
52564 CMovN.getOperand(2), CMovN.getOperand(3));
52566 // Finish extending if needed.
52567 if (ExtendVT != TargetVT)
52568 Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res);
52570 return Res;
52573 // Attempt to combine a (sext/zext (setcc)) to a setcc with a xmm/ymm/zmm
52574 // result type.
52575 static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
52576 const X86Subtarget &Subtarget) {
52577 SDValue N0 = N->getOperand(0);
52578 EVT VT = N->getValueType(0);
52579 SDLoc dl(N);
52581 // Only do this combine with AVX512 for vector extends.
52582 if (!Subtarget.hasAVX512() || !VT.isVector() || N0.getOpcode() != ISD::SETCC)
52583 return SDValue();
52585 // Only combine legal element types.
52586 EVT SVT = VT.getVectorElementType();
52587 if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 &&
52588 SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64)
52589 return SDValue();
52591 // We don't have a CMPP instruction for vXf16.
52592 if (N0.getOperand(0).getValueType().getVectorElementType() == MVT::f16)
52593 return SDValue();
52594 // We can only do this if the vector size is 256 bits or less.
52595 unsigned Size = VT.getSizeInBits();
52596 if (Size > 256 && Subtarget.useAVX512Regs())
52597 return SDValue();
52599 // Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since
52600 // those are the only integer compares we have.
52601 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
52602 if (ISD::isUnsignedIntSetCC(CC))
52603 return SDValue();
52605 // Only do this combine if the extension will be fully consumed by the setcc.
52606 EVT N00VT = N0.getOperand(0).getValueType();
52607 EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
52608 if (Size != MatchingVecType.getSizeInBits())
52609 return SDValue();
52611 SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
52613 if (N->getOpcode() == ISD::ZERO_EXTEND)
52614 Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType());
52616 return Res;
52619 static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
52620 TargetLowering::DAGCombinerInfo &DCI,
52621 const X86Subtarget &Subtarget) {
52622 SDValue N0 = N->getOperand(0);
52623 EVT VT = N->getValueType(0);
52624 SDLoc DL(N);
52626 // (i32 (sext (i8 (x86isd::setcc_carry)))) -> (i32 (x86isd::setcc_carry))
52627 if (!DCI.isBeforeLegalizeOps() &&
52628 N0.getOpcode() == X86ISD::SETCC_CARRY) {
52629 SDValue Setcc = DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, N0->getOperand(0),
52630 N0->getOperand(1));
52631 bool ReplaceOtherUses = !N0.hasOneUse();
52632 DCI.CombineTo(N, Setcc);
52633 // Replace other uses with a truncate of the widened setcc_carry.
52634 if (ReplaceOtherUses) {
52635 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
52636 N0.getValueType(), Setcc);
52637 DCI.CombineTo(N0.getNode(), Trunc);
52640 return SDValue(N, 0);
52643 if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
52644 return NewCMov;
52646 if (!DCI.isBeforeLegalizeOps())
52647 return SDValue();
52649 if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
52650 return V;
52652 if (SDValue V = combineToExtendBoolVectorInReg(N->getOpcode(), DL, VT, N0,
52653 DAG, DCI, Subtarget))
52654 return V;
52656 if (VT.isVector()) {
52657 if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
52658 return R;
52660 if (N0.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG)
52661 return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0));
52664 if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
52665 return NewAdd;
52667 return SDValue();
52670 // Inverting a constant vector is profitable if it can be eliminated and the
52671 // inverted vector is already present in the DAG. Otherwise, it will be loaded
52672 // anyway.
52674 // We determine which of the values can be completely eliminated and invert it.
52675 // If both are eliminable, select a vector with the first negative element.
52676 static SDValue getInvertedVectorForFMA(SDValue V, SelectionDAG &DAG) {
52677 assert(ISD::isBuildVectorOfConstantFPSDNodes(V.getNode()) &&
52678 "ConstantFP build vector expected");
52679 // Check if we can eliminate V. We assume that if a value is only used in
52680 // FMAs, we can eliminate it, since this function is invoked for each FMA
52681 // with this vector.
52682 auto IsNotFMA = [](SDNode *Use) {
52683 return Use->getOpcode() != ISD::FMA && Use->getOpcode() != ISD::STRICT_FMA;
52685 if (llvm::any_of(V->uses(), IsNotFMA))
52686 return SDValue();
52688 SmallVector<SDValue, 8> Ops;
52689 EVT VT = V.getValueType();
52690 EVT EltVT = VT.getVectorElementType();
52691 for (auto Op : V->op_values()) {
52692 if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
52693 Ops.push_back(DAG.getConstantFP(-Cst->getValueAPF(), SDLoc(Op), EltVT));
52694 } else {
52695 assert(Op.isUndef());
52696 Ops.push_back(DAG.getUNDEF(EltVT));
52700 SDNode *NV = DAG.getNodeIfExists(ISD::BUILD_VECTOR, DAG.getVTList(VT), Ops);
52701 if (!NV)
52702 return SDValue();
52704 // If an inverted version cannot be eliminated, choose it instead of the
52705 // original version.
52706 if (llvm::any_of(NV->uses(), IsNotFMA))
52707 return SDValue(NV, 0);
52709 // If the inverted version can also be eliminated, we have to consistently
52710 // prefer one of the values. We prefer the constant whose first (non-undef)
52711 // element is negative.
52712 // N.B. We need to skip undefs that may precede a value.
52713 for (auto op : V->op_values()) {
52714 if (auto *Cst = dyn_cast<ConstantFPSDNode>(op)) {
52715 if (Cst->isNegative())
52716 return SDValue();
52717 break;
52720 return SDValue(NV, 0);
52723 static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
52724 TargetLowering::DAGCombinerInfo &DCI,
52725 const X86Subtarget &Subtarget) {
52726 SDLoc dl(N);
52727 EVT VT = N->getValueType(0);
52728 bool IsStrict = N->isStrictFPOpcode() || N->isTargetStrictFPOpcode();
52730 // Let legalize expand this if it isn't a legal type yet.
52731 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52732 if (!TLI.isTypeLegal(VT))
52733 return SDValue();
52735 SDValue A = N->getOperand(IsStrict ? 1 : 0);
52736 SDValue B = N->getOperand(IsStrict ? 2 : 1);
52737 SDValue C = N->getOperand(IsStrict ? 3 : 2);
52739 // If the operation allows fast-math and the target does not support FMA,
52740 // split this into mul+add to avoid libcall(s).
52741 SDNodeFlags Flags = N->getFlags();
52742 if (!IsStrict && Flags.hasAllowReassociation() &&
52743 TLI.isOperationExpand(ISD::FMA, VT)) {
52744 SDValue Fmul = DAG.getNode(ISD::FMUL, dl, VT, A, B, Flags);
52745 return DAG.getNode(ISD::FADD, dl, VT, Fmul, C, Flags);
52748 EVT ScalarVT = VT.getScalarType();
52749 if (((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
52750 !Subtarget.hasAnyFMA()) &&
52751 !(ScalarVT == MVT::f16 && Subtarget.hasFP16()))
52752 return SDValue();
52754 auto invertIfNegative = [&DAG, &TLI, &DCI](SDValue &V) {
52755 bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
52756 bool LegalOperations = !DCI.isBeforeLegalizeOps();
52757 if (SDValue NegV = TLI.getCheaperNegatedExpression(V, DAG, LegalOperations,
52758 CodeSize)) {
52759 V = NegV;
52760 return true;
52762 // Look through extract_vector_elts. If it comes from an FNEG, create a
52763 // new extract from the FNEG input.
52764 if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
52765 isNullConstant(V.getOperand(1))) {
52766 SDValue Vec = V.getOperand(0);
52767 if (SDValue NegV = TLI.getCheaperNegatedExpression(
52768 Vec, DAG, LegalOperations, CodeSize)) {
52769 V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
52770 NegV, V.getOperand(1));
52771 return true;
52774 // Lookup if there is an inverted version of constant vector V in DAG.
52775 if (ISD::isBuildVectorOfConstantFPSDNodes(V.getNode())) {
52776 if (SDValue NegV = getInvertedVectorForFMA(V, DAG)) {
52777 V = NegV;
52778 return true;
52781 return false;
52784 // Do not convert the passthru input of scalar intrinsics.
52785 // FIXME: We could allow negations of the lower element only.
52786 bool NegA = invertIfNegative(A);
52787 bool NegB = invertIfNegative(B);
52788 bool NegC = invertIfNegative(C);
52790 if (!NegA && !NegB && !NegC)
52791 return SDValue();
52793 unsigned NewOpcode =
52794 negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC, false);
52796 // Propagate fast-math-flags to new FMA node.
52797 SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
52798 if (IsStrict) {
52799 assert(N->getNumOperands() == 4 && "Shouldn't be greater than 4");
52800 return DAG.getNode(NewOpcode, dl, {VT, MVT::Other},
52801 {N->getOperand(0), A, B, C});
52802 } else {
52803 if (N->getNumOperands() == 4)
52804 return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
52805 return DAG.getNode(NewOpcode, dl, VT, A, B, C);
52809 // Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C)
52810 // Combine FMSUBADD(A, B, FNEG(C)) -> FMADDSUB(A, B, C)
52811 static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG,
52812 TargetLowering::DAGCombinerInfo &DCI) {
52813 SDLoc dl(N);
52814 EVT VT = N->getValueType(0);
52815 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52816 bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
52817 bool LegalOperations = !DCI.isBeforeLegalizeOps();
52819 SDValue N2 = N->getOperand(2);
52821 SDValue NegN2 =
52822 TLI.getCheaperNegatedExpression(N2, DAG, LegalOperations, CodeSize);
52823 if (!NegN2)
52824 return SDValue();
52825 unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), false, true, false);
52827 if (N->getNumOperands() == 4)
52828 return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
52829 NegN2, N->getOperand(3));
52830 return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
52831 NegN2);
52834 static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
52835 TargetLowering::DAGCombinerInfo &DCI,
52836 const X86Subtarget &Subtarget) {
52837 SDLoc dl(N);
52838 SDValue N0 = N->getOperand(0);
52839 EVT VT = N->getValueType(0);
52841 // (i32 (aext (i8 (x86isd::setcc_carry)))) -> (i32 (x86isd::setcc_carry))
52842 // FIXME: Is this needed? We don't seem to have any tests for it.
52843 if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ANY_EXTEND &&
52844 N0.getOpcode() == X86ISD::SETCC_CARRY) {
52845 SDValue Setcc = DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, N0->getOperand(0),
52846 N0->getOperand(1));
52847 bool ReplaceOtherUses = !N0.hasOneUse();
52848 DCI.CombineTo(N, Setcc);
52849 // Replace other uses with a truncate of the widened setcc_carry.
52850 if (ReplaceOtherUses) {
52851 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
52852 N0.getValueType(), Setcc);
52853 DCI.CombineTo(N0.getNode(), Trunc);
52856 return SDValue(N, 0);
52859 if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
52860 return NewCMov;
52862 if (DCI.isBeforeLegalizeOps())
52863 if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
52864 return V;
52866 if (SDValue V = combineToExtendBoolVectorInReg(N->getOpcode(), dl, VT, N0,
52867 DAG, DCI, Subtarget))
52868 return V;
52870 if (VT.isVector())
52871 if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
52872 return R;
52874 if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
52875 return NewAdd;
52877 if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
52878 return R;
52880 // TODO: Combine with any target/faux shuffle.
52881 if (N0.getOpcode() == X86ISD::PACKUS && N0.getValueSizeInBits() == 128 &&
52882 VT.getScalarSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits()) {
52883 SDValue N00 = N0.getOperand(0);
52884 SDValue N01 = N0.getOperand(1);
52885 unsigned NumSrcEltBits = N00.getScalarValueSizeInBits();
52886 APInt ZeroMask = APInt::getHighBitsSet(NumSrcEltBits, NumSrcEltBits / 2);
52887 if ((N00.isUndef() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
52888 (N01.isUndef() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
52889 return concatSubVectors(N00, N01, DAG, dl);
52893 return SDValue();
52896 /// If we have AVX512 but not BWI, and this is a vXi16/vXi8 setcc, just
52897 /// pre-promote its result type since vXi1 vectors don't get promoted
52898 /// during type legalization.
52899 static SDValue truncateAVX512SetCCNoBWI(EVT VT, EVT OpVT, SDValue LHS,
52900 SDValue RHS, ISD::CondCode CC,
52901 const SDLoc &DL, SelectionDAG &DAG,
52902 const X86Subtarget &Subtarget) {
52903 if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() &&
52904 VT.getVectorElementType() == MVT::i1 &&
52905 (OpVT.getVectorElementType() == MVT::i8 ||
52906 OpVT.getVectorElementType() == MVT::i16)) {
52907 SDValue Setcc = DAG.getSetCC(DL, OpVT, LHS, RHS, CC);
52908 return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc);
52910 return SDValue();
52913 static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
52914 TargetLowering::DAGCombinerInfo &DCI,
52915 const X86Subtarget &Subtarget) {
52916 const ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
52917 const SDValue LHS = N->getOperand(0);
52918 const SDValue RHS = N->getOperand(1);
52919 EVT VT = N->getValueType(0);
52920 EVT OpVT = LHS.getValueType();
52921 SDLoc DL(N);
52923 if (CC == ISD::SETNE || CC == ISD::SETEQ) {
52924 if (SDValue V = combineVectorSizedSetCCEquality(VT, LHS, RHS, CC, DL, DAG,
52925 Subtarget))
52926 return V;
52928 if (VT == MVT::i1) {
52929 X86::CondCode X86CC;
52930 if (SDValue V =
52931 MatchVectorAllEqualTest(LHS, RHS, CC, DL, Subtarget, DAG, X86CC))
52932 return DAG.getNode(ISD::TRUNCATE, DL, VT, getSETCC(X86CC, V, DL, DAG));
52935 if (OpVT.isScalarInteger()) {
52936 // cmpeq(or(X,Y),X) --> cmpeq(and(~X,Y),0)
52937 // cmpne(or(X,Y),X) --> cmpne(and(~X,Y),0)
52938 auto MatchOrCmpEq = [&](SDValue N0, SDValue N1) {
52939 if (N0.getOpcode() == ISD::OR && N0->hasOneUse()) {
52940 if (N0.getOperand(0) == N1)
52941 return DAG.getNode(ISD::AND, DL, OpVT, DAG.getNOT(DL, N1, OpVT),
52942 N0.getOperand(1));
52943 if (N0.getOperand(1) == N1)
52944 return DAG.getNode(ISD::AND, DL, OpVT, DAG.getNOT(DL, N1, OpVT),
52945 N0.getOperand(0));
52947 return SDValue();
52949 if (SDValue AndN = MatchOrCmpEq(LHS, RHS))
52950 return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
52951 if (SDValue AndN = MatchOrCmpEq(RHS, LHS))
52952 return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
52954 // cmpeq(and(X,Y),Y) --> cmpeq(and(~X,Y),0)
52955 // cmpne(and(X,Y),Y) --> cmpne(and(~X,Y),0)
52956 auto MatchAndCmpEq = [&](SDValue N0, SDValue N1) {
52957 if (N0.getOpcode() == ISD::AND && N0->hasOneUse()) {
52958 if (N0.getOperand(0) == N1)
52959 return DAG.getNode(ISD::AND, DL, OpVT, N1,
52960 DAG.getNOT(DL, N0.getOperand(1), OpVT));
52961 if (N0.getOperand(1) == N1)
52962 return DAG.getNode(ISD::AND, DL, OpVT, N1,
52963 DAG.getNOT(DL, N0.getOperand(0), OpVT));
52965 return SDValue();
52967 if (SDValue AndN = MatchAndCmpEq(LHS, RHS))
52968 return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
52969 if (SDValue AndN = MatchAndCmpEq(RHS, LHS))
52970 return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
52972 // cmpeq(trunc(x),C) --> cmpeq(x,C)
52973 // cmpne(trunc(x),C) --> cmpne(x,C)
52974 // iff the upper bits of x are zero.
52975 if (LHS.getOpcode() == ISD::TRUNCATE &&
52976 LHS.getOperand(0).getScalarValueSizeInBits() >= 32 &&
52977 isa<ConstantSDNode>(RHS) && !DCI.isBeforeLegalize()) {
52978 EVT SrcVT = LHS.getOperand(0).getValueType();
52979 APInt UpperBits = APInt::getBitsSetFrom(SrcVT.getScalarSizeInBits(),
52980 OpVT.getScalarSizeInBits());
52981 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52982 auto *C = cast<ConstantSDNode>(RHS);
52983 if (DAG.MaskedValueIsZero(LHS.getOperand(0), UpperBits) &&
52984 TLI.isTypeLegal(LHS.getOperand(0).getValueType()))
52985 return DAG.getSetCC(DL, VT, LHS.getOperand(0),
52986 DAG.getConstant(C->getAPIntValue().zextOrTrunc(
52987 SrcVT.getScalarSizeInBits()),
52988 DL, SrcVT),
52989 CC);
52992 // With C as a power of 2 and C != 0 and C != INT_MIN:
52993 // icmp eq Abs(X) C ->
52994 // (icmp eq A, C) | (icmp eq A, -C)
52995 // icmp ne Abs(X) C ->
52996 // (icmp ne A, C) & (icmp ne A, -C)
52997 // Both of these patterns can be better optimized in
52998 // DAGCombiner::foldAndOrOfSETCC. Note this only applies for scalar
52999 // integers which is checked above.
53000 if (LHS.getOpcode() == ISD::ABS && LHS.hasOneUse()) {
53001 if (auto *C = dyn_cast<ConstantSDNode>(RHS)) {
53002 const APInt &CInt = C->getAPIntValue();
53003 // We can better optimize this case in DAGCombiner::foldAndOrOfSETCC.
53004 if (CInt.isPowerOf2() && !CInt.isMinSignedValue()) {
53005 SDValue BaseOp = LHS.getOperand(0);
53006 SDValue SETCC0 = DAG.getSetCC(DL, VT, BaseOp, RHS, CC);
53007 SDValue SETCC1 = DAG.getSetCC(
53008 DL, VT, BaseOp, DAG.getConstant(-CInt, DL, OpVT), CC);
53009 return DAG.getNode(CC == ISD::SETEQ ? ISD::OR : ISD::AND, DL, VT,
53010 SETCC0, SETCC1);
53017 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
53018 (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
53019 // Using temporaries to avoid messing up operand ordering for later
53020 // transformations if this doesn't work.
53021 SDValue Op0 = LHS;
53022 SDValue Op1 = RHS;
53023 ISD::CondCode TmpCC = CC;
53024 // Put build_vector on the right.
53025 if (Op0.getOpcode() == ISD::BUILD_VECTOR) {
53026 std::swap(Op0, Op1);
53027 TmpCC = ISD::getSetCCSwappedOperands(TmpCC);
53030 bool IsSEXT0 =
53031 (Op0.getOpcode() == ISD::SIGN_EXTEND) &&
53032 (Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1);
53033 bool IsVZero1 = ISD::isBuildVectorAllZeros(Op1.getNode());
53035 if (IsSEXT0 && IsVZero1) {
53036 assert(VT == Op0.getOperand(0).getValueType() &&
53037 "Unexpected operand type");
53038 if (TmpCC == ISD::SETGT)
53039 return DAG.getConstant(0, DL, VT);
53040 if (TmpCC == ISD::SETLE)
53041 return DAG.getConstant(1, DL, VT);
53042 if (TmpCC == ISD::SETEQ || TmpCC == ISD::SETGE)
53043 return DAG.getNOT(DL, Op0.getOperand(0), VT);
53045 assert((TmpCC == ISD::SETNE || TmpCC == ISD::SETLT) &&
53046 "Unexpected condition code!");
53047 return Op0.getOperand(0);
53051 // Try to make an unsigned vector comparison signed. On pre-AVX512 targets the
53052 // only ordered integer comparisons are signed (`PCMPGT`), and on AVX512 it's
53053 // often better to use `PCMPGT` if the result is meant to stay in a vector (and
53054 // if it's going to a mask, there are signed AVX512 comparisons).
53055 if (VT.isVector() && OpVT.isVector() && OpVT.isInteger()) {
53056 bool CanMakeSigned = false;
53057 if (ISD::isUnsignedIntSetCC(CC)) {
53058 KnownBits CmpKnown =
53059 DAG.computeKnownBits(LHS).intersectWith(DAG.computeKnownBits(RHS));
53060 // If we know LHS/RHS share the same sign bit at each element we can
53061 // make this signed.
53062 // NOTE: `computeKnownBits` on a vector type aggregates common bits
53063 // across all lanes. So a pattern where the sign varies from lane to
53064 // lane, but at each lane Sign(LHS) is known to equal Sign(RHS), will be
53065 // missed. We could get around this by demanding each lane
53066 // independently, but this isn't the most important optimization and
53067 // that may eat into compile time.
53068 CanMakeSigned =
53069 CmpKnown.Zero.isSignBitSet() || CmpKnown.One.isSignBitSet();
53071 if (CanMakeSigned || ISD::isSignedIntSetCC(CC)) {
53072 SDValue LHSOut = LHS;
53073 SDValue RHSOut = RHS;
53074 ISD::CondCode NewCC = CC;
53075 switch (CC) {
53076 case ISD::SETGE:
53077 case ISD::SETUGE:
53078 if (SDValue NewLHS = incDecVectorConstant(LHS, DAG, /*IsInc*/ true,
53079 /*NSW*/ true))
53080 LHSOut = NewLHS;
53081 else if (SDValue NewRHS = incDecVectorConstant(
53082 RHS, DAG, /*IsInc*/ false, /*NSW*/ true))
53083 RHSOut = NewRHS;
53084 else
53085 break;
53087 [[fallthrough]];
53088 case ISD::SETUGT:
53089 NewCC = ISD::SETGT;
53090 break;
53092 case ISD::SETLE:
53093 case ISD::SETULE:
53094 if (SDValue NewLHS = incDecVectorConstant(LHS, DAG, /*IsInc*/ false,
53095 /*NSW*/ true))
53096 LHSOut = NewLHS;
53097 else if (SDValue NewRHS = incDecVectorConstant(RHS, DAG, /*IsInc*/ true,
53098 /*NSW*/ true))
53099 RHSOut = NewRHS;
53100 else
53101 break;
53103 [[fallthrough]];
53104 case ISD::SETULT:
53105 // Will be swapped to SETGT in LowerVSETCC*.
53106 NewCC = ISD::SETLT;
53107 break;
53108 default:
53109 break;
53111 if (NewCC != CC) {
53112 if (SDValue R = truncateAVX512SetCCNoBWI(VT, OpVT, LHSOut, RHSOut,
53113 NewCC, DL, DAG, Subtarget))
53114 return R;
53115 return DAG.getSetCC(DL, VT, LHSOut, RHSOut, NewCC);
53120 if (SDValue R =
53121 truncateAVX512SetCCNoBWI(VT, OpVT, LHS, RHS, CC, DL, DAG, Subtarget))
53122 return R;
53124 // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
53125 // to avoid scalarization via legalization because v4i32 is not a legal type.
53126 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
53127 LHS.getValueType() == MVT::v4f32)
53128 return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);
53130 // X pred 0.0 --> X pred -X
53131 // If the negation of X already exists, use it in the comparison. This removes
53132 // the need to materialize 0.0 and allows matching to SSE's MIN/MAX
53133 // instructions in patterns with a 'select' node.
53134 if (isNullFPScalarOrVectorConst(RHS)) {
53135 SDVTList FNegVT = DAG.getVTList(OpVT);
53136 if (SDNode *FNeg = DAG.getNodeIfExists(ISD::FNEG, FNegVT, {LHS}))
53137 return DAG.getSetCC(DL, VT, LHS, SDValue(FNeg, 0), CC);
53140 return SDValue();
53143 static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
53144 TargetLowering::DAGCombinerInfo &DCI,
53145 const X86Subtarget &Subtarget) {
53146 SDValue Src = N->getOperand(0);
53147 MVT SrcVT = Src.getSimpleValueType();
53148 MVT VT = N->getSimpleValueType(0);
53149 unsigned NumBits = VT.getScalarSizeInBits();
53150 unsigned NumElts = SrcVT.getVectorNumElements();
53151 unsigned NumBitsPerElt = SrcVT.getScalarSizeInBits();
53152 assert(VT == MVT::i32 && NumElts <= NumBits && "Unexpected MOVMSK types");
53154 // Perform constant folding.
53155 APInt UndefElts;
53156 SmallVector<APInt, 32> EltBits;
53157 if (getTargetConstantBitsFromNode(Src, NumBitsPerElt, UndefElts, EltBits)) {
53158 APInt Imm(32, 0);
53159 for (unsigned Idx = 0; Idx != NumElts; ++Idx)
53160 if (!UndefElts[Idx] && EltBits[Idx].isNegative())
53161 Imm.setBit(Idx);
53163 return DAG.getConstant(Imm, SDLoc(N), VT);
53166 // Look through int->fp bitcasts that don't change the element width.
53167 unsigned EltWidth = SrcVT.getScalarSizeInBits();
53168 if (Subtarget.hasSSE2() && Src.getOpcode() == ISD::BITCAST &&
53169 Src.getOperand(0).getScalarValueSizeInBits() == EltWidth)
53170 return DAG.getNode(X86ISD::MOVMSK, SDLoc(N), VT, Src.getOperand(0));
53172 // Fold movmsk(not(x)) -> not(movmsk(x)) to improve folding of movmsk results
53173 // with scalar comparisons.
53174 if (SDValue NotSrc = IsNOT(Src, DAG)) {
53175 SDLoc DL(N);
53176 APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
53177 NotSrc = DAG.getBitcast(SrcVT, NotSrc);
53178 return DAG.getNode(ISD::XOR, DL, VT,
53179 DAG.getNode(X86ISD::MOVMSK, DL, VT, NotSrc),
53180 DAG.getConstant(NotMask, DL, VT));
53183 // Fold movmsk(icmp_sgt(x,-1)) -> not(movmsk(x)) to improve folding of movmsk
53184 // results with scalar comparisons.
53185 if (Src.getOpcode() == X86ISD::PCMPGT &&
53186 ISD::isBuildVectorAllOnes(Src.getOperand(1).getNode())) {
53187 SDLoc DL(N);
53188 APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
53189 return DAG.getNode(ISD::XOR, DL, VT,
53190 DAG.getNode(X86ISD::MOVMSK, DL, VT, Src.getOperand(0)),
53191 DAG.getConstant(NotMask, DL, VT));
53194 // Fold movmsk(icmp_eq(and(x,c1),c1)) -> movmsk(shl(x,c2))
53195 // Fold movmsk(icmp_eq(and(x,c1),0)) -> movmsk(not(shl(x,c2)))
53196 // iff pow2splat(c1).
53197 // Use KnownBits to determine if only a single bit is non-zero
53198 // in each element (pow2 or zero), and shift that bit to the msb.
53199 if (Src.getOpcode() == X86ISD::PCMPEQ) {
53200 KnownBits KnownLHS = DAG.computeKnownBits(Src.getOperand(0));
53201 KnownBits KnownRHS = DAG.computeKnownBits(Src.getOperand(1));
53202 unsigned ShiftAmt = KnownLHS.countMinLeadingZeros();
53203 if (KnownLHS.countMaxPopulation() == 1 &&
53204 (KnownRHS.isZero() || (KnownRHS.countMaxPopulation() == 1 &&
53205 ShiftAmt == KnownRHS.countMinLeadingZeros()))) {
53206 SDLoc DL(N);
53207 MVT ShiftVT = SrcVT;
53208 SDValue ShiftLHS = Src.getOperand(0);
53209 SDValue ShiftRHS = Src.getOperand(1);
53210 if (ShiftVT.getScalarType() == MVT::i8) {
53211 // vXi8 shifts - we only care about the sign bit so we can use PSLLW.
53212 ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
53213 ShiftLHS = DAG.getBitcast(ShiftVT, ShiftLHS);
53214 ShiftRHS = DAG.getBitcast(ShiftVT, ShiftRHS);
53216 ShiftLHS = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, ShiftVT,
53217 ShiftLHS, ShiftAmt, DAG);
53218 ShiftRHS = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, ShiftVT,
53219 ShiftRHS, ShiftAmt, DAG);
53220 ShiftLHS = DAG.getBitcast(SrcVT, ShiftLHS);
53221 ShiftRHS = DAG.getBitcast(SrcVT, ShiftRHS);
53222 SDValue Res = DAG.getNode(ISD::XOR, DL, SrcVT, ShiftLHS, ShiftRHS);
53223 return DAG.getNode(X86ISD::MOVMSK, DL, VT, DAG.getNOT(DL, Res, SrcVT));
53227 // Fold movmsk(logic(X,C)) -> logic(movmsk(X),C)
53228 if (N->isOnlyUserOf(Src.getNode())) {
53229 SDValue SrcBC = peekThroughOneUseBitcasts(Src);
53230 if (ISD::isBitwiseLogicOp(SrcBC.getOpcode())) {
53231 APInt UndefElts;
53232 SmallVector<APInt, 32> EltBits;
53233 if (getTargetConstantBitsFromNode(SrcBC.getOperand(1), NumBitsPerElt,
53234 UndefElts, EltBits)) {
53235 APInt Mask = APInt::getZero(NumBits);
53236 for (unsigned Idx = 0; Idx != NumElts; ++Idx) {
53237 if (!UndefElts[Idx] && EltBits[Idx].isNegative())
53238 Mask.setBit(Idx);
53240 SDLoc DL(N);
53241 SDValue NewSrc = DAG.getBitcast(SrcVT, SrcBC.getOperand(0));
53242 SDValue NewMovMsk = DAG.getNode(X86ISD::MOVMSK, DL, VT, NewSrc);
53243 return DAG.getNode(SrcBC.getOpcode(), DL, VT, NewMovMsk,
53244 DAG.getConstant(Mask, DL, VT));
53249 // Simplify the inputs.
53250 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53251 APInt DemandedMask(APInt::getAllOnes(NumBits));
53252 if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
53253 return SDValue(N, 0);
53255 return SDValue();
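// Combine X86ISD::TESTP by simplifying the demanded bits of its vector inputs.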
53258 static SDValue combineTESTP(SDNode *N, SelectionDAG &DAG,
53259 TargetLowering::DAGCombinerInfo &DCI,
53260 const X86Subtarget &Subtarget) {
53261 MVT VT = N->getSimpleValueType(0);
53262 unsigned NumBits = VT.getScalarSizeInBits();
53264 // Simplify the inputs.
53265 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53266 APInt DemandedMask(APInt::getAllOnes(NumBits));
53267 if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
53268 return SDValue(N, 0);
53270 return SDValue();
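// Combine X86 masked gather/scatter nodes - only the sign bit of each mask
// element is demanded, so try to simplify the mask operand accordingly.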
53273 static SDValue combineX86GatherScatter(SDNode *N, SelectionDAG &DAG,
53274 TargetLowering::DAGCombinerInfo &DCI) {
53275 auto *MemOp = cast<X86MaskedGatherScatterSDNode>(N);
53276 SDValue Mask = MemOp->getMask();
53278 // With vector masks we only demand the upper bit of the mask.
53279 if (Mask.getScalarValueSizeInBits() != 1) {
53280 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53281 APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
53282 if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) {
53283 if (N->getOpcode() != ISD::DELETED_NODE)
53284 DCI.AddToWorklist(N);
53285 return SDValue(N, 0);
53289 return SDValue();
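// Recreate a masked gather/scatter node with new index, base and scale
// operands, keeping the remaining operands and memory operand unchanged.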
53292 static SDValue rebuildGatherScatter(MaskedGatherScatterSDNode *GorS,
53293 SDValue Index, SDValue Base, SDValue Scale,
53294 SelectionDAG &DAG) {
53295 SDLoc DL(GorS);
53297 if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
53298 SDValue Ops[] = { Gather->getChain(), Gather->getPassThru(),
53299 Gather->getMask(), Base, Index, Scale };
53300 return DAG.getMaskedGather(Gather->getVTList(),
53301 Gather->getMemoryVT(), DL, Ops,
53302 Gather->getMemOperand(),
53303 Gather->getIndexType(),
53304 Gather->getExtensionType());
53306 auto *Scatter = cast<MaskedScatterSDNode>(GorS);
53307 SDValue Ops[] = { Scatter->getChain(), Scatter->getValue(),
53308 Scatter->getMask(), Base, Index, Scale };
53309 return DAG.getMaskedScatter(Scatter->getVTList(),
53310 Scatter->getMemoryVT(), DL,
53311 Ops, Scatter->getMemOperand(),
53312 Scatter->getIndexType(),
53313 Scatter->isTruncatingStore());
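// Combine generic masked gather/scatter nodes: shrink oversized indices, move
// splat constant adders from the index into the base pointer, legalize the
// index width to i32/i64 and simplify the mask.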
53316 static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
53317 TargetLowering::DAGCombinerInfo &DCI) {
53318 SDLoc DL(N);
53319 auto *GorS = cast<MaskedGatherScatterSDNode>(N);
53320 SDValue Index = GorS->getIndex();
53321 SDValue Base = GorS->getBasePtr();
53322 SDValue Scale = GorS->getScale();
53324 if (DCI.isBeforeLegalize()) {
53325 unsigned IndexWidth = Index.getScalarValueSizeInBits();
53327 // Shrink constant indices if they are larger than 32-bits.
53328 // Only do this before legalize types since v2i64 could become v2i32.
53329 // FIXME: We could check that the type is legal if we're after legalize
53330 // types, but then we would need to construct test cases where that happens.
53331 // FIXME: We could support more than just constant vectors, but we need to
53332 // be careful with costing. A truncate that can be optimized out would be fine.
53333 // Otherwise we might only want to create a truncate if it avoids a split.
53334 if (auto *BV = dyn_cast<BuildVectorSDNode>(Index)) {
53335 if (BV->isConstant() && IndexWidth > 32 &&
53336 DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
53337 EVT NewVT = Index.getValueType().changeVectorElementType(MVT::i32);
53338 Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
53339 return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53343 // Shrink sign/zero extends whose source is 32 bits or smaller but whose
53344 // result is wider than 32 bits, if there are sufficient sign bits. Only do
53345 // this before legalize types to avoid creating illegal types in truncate.
53346 if ((Index.getOpcode() == ISD::SIGN_EXTEND ||
53347 Index.getOpcode() == ISD::ZERO_EXTEND) &&
53348 IndexWidth > 32 &&
53349 Index.getOperand(0).getScalarValueSizeInBits() <= 32 &&
53350 DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
53351 EVT NewVT = Index.getValueType().changeVectorElementType(MVT::i32);
53352 Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
53353 return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53357 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53358 EVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
53359 // Try to move splat constant adders from the index operand to the base
53360 // pointer operand, taking care to multiply by the scale. We can only do
53361 // this when the index element type is the same as the pointer type;
53362 // otherwise we would need to be sure the math doesn't wrap before the scale.
53363 if (Index.getOpcode() == ISD::ADD &&
53364 Index.getValueType().getVectorElementType() == PtrVT &&
53365 isa<ConstantSDNode>(Scale)) {
53366 uint64_t ScaleAmt = Scale->getAsZExtVal();
53367 if (auto *BV = dyn_cast<BuildVectorSDNode>(Index.getOperand(1))) {
53368 BitVector UndefElts;
53369 if (ConstantSDNode *C = BV->getConstantSplatNode(&UndefElts)) {
53370 // FIXME: Allow non-constant?
53371 if (UndefElts.none()) {
53372 // Apply the scale.
53373 APInt Adder = C->getAPIntValue() * ScaleAmt;
53374 // Add it to the existing base.
53375 Base = DAG.getNode(ISD::ADD, DL, PtrVT, Base,
53376 DAG.getConstant(Adder, DL, PtrVT));
53377 Index = Index.getOperand(0);
53378 return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53382 // It's also possible base is just a constant. In that case, just
53383 // replace it with 0 and move the displacement into the index.
53384 if (BV->isConstant() && isa<ConstantSDNode>(Base) &&
53385 isOneConstant(Scale)) {
53386 SDValue Splat = DAG.getSplatBuildVector(Index.getValueType(), DL, Base);
53387 // Combine the constant build_vector and the constant base.
53388 Splat = DAG.getNode(ISD::ADD, DL, Index.getValueType(),
53389 Index.getOperand(1), Splat);
53390 // Add to the LHS of the original Index add.
53391 Index = DAG.getNode(ISD::ADD, DL, Index.getValueType(),
53392 Index.getOperand(0), Splat);
53393 Base = DAG.getConstant(0, DL, Base.getValueType());
53394 return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53399 if (DCI.isBeforeLegalizeOps()) {
53400 unsigned IndexWidth = Index.getScalarValueSizeInBits();
53402 // Make sure the index is either i32 or i64
53403 if (IndexWidth != 32 && IndexWidth != 64) {
53404 MVT EltVT = IndexWidth > 32 ? MVT::i64 : MVT::i32;
53405 EVT IndexVT = Index.getValueType().changeVectorElementType(EltVT);
53406 Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
53407 return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53411 // With vector masks we only demand the upper bit of the mask.
53412 SDValue Mask = GorS->getMask();
53413 if (Mask.getScalarValueSizeInBits() != 1) {
53414 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53415 APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
53416 if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) {
53417 if (N->getOpcode() != ISD::DELETED_NODE)
53418 DCI.AddToWorklist(N);
53419 return SDValue(N, 0);
53423 return SDValue();
53426 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
53427 static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
53428 const X86Subtarget &Subtarget) {
53429 SDLoc DL(N);
53430 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
53431 SDValue EFLAGS = N->getOperand(1);
53433 // Try to simplify the EFLAGS and condition code operands.
53434 if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget))
53435 return getSETCC(CC, Flags, DL, DAG);
53437 return SDValue();
53440 /// Optimize branch condition evaluation.
53441 static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
53442 const X86Subtarget &Subtarget) {
53443 SDLoc DL(N);
53444 SDValue EFLAGS = N->getOperand(3);
53445 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
53447 // Try to simplify the EFLAGS and condition code operands.
53448 // Make sure to not keep references to operands, as combineSetCCEFLAGS can
53449 // RAUW them under us.
53450 if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
53451 SDValue Cond = DAG.getTargetConstant(CC, DL, MVT::i8);
53452 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
53453 N->getOperand(1), Cond, Flags);
53456 return SDValue();
53459 // TODO: Could we move this to DAGCombine?
53460 static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
53461 SelectionDAG &DAG) {
53462 // Take advantage of vector comparisons (etc.) producing 0 or -1 in each lane
53463 // to optimize away the operation when its source is a masked constant.
53465 // The general transformation is:
53466 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
53467 // AND(VECTOR_CMP(x,y), constant2)
53468 // constant2 = UNARYOP(constant)
53470 // Early exit if this isn't a vector operation, the operand of the
53471 // unary operation isn't a bitwise AND, or if the sizes of the operations
53472 // aren't the same.
53473 EVT VT = N->getValueType(0);
53474 bool IsStrict = N->isStrictFPOpcode();
53475 unsigned NumEltBits = VT.getScalarSizeInBits();
53476 SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
53477 if (!VT.isVector() || Op0.getOpcode() != ISD::AND ||
53478 DAG.ComputeNumSignBits(Op0.getOperand(0)) != NumEltBits ||
53479 VT.getSizeInBits() != Op0.getValueSizeInBits())
53480 return SDValue();
53482 // Now check that the other operand of the AND is a constant. We could
53483 // make the transformation for non-constant splats as well, but it's unclear
53484 // that would be a benefit as it would not eliminate any operations, just
53485 // perform one more step in scalar code before moving to the vector unit.
53486 if (auto *BV = dyn_cast<BuildVectorSDNode>(Op0.getOperand(1))) {
53487 // Bail out if the vector isn't a constant.
53488 if (!BV->isConstant())
53489 return SDValue();
53491 // Everything checks out. Build up the new and improved node.
53492 SDLoc DL(N);
53493 EVT IntVT = BV->getValueType(0);
53494 // Create a new constant of the appropriate type for the transformed
53495 // DAG.
53496 SDValue SourceConst;
53497 if (IsStrict)
53498 SourceConst = DAG.getNode(N->getOpcode(), DL, {VT, MVT::Other},
53499 {N->getOperand(0), SDValue(BV, 0)});
53500 else
53501 SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
53502 // The AND node needs bitcasts to/from an integer vector type around it.
53503 SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
53504 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT, Op0->getOperand(0),
53505 MaskConst);
53506 SDValue Res = DAG.getBitcast(VT, NewAnd);
53507 if (IsStrict)
53508 return DAG.getMergeValues({Res, SourceConst.getValue(1)}, DL);
53509 return Res;
53512 return SDValue();
53515 /// If we are converting a value to floating-point, try to replace scalar
53516 /// truncate of an extracted vector element with a bitcast. This tries to keep
53517 /// the sequence on XMM registers rather than moving between vector and GPRs.
53518 static SDValue combineToFPTruncExtElt(SDNode *N, SelectionDAG &DAG) {
53519 // TODO: This is currently only used by combineSIntToFP, but it is generalized
53520 // to allow being called by any similar cast opcode.
53521 // TODO: Consider merging this into lowering: vectorizeExtractedCast().
53522 SDValue Trunc = N->getOperand(0);
53523 if (!Trunc.hasOneUse() || Trunc.getOpcode() != ISD::TRUNCATE)
53524 return SDValue();
53526 SDValue ExtElt = Trunc.getOperand(0);
53527 if (!ExtElt.hasOneUse() || ExtElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
53528 !isNullConstant(ExtElt.getOperand(1)))
53529 return SDValue();
53531 EVT TruncVT = Trunc.getValueType();
53532 EVT SrcVT = ExtElt.getValueType();
53533 unsigned DestWidth = TruncVT.getSizeInBits();
53534 unsigned SrcWidth = SrcVT.getSizeInBits();
53535 if (SrcWidth % DestWidth != 0)
53536 return SDValue();
53538 // inttofp (trunc (extelt X, 0)) --> inttofp (extelt (bitcast X), 0)
53539 EVT SrcVecVT = ExtElt.getOperand(0).getValueType();
53540 unsigned VecWidth = SrcVecVT.getSizeInBits();
53541 unsigned NumElts = VecWidth / DestWidth;
53542 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), TruncVT, NumElts);
53543 SDValue BitcastVec = DAG.getBitcast(BitcastVT, ExtElt.getOperand(0));
53544 SDLoc DL(N);
53545 SDValue NewExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TruncVT,
53546 BitcastVec, ExtElt.getOperand(1));
53547 return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), NewExtElt);
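// Combine (STRICT_)UINT_TO_FP: zero-extend narrow integer sources so the
// conversion can be done as SINT_TO_FP, and use SINT_TO_FP directly when the
// sign bit of the input is known to be zero.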
53550 static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
53551 const X86Subtarget &Subtarget) {
53552 bool IsStrict = N->isStrictFPOpcode();
53553 SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
53554 EVT VT = N->getValueType(0);
53555 EVT InVT = Op0.getValueType();
53557 // Using i16 as an intermediate type is a bad idea, unless we have HW support
53558 // for it. Therefore for type sizes equal to or smaller than 32 just go with i32.
53559 // if hasFP16 support:
53560 // UINT_TO_FP(vXi1~15) -> SINT_TO_FP(ZEXT(vXi1~15 to vXi16))
53561 // UINT_TO_FP(vXi17~31) -> SINT_TO_FP(ZEXT(vXi17~31 to vXi32))
53562 // else
53563 // UINT_TO_FP(vXi1~31) -> SINT_TO_FP(ZEXT(vXi1~31 to vXi32))
53564 // UINT_TO_FP(vXi33~63) -> SINT_TO_FP(ZEXT(vXi33~63 to vXi64))
53565 if (InVT.isVector() && VT.getVectorElementType() == MVT::f16) {
53566 unsigned ScalarSize = InVT.getScalarSizeInBits();
53567 if ((ScalarSize == 16 && Subtarget.hasFP16()) || ScalarSize == 32 ||
53568 ScalarSize >= 64)
53569 return SDValue();
53570 SDLoc dl(N);
53571 EVT DstVT =
53572 EVT::getVectorVT(*DAG.getContext(),
53573 (Subtarget.hasFP16() && ScalarSize < 16) ? MVT::i16
53574 : ScalarSize < 32 ? MVT::i32
53575 : MVT::i64,
53576 InVT.getVectorNumElements());
53577 SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
53578 if (IsStrict)
53579 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
53580 {N->getOperand(0), P});
53581 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
53584 // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))
53585 // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
53586 // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
53587 if (InVT.isVector() && InVT.getScalarSizeInBits() < 32 &&
53588 VT.getScalarType() != MVT::f16) {
53589 SDLoc dl(N);
53590 EVT DstVT = InVT.changeVectorElementType(MVT::i32);
53591 SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
53593 // UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
53594 if (IsStrict)
53595 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
53596 {N->getOperand(0), P});
53597 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
53600 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
53601 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
53602 // the optimization here.
53603 if (DAG.SignBitIsZero(Op0)) {
53604 if (IsStrict)
53605 return DAG.getNode(ISD::STRICT_SINT_TO_FP, SDLoc(N), {VT, MVT::Other},
53606 {N->getOperand(0), Op0});
53607 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);
53610 return SDValue();
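// Combine (STRICT_)SINT_TO_FP: sign-extend narrow integer sources, truncate
// wide sources that have enough sign bits, and build an x87 FILD for i64
// loads on 32-bit targets.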
53613 static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
53614 TargetLowering::DAGCombinerInfo &DCI,
53615 const X86Subtarget &Subtarget) {
53616 // First try to optimize away the conversion entirely when the input is a
53617 // constant masked by a vector comparison. Vectors only.
53618 bool IsStrict = N->isStrictFPOpcode();
53619 if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
53620 return Res;
53622 // Now move on to more general possibilities.
53623 SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
53624 EVT VT = N->getValueType(0);
53625 EVT InVT = Op0.getValueType();
53627 // Using i16 as an intermediate type is a bad idea, unless we have HW support
53628 // for it. Therefore for type sizes equal to or smaller than 32 just go with i32.
53629 // if hasFP16 support:
53630 // SINT_TO_FP(vXi1~15) -> SINT_TO_FP(SEXT(vXi1~15 to vXi16))
53631 // SINT_TO_FP(vXi17~31) -> SINT_TO_FP(SEXT(vXi17~31 to vXi32))
53632 // else
53633 // SINT_TO_FP(vXi1~31) -> SINT_TO_FP(SEXT(vXi1~31 to vXi32))
53634 // SINT_TO_FP(vXi33~63) -> SINT_TO_FP(SEXT(vXi33~63 to vXi64))
53635 if (InVT.isVector() && VT.getVectorElementType() == MVT::f16) {
53636 unsigned ScalarSize = InVT.getScalarSizeInBits();
53637 if ((ScalarSize == 16 && Subtarget.hasFP16()) || ScalarSize == 32 ||
53638 ScalarSize >= 64)
53639 return SDValue();
53640 SDLoc dl(N);
53641 EVT DstVT =
53642 EVT::getVectorVT(*DAG.getContext(),
53643 (Subtarget.hasFP16() && ScalarSize < 16) ? MVT::i16
53644 : ScalarSize < 32 ? MVT::i32
53645 : MVT::i64,
53646 InVT.getVectorNumElements());
53647 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
53648 if (IsStrict)
53649 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
53650 {N->getOperand(0), P});
53651 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
53654 // SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
53655 // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
53656 // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
53657 if (InVT.isVector() && InVT.getScalarSizeInBits() < 32 &&
53658 VT.getScalarType() != MVT::f16) {
53659 SDLoc dl(N);
53660 EVT DstVT = InVT.changeVectorElementType(MVT::i32);
53661 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
53662 if (IsStrict)
53663 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
53664 {N->getOperand(0), P});
53665 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
53668 // Without AVX512DQ we only support i64 to float scalar conversion. For both
53669 // vectors and scalars, see if we know that the upper bits are all the sign
53670 // bit, in which case we can truncate the input to i32 and convert from that.
53671 if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
53672 unsigned BitWidth = InVT.getScalarSizeInBits();
53673 unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
53674 if (NumSignBits >= (BitWidth - 31)) {
53675 EVT TruncVT = MVT::i32;
53676 if (InVT.isVector())
53677 TruncVT = InVT.changeVectorElementType(TruncVT);
53678 SDLoc dl(N);
53679 if (DCI.isBeforeLegalize() || TruncVT != MVT::v2i32) {
53680 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
53681 if (IsStrict)
53682 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
53683 {N->getOperand(0), Trunc});
53684 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
53686 // If we're after legalize and the type is v2i32 we need to shuffle and
53687 // use CVTSI2P.
53688 assert(InVT == MVT::v2i64 && "Unexpected VT!");
53689 SDValue Cast = DAG.getBitcast(MVT::v4i32, Op0);
53690 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Cast, Cast,
53691 { 0, 2, -1, -1 });
53692 if (IsStrict)
53693 return DAG.getNode(X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
53694 {N->getOperand(0), Shuf});
53695 return DAG.getNode(X86ISD::CVTSI2P, dl, VT, Shuf);
53699 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
53700 // a 32-bit target where SSE doesn't support i64->FP operations.
53701 if (!Subtarget.useSoftFloat() && Subtarget.hasX87() &&
53702 Op0.getOpcode() == ISD::LOAD) {
53703 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
53705 // This transformation is not supported if the result type is f16 or f128.
53706 if (VT == MVT::f16 || VT == MVT::f128)
53707 return SDValue();
53709 // If we have AVX512DQ we can use packed conversion instructions unless
53710 // the VT is f80.
53711 if (Subtarget.hasDQI() && VT != MVT::f80)
53712 return SDValue();
53714 if (Ld->isSimple() && !VT.isVector() && ISD::isNormalLoad(Op0.getNode()) &&
53715 Op0.hasOneUse() && !Subtarget.is64Bit() && InVT == MVT::i64) {
53716 std::pair<SDValue, SDValue> Tmp =
53717 Subtarget.getTargetLowering()->BuildFILD(
53718 VT, InVT, SDLoc(N), Ld->getChain(), Ld->getBasePtr(),
53719 Ld->getPointerInfo(), Ld->getOriginalAlign(), DAG);
53720 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Tmp.second);
53721 return Tmp.first;
53725 if (IsStrict)
53726 return SDValue();
53728 if (SDValue V = combineToFPTruncExtElt(N, DAG))
53729 return V;
53731 return SDValue();
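// Return true if any user of this EFLAGS value uses a condition code that
// reads the carry or overflow flags; unknown users are treated conservatively.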
53734 static bool needCarryOrOverflowFlag(SDValue Flags) {
53735 assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
53737 for (const SDNode *User : Flags->uses()) {
53738 X86::CondCode CC;
53739 switch (User->getOpcode()) {
53740 default:
53741 // Be conservative.
53742 return true;
53743 case X86ISD::SETCC:
53744 case X86ISD::SETCC_CARRY:
53745 CC = (X86::CondCode)User->getConstantOperandVal(0);
53746 break;
53747 case X86ISD::BRCOND:
53748 case X86ISD::CMOV:
53749 CC = (X86::CondCode)User->getConstantOperandVal(2);
53750 break;
53753 switch (CC) {
53754 default: break;
53755 case X86::COND_A: case X86::COND_AE:
53756 case X86::COND_B: case X86::COND_BE:
53757 case X86::COND_O: case X86::COND_NO:
53758 case X86::COND_G: case X86::COND_GE:
53759 case X86::COND_L: case X86::COND_LE:
53760 return true;
53764 return false;
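// Return true if every user of this EFLAGS value only tests COND_E/COND_NE,
// i.e. only the zero flag is consumed.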
53767 static bool onlyZeroFlagUsed(SDValue Flags) {
53768 assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
53770 for (const SDNode *User : Flags->uses()) {
53771 unsigned CCOpNo;
53772 switch (User->getOpcode()) {
53773 default:
53774 // Be conservative.
53775 return false;
53776 case X86ISD::SETCC:
53777 case X86ISD::SETCC_CARRY:
53778 CCOpNo = 0;
53779 break;
53780 case X86ISD::BRCOND:
53781 case X86ISD::CMOV:
53782 CCOpNo = 2;
53783 break;
53786 X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
53787 if (CC != X86::COND_E && CC != X86::COND_NE)
53788 return false;
53791 return true;
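// Combine X86ISD::CMP against zero: convert constant shifts to AND masks,
// peek through zero-extends and truncates, and narrow binops so their flag
// result can be used directly.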
53794 static SDValue combineCMP(SDNode *N, SelectionDAG &DAG,
53795 const X86Subtarget &Subtarget) {
53796 // Only handle test patterns.
53797 if (!isNullConstant(N->getOperand(1)))
53798 return SDValue();
53800 // If we have a CMP of a truncated binop, see if we can make a smaller binop
53801 // and use its flags directly.
53802 // TODO: Maybe we should try promoting compares that only use the zero flag
53803 // first if we can prove the upper bits with computeKnownBits?
53804 SDLoc dl(N);
53805 SDValue Op = N->getOperand(0);
53806 EVT VT = Op.getValueType();
53807 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53809 // If we have a constant logical shift that's only used in a comparison
53810 // against zero turn it into an equivalent AND. This allows turning it into
53811 // a TEST instruction later.
53812 if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) &&
53813 Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) &&
53814 onlyZeroFlagUsed(SDValue(N, 0))) {
53815 unsigned BitWidth = VT.getSizeInBits();
53816 const APInt &ShAmt = Op.getConstantOperandAPInt(1);
53817 if (ShAmt.ult(BitWidth)) { // Avoid undefined shifts.
53818 unsigned MaskBits = BitWidth - ShAmt.getZExtValue();
53819 APInt Mask = Op.getOpcode() == ISD::SRL
53820 ? APInt::getHighBitsSet(BitWidth, MaskBits)
53821 : APInt::getLowBitsSet(BitWidth, MaskBits);
53822 if (Mask.isSignedIntN(32)) {
53823 Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
53824 DAG.getConstant(Mask, dl, VT));
53825 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
53826 DAG.getConstant(0, dl, VT));
53831 // If we're extracting from an avx512 bool vector and comparing against zero,
53832 // then try to just bitcast the vector to an integer to use TEST/BT directly.
53833 // (and (extract_elt (kshiftr vXi1, C), 0), 1) -> (and (bc vXi1), 1<<C)
53834 if (Op.getOpcode() == ISD::AND && isOneConstant(Op.getOperand(1)) &&
53835 Op.hasOneUse() && onlyZeroFlagUsed(SDValue(N, 0))) {
53836 SDValue Src = Op.getOperand(0);
53837 if (Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
53838 isNullConstant(Src.getOperand(1)) &&
53839 Src.getOperand(0).getValueType().getScalarType() == MVT::i1) {
53840 SDValue BoolVec = Src.getOperand(0);
53841 unsigned ShAmt = 0;
53842 if (BoolVec.getOpcode() == X86ISD::KSHIFTR) {
53843 ShAmt = BoolVec.getConstantOperandVal(1);
53844 BoolVec = BoolVec.getOperand(0);
53846 BoolVec = widenMaskVector(BoolVec, false, Subtarget, DAG, dl);
53847 EVT VecVT = BoolVec.getValueType();
53848 unsigned BitWidth = VecVT.getVectorNumElements();
53849 EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), BitWidth);
53850 if (TLI.isTypeLegal(VecVT) && TLI.isTypeLegal(BCVT)) {
53851 APInt Mask = APInt::getOneBitSet(BitWidth, ShAmt);
53852 Op = DAG.getBitcast(BCVT, BoolVec);
53853 Op = DAG.getNode(ISD::AND, dl, BCVT, Op,
53854 DAG.getConstant(Mask, dl, BCVT));
53855 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
53856 DAG.getConstant(0, dl, BCVT));
53861 // Peek through any zero-extend if we're only testing for a zero result.
53862 if (Op.getOpcode() == ISD::ZERO_EXTEND && onlyZeroFlagUsed(SDValue(N, 0))) {
53863 SDValue Src = Op.getOperand(0);
53864 EVT SrcVT = Src.getValueType();
53865 if (SrcVT.getScalarSizeInBits() >= 8 && TLI.isTypeLegal(SrcVT))
53866 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Src,
53867 DAG.getConstant(0, dl, SrcVT));
53870 // Look for a truncate.
53871 if (Op.getOpcode() != ISD::TRUNCATE)
53872 return SDValue();
53874 SDValue Trunc = Op;
53875 Op = Op.getOperand(0);
53877 // See if we can compare with zero against the truncation source,
53878 // which should help using the Z flag from many ops. Only do this for
53879 // i32-truncated ops to prevent partial-register compares of promoted ops.
53880 EVT OpVT = Op.getValueType();
53881 APInt UpperBits =
53882 APInt::getBitsSetFrom(OpVT.getSizeInBits(), VT.getSizeInBits());
53883 if (OpVT == MVT::i32 && DAG.MaskedValueIsZero(Op, UpperBits) &&
53884 onlyZeroFlagUsed(SDValue(N, 0))) {
53885 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
53886 DAG.getConstant(0, dl, OpVT));
53889 // After this the truncate and arithmetic op must have a single use.
53890 if (!Trunc.hasOneUse() || !Op.hasOneUse())
53891 return SDValue();
53893 unsigned NewOpc;
53894 switch (Op.getOpcode()) {
53895 default: return SDValue();
53896 case ISD::AND:
53897 // Skip AND with a constant. We have special handling for AND with an
53898 // immediate during isel to generate TEST instructions.
53899 if (isa<ConstantSDNode>(Op.getOperand(1)))
53900 return SDValue();
53901 NewOpc = X86ISD::AND;
53902 break;
53903 case ISD::OR: NewOpc = X86ISD::OR; break;
53904 case ISD::XOR: NewOpc = X86ISD::XOR; break;
53905 case ISD::ADD:
53906 // If the carry or overflow flag is used, we can't truncate.
53907 if (needCarryOrOverflowFlag(SDValue(N, 0)))
53908 return SDValue();
53909 NewOpc = X86ISD::ADD;
53910 break;
53911 case ISD::SUB:
53912 // If the carry or overflow flag is used, we can't truncate.
53913 if (needCarryOrOverflowFlag(SDValue(N, 0)))
53914 return SDValue();
53915 NewOpc = X86ISD::SUB;
53916 break;
53919 // We found an op we can narrow. Truncate its inputs.
53920 SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0));
53921 SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1));
53923 // Use a X86 specific opcode to avoid DAG combine messing with it.
53924 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
53925 Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1);
53927 // For AND, keep a CMP so that we can match the test pattern.
53928 if (NewOpc == X86ISD::AND)
53929 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
53930 DAG.getConstant(0, dl, VT));
53932 // Return the flags.
53933 return Op.getValue(1);
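// Combine X86ISD::ADD/SUB with flags: fall back to generic ADD/SUB when the
// flag result is unused, reuse the flag-producing node for matching generic
// ops, and try to form ADC/SBB.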
53936 static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
53937 TargetLowering::DAGCombinerInfo &DCI) {
53938 assert((X86ISD::ADD == N->getOpcode() || X86ISD::SUB == N->getOpcode()) &&
53939 "Expected X86ISD::ADD or X86ISD::SUB");
53941 SDLoc DL(N);
53942 SDValue LHS = N->getOperand(0);
53943 SDValue RHS = N->getOperand(1);
53944 MVT VT = LHS.getSimpleValueType();
53945 bool IsSub = X86ISD::SUB == N->getOpcode();
53946 unsigned GenericOpc = IsSub ? ISD::SUB : ISD::ADD;
53948 // If we don't use the flag result, simplify back to a generic ADD/SUB.
53949 if (!N->hasAnyUseOfValue(1)) {
53950 SDValue Res = DAG.getNode(GenericOpc, DL, VT, LHS, RHS);
53951 return DAG.getMergeValues({Res, DAG.getConstant(0, DL, MVT::i32)}, DL);
53954 // Fold any similar generic ADD/SUB opcodes to reuse this node.
53955 auto MatchGeneric = [&](SDValue N0, SDValue N1, bool Negate) {
53956 SDValue Ops[] = {N0, N1};
53957 SDVTList VTs = DAG.getVTList(N->getValueType(0));
53958 if (SDNode *GenericAddSub = DAG.getNodeIfExists(GenericOpc, VTs, Ops)) {
53959 SDValue Op(N, 0);
53960 if (Negate)
53961 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
53962 DCI.CombineTo(GenericAddSub, Op);
53965 MatchGeneric(LHS, RHS, false);
53966 MatchGeneric(RHS, LHS, X86ISD::SUB == N->getOpcode());
53968 // TODO: Can we drop the ZeroSecondOpOnly limit? This is to guarantee that the
53969 // EFLAGS result doesn't change.
53970 return combineAddOrSubToADCOrSBB(IsSub, DL, VT, LHS, RHS, DAG,
53971 /*ZeroSecondOpOnly*/ true);
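// Optimize RES, EFLAGS = X86ISD::SBB LHS, RHS, BORROW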
53974 static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
53975 SDValue LHS = N->getOperand(0);
53976 SDValue RHS = N->getOperand(1);
53977 SDValue BorrowIn = N->getOperand(2);
53979 if (SDValue Flags = combineCarryThroughADD(BorrowIn, DAG)) {
53980 MVT VT = N->getSimpleValueType(0);
53981 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
53982 return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs, LHS, RHS, Flags);
53985 // Fold SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry)
53986 // iff the flag result is dead.
53987 if (LHS.getOpcode() == ISD::SUB && isNullConstant(RHS) &&
53988 !N->hasAnyUseOfValue(1))
53989 return DAG.getNode(X86ISD::SBB, SDLoc(N), N->getVTList(), LHS.getOperand(0),
53990 LHS.getOperand(1), BorrowIn);
53992 return SDValue();
53995 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
53996 static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
53997 TargetLowering::DAGCombinerInfo &DCI) {
53998 SDValue LHS = N->getOperand(0);
53999 SDValue RHS = N->getOperand(1);
54000 SDValue CarryIn = N->getOperand(2);
54001 auto *LHSC = dyn_cast<ConstantSDNode>(LHS);
54002 auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
54004 // Canonicalize constant to RHS.
54005 if (LHSC && !RHSC)
54006 return DAG.getNode(X86ISD::ADC, SDLoc(N), N->getVTList(), RHS, LHS,
54007 CarryIn);
54009 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
54010 // the result is either zero or one (depending on the input carry bit).
54011 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
54012 if (LHSC && RHSC && LHSC->isZero() && RHSC->isZero() &&
54013 // We don't have a good way to replace an EFLAGS use, so only do this when
54014 // dead right now.
54015 SDValue(N, 1).use_empty()) {
54016 SDLoc DL(N);
54017 EVT VT = N->getValueType(0);
54018 SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
54019 SDValue Res1 = DAG.getNode(
54020 ISD::AND, DL, VT,
54021 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
54022 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), CarryIn),
54023 DAG.getConstant(1, DL, VT));
54024 return DCI.CombineTo(N, Res1, CarryOut);
54027 // Fold ADC(C1,C2,Carry) -> ADC(0,C1+C2,Carry)
54028 // iff the flag result is dead.
54029 // TODO: Allow flag result if C1+C2 doesn't signed/unsigned overflow.
54030 if (LHSC && RHSC && !LHSC->isZero() && !N->hasAnyUseOfValue(1)) {
54031 SDLoc DL(N);
54032 APInt Sum = LHSC->getAPIntValue() + RHSC->getAPIntValue();
54033 return DAG.getNode(X86ISD::ADC, DL, N->getVTList(),
54034 DAG.getConstant(0, DL, LHS.getValueType()),
54035 DAG.getConstant(Sum, DL, LHS.getValueType()), CarryIn);
54038 if (SDValue Flags = combineCarryThroughADD(CarryIn, DAG)) {
54039 MVT VT = N->getSimpleValueType(0);
54040 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
54041 return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs, LHS, RHS, Flags);
54044 // Fold ADC(ADD(X,Y),0,Carry) -> ADC(X,Y,Carry)
54045 // iff the flag result is dead.
54046 if (LHS.getOpcode() == ISD::ADD && RHSC && RHSC->isZero() &&
54047 !N->hasAnyUseOfValue(1))
54048 return DAG.getNode(X86ISD::ADC, SDLoc(N), N->getVTList(), LHS.getOperand(0),
54049 LHS.getOperand(1), CarryIn);
54051 return SDValue();
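// Attempt to form X86ISD::VPMADDWD from an add of two build_vectors that
// extract the even/odd elements of a sign-extended vXi16 multiply.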
54054 static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
54055 const SDLoc &DL, EVT VT,
54056 const X86Subtarget &Subtarget) {
54057 // Example of pattern we try to detect:
54058 // t := (v8i32 mul (sext (v8i16 x0), (sext (v8i16 x1))))
54059 //(add (build_vector (extract_elt t, 0),
54060 // (extract_elt t, 2),
54061 // (extract_elt t, 4),
54062 // (extract_elt t, 6)),
54063 // (build_vector (extract_elt t, 1),
54064 // (extract_elt t, 3),
54065 // (extract_elt t, 5),
54066 // (extract_elt t, 7)))
54068 if (!Subtarget.hasSSE2())
54069 return SDValue();
54071 if (Op0.getOpcode() != ISD::BUILD_VECTOR ||
54072 Op1.getOpcode() != ISD::BUILD_VECTOR)
54073 return SDValue();
54075 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
54076 VT.getVectorNumElements() < 4 ||
54077 !isPowerOf2_32(VT.getVectorNumElements()))
54078 return SDValue();
54080 // Check if one of Op0,Op1 is of the form:
54081 // (build_vector (extract_elt Mul, 0),
54082 // (extract_elt Mul, 2),
54083 // (extract_elt Mul, 4),
54084 // ...
54085 // the other is of the form:
54086 // (build_vector (extract_elt Mul, 1),
54087 // (extract_elt Mul, 3),
54088 // (extract_elt Mul, 5),
54089 // ...
54090 // and identify Mul.
54091 SDValue Mul;
54092 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) {
54093 SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i),
54094 Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1);
54095 // TODO: Be more tolerant to undefs.
54096 if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54097 Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54098 Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54099 Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
54100 return SDValue();
54101 auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1));
54102 auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1));
54103 auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1));
54104 auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1));
54105 if (!Const0L || !Const1L || !Const0H || !Const1H)
54106 return SDValue();
54107 unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(),
54108 Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue();
54109 // Commutativity of mul allows factors of a product to reorder.
54110 if (Idx0L > Idx1L)
54111 std::swap(Idx0L, Idx1L);
54112 if (Idx0H > Idx1H)
54113 std::swap(Idx0H, Idx1H);
54114 // Commutativity of add allows pairs of factors to reorder.
54115 if (Idx0L > Idx0H) {
54116 std::swap(Idx0L, Idx0H);
54117 std::swap(Idx1L, Idx1H);
54119 if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 ||
54120 Idx1H != 2 * i + 3)
54121 return SDValue();
54122 if (!Mul) {
54123 // First time an extract_elt's source vector is visited. Must be a MUL
54124 // with 2X number of vector elements than the BUILD_VECTOR.
54125 // Both extracts must be from same MUL.
54126 Mul = Op0L->getOperand(0);
54127 if (Mul->getOpcode() != ISD::MUL ||
54128 Mul.getValueType().getVectorNumElements() != 2 * e)
54129 return SDValue();
54131 // Check that the extract is from the same MUL previously seen.
54132 if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) ||
54133 Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0))
54134 return SDValue();
54137 // Check if the Mul source can be safely shrunk.
54138 ShrinkMode Mode;
54139 if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) ||
54140 Mode == ShrinkMode::MULU16)
54141 return SDValue();
54143 EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
54144 VT.getVectorNumElements() * 2);
54145 SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Mul.getOperand(0));
54146 SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Mul.getOperand(1));
54148 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
54149 ArrayRef<SDValue> Ops) {
54150 EVT InVT = Ops[0].getValueType();
54151 assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
54152 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
54153 InVT.getVectorNumElements() / 2);
54154 return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
54156 return SplitOpsAndApply(DAG, Subtarget, DL, VT, { N0, N1 }, PMADDBuilder);
54159 // Attempt to turn this pattern into PMADDWD.
54160 // (add (mul (sext (build_vector)), (sext (build_vector))),
54161 // (mul (sext (build_vector)), (sext (build_vector)))
54162 static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
54163 const SDLoc &DL, EVT VT,
54164 const X86Subtarget &Subtarget) {
54165 if (!Subtarget.hasSSE2())
54166 return SDValue();
54168 if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
54169 return SDValue();
54171 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
54172 VT.getVectorNumElements() < 4 ||
54173 !isPowerOf2_32(VT.getVectorNumElements()))
54174 return SDValue();
54176 SDValue N00 = N0.getOperand(0);
54177 SDValue N01 = N0.getOperand(1);
54178 SDValue N10 = N1.getOperand(0);
54179 SDValue N11 = N1.getOperand(1);
54181 // All inputs need to be sign extends.
54182 // TODO: Support ZERO_EXTEND from known positive?
54183 if (N00.getOpcode() != ISD::SIGN_EXTEND ||
54184 N01.getOpcode() != ISD::SIGN_EXTEND ||
54185 N10.getOpcode() != ISD::SIGN_EXTEND ||
54186 N11.getOpcode() != ISD::SIGN_EXTEND)
54187 return SDValue();
54189 // Peek through the extends.
54190 N00 = N00.getOperand(0);
54191 N01 = N01.getOperand(0);
54192 N10 = N10.getOperand(0);
54193 N11 = N11.getOperand(0);
54195 // Must be extending from vXi16.
54196 EVT InVT = N00.getValueType();
54197 if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT ||
54198 N10.getValueType() != InVT || N11.getValueType() != InVT)
54199 return SDValue();
54201 // All inputs should be build_vectors.
54202 if (N00.getOpcode() != ISD::BUILD_VECTOR ||
54203 N01.getOpcode() != ISD::BUILD_VECTOR ||
54204 N10.getOpcode() != ISD::BUILD_VECTOR ||
54205 N11.getOpcode() != ISD::BUILD_VECTOR)
54206 return SDValue();
54208 // For each element, we need to ensure we have an odd element from one vector
54209 // multiplied by the odd element of another vector and the even element from
54210 // one of the same vectors being multiplied by the even element from the
54211 // other vector. So we need to make sure that for each element i, this
54212 // operation is being performed:
54213 // A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
54214 SDValue In0, In1;
54215 for (unsigned i = 0; i != N00.getNumOperands(); ++i) {
54216 SDValue N00Elt = N00.getOperand(i);
54217 SDValue N01Elt = N01.getOperand(i);
54218 SDValue N10Elt = N10.getOperand(i);
54219 SDValue N11Elt = N11.getOperand(i);
54220 // TODO: Be more tolerant to undefs.
54221 if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54222 N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54223 N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54224 N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
54225 return SDValue();
54226 auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
54227 auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
54228 auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
54229 auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
54230 if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
54231 return SDValue();
54232 unsigned IdxN00 = ConstN00Elt->getZExtValue();
54233 unsigned IdxN01 = ConstN01Elt->getZExtValue();
54234 unsigned IdxN10 = ConstN10Elt->getZExtValue();
54235 unsigned IdxN11 = ConstN11Elt->getZExtValue();
54236 // Add is commutative so indices can be reordered.
54237 if (IdxN00 > IdxN10) {
54238 std::swap(IdxN00, IdxN10);
54239 std::swap(IdxN01, IdxN11);
54241 // N0 indices must be the even element. N1 indices must be the next odd element.
54242 if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
54243 IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
54244 return SDValue();
54245 SDValue N00In = N00Elt.getOperand(0);
54246 SDValue N01In = N01Elt.getOperand(0);
54247 SDValue N10In = N10Elt.getOperand(0);
54248 SDValue N11In = N11Elt.getOperand(0);
54250 // First time we find an input capture it.
54251 if (!In0) {
54252 In0 = N00In;
54253 In1 = N01In;
54255 // The input vectors must be at least as wide as the output.
54256 // If they are larger than the output, we extract subvector below.
54257 if (In0.getValueSizeInBits() < VT.getSizeInBits() ||
54258 In1.getValueSizeInBits() < VT.getSizeInBits())
54259 return SDValue();
54261 // Mul is commutative so the input vectors can be in any order.
54262 // Canonicalize to make the compares easier.
54263 if (In0 != N00In)
54264 std::swap(N00In, N01In);
54265 if (In0 != N10In)
54266 std::swap(N10In, N11In);
54267 if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In)
54268 return SDValue();
54271 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
54272 ArrayRef<SDValue> Ops) {
54273 EVT OpVT = Ops[0].getValueType();
54274 assert(OpVT.getScalarType() == MVT::i16 &&
54275 "Unexpected scalar element type");
54276 assert(OpVT == Ops[1].getValueType() && "Operands' types mismatch");
54277 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
54278 OpVT.getVectorNumElements() / 2);
54279 return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
54282 // If the output is narrower than an input, extract the low part of the input
54283 // vector.
54284 EVT OutVT16 = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
54285 VT.getVectorNumElements() * 2);
54286 if (OutVT16.bitsLT(In0.getValueType())) {
54287 In0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT16, In0,
54288 DAG.getIntPtrConstant(0, DL));
54290 if (OutVT16.bitsLT(In1.getValueType())) {
54291 In1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT16, In1,
54292 DAG.getIntPtrConstant(0, DL));
54294 return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 },
54295 PMADDBuilder);
54298 // ADD(VPMADDWD(X,Y),VPMADDWD(Z,W)) -> VPMADDWD(SHUFFLE(X,Z), SHUFFLE(Y,W))
54299 // If the upper element in each pair of both VPMADDWD operands is zero then
54300 // we can merge the operand elements and use the implicit add of VPMADDWD.
54301 // TODO: Add support for VPMADDUBSW (which isn't commutable).
54302 static SDValue combineAddOfPMADDWD(SelectionDAG &DAG, SDValue N0, SDValue N1,
54303 const SDLoc &DL, EVT VT) {
54304 if (N0.getOpcode() != N1.getOpcode() || N0.getOpcode() != X86ISD::VPMADDWD)
54305 return SDValue();
54307 // TODO: Add 256/512-bit support once VPMADDWD combines with shuffles.
54308 if (VT.getSizeInBits() > 128)
54309 return SDValue();
54311 unsigned NumElts = VT.getVectorNumElements();
54312 MVT OpVT = N0.getOperand(0).getSimpleValueType();
54313 APInt DemandedBits = APInt::getAllOnes(OpVT.getScalarSizeInBits());
54314 APInt DemandedHiElts = APInt::getSplat(2 * NumElts, APInt(2, 2));
54316 bool Op0HiZero =
54317 DAG.MaskedValueIsZero(N0.getOperand(0), DemandedBits, DemandedHiElts) ||
54318 DAG.MaskedValueIsZero(N0.getOperand(1), DemandedBits, DemandedHiElts);
54319 bool Op1HiZero =
54320 DAG.MaskedValueIsZero(N1.getOperand(0), DemandedBits, DemandedHiElts) ||
54321 DAG.MaskedValueIsZero(N1.getOperand(1), DemandedBits, DemandedHiElts);
54323 // TODO: Check for zero lower elements once we have actual codegen that
54324 // creates them.
54325 if (!Op0HiZero || !Op1HiZero)
54326 return SDValue();
54328 // Create a shuffle mask packing the lower elements from each VPMADDWD.
54329 SmallVector<int> Mask;
54330 for (int i = 0; i != (int)NumElts; ++i) {
54331 Mask.push_back(2 * i);
54332 Mask.push_back(2 * (i + NumElts));
54335 SDValue LHS =
54336 DAG.getVectorShuffle(OpVT, DL, N0.getOperand(0), N1.getOperand(0), Mask);
54337 SDValue RHS =
54338 DAG.getVectorShuffle(OpVT, DL, N0.getOperand(1), N1.getOperand(1), Mask);
54339 return DAG.getNode(X86ISD::VPMADDWD, DL, VT, LHS, RHS);
54342 /// CMOV of constants requires materializing constant operands in registers.
54343 /// Try to fold those constants into an 'add' instruction to reduce instruction
54344 /// count. We do this with CMOV rather than the generic 'select' because there are
54345 /// earlier folds that may be used to turn select-of-constants into logic hacks.
54346 static SDValue pushAddIntoCmovOfConsts(SDNode *N, SelectionDAG &DAG,
54347 const X86Subtarget &Subtarget) {
54348 // If an operand is zero, add-of-0 gets simplified away, so that's clearly
54349 // better because we eliminate 1-2 instructions. This transform is still
54350 // an improvement without zero operands because we trade 2 move constants and
54351 // 1 add for 2 adds (LEA) as long as the constants can be represented as
54352 // immediate asm operands (fit in 32-bits).
54353 auto isSuitableCmov = [](SDValue V) {
54354 if (V.getOpcode() != X86ISD::CMOV || !V.hasOneUse())
54355 return false;
54356 if (!isa<ConstantSDNode>(V.getOperand(0)) ||
54357 !isa<ConstantSDNode>(V.getOperand(1)))
54358 return false;
54359 return isNullConstant(V.getOperand(0)) || isNullConstant(V.getOperand(1)) ||
54360 (V.getConstantOperandAPInt(0).isSignedIntN(32) &&
54361 V.getConstantOperandAPInt(1).isSignedIntN(32));
54364 // Match an appropriate CMOV as the first operand of the add.
54365 SDValue Cmov = N->getOperand(0);
54366 SDValue OtherOp = N->getOperand(1);
54367 if (!isSuitableCmov(Cmov))
54368 std::swap(Cmov, OtherOp);
54369 if (!isSuitableCmov(Cmov))
54370 return SDValue();
54372 // Don't remove a load folding opportunity for the add. That would neutralize
54373 // any improvements from removing constant materializations.
54374 if (X86::mayFoldLoad(OtherOp, Subtarget))
54375 return SDValue();
54377 EVT VT = N->getValueType(0);
54378 SDLoc DL(N);
54379 SDValue FalseOp = Cmov.getOperand(0);
54380 SDValue TrueOp = Cmov.getOperand(1);
54382 // We will push the add through the select, but we can potentially do better
54383 // if we know there is another add in the sequence and this is pointer math.
54384 // In that case, we can absorb an add into the trailing memory op and avoid
54385 // a 3-operand LEA which is likely slower than a 2-operand LEA.
54386 // TODO: If target has "slow3OpsLEA", do this even without the trailing memop?
54387 if (OtherOp.getOpcode() == ISD::ADD && OtherOp.hasOneUse() &&
54388 !isa<ConstantSDNode>(OtherOp.getOperand(0)) &&
54389 all_of(N->uses(), [&](SDNode *Use) {
54390 auto *MemNode = dyn_cast<MemSDNode>(Use);
54391 return MemNode && MemNode->getBasePtr().getNode() == N;
54392 })) {
54393 // add (cmov C1, C2), add (X, Y) --> add (cmov (add X, C1), (add X, C2)), Y
54394 // TODO: We are arbitrarily choosing op0 as the 1st piece of the sum, but
54395 // it is possible that choosing op1 might be better.
54396 SDValue X = OtherOp.getOperand(0), Y = OtherOp.getOperand(1);
54397 FalseOp = DAG.getNode(ISD::ADD, DL, VT, X, FalseOp);
54398 TrueOp = DAG.getNode(ISD::ADD, DL, VT, X, TrueOp);
54399 Cmov = DAG.getNode(X86ISD::CMOV, DL, VT, FalseOp, TrueOp,
54400 Cmov.getOperand(2), Cmov.getOperand(3));
54401 return DAG.getNode(ISD::ADD, DL, VT, Cmov, Y);
54404 // add (cmov C1, C2), OtherOp --> cmov (add OtherOp, C1), (add OtherOp, C2)
54405 FalseOp = DAG.getNode(ISD::ADD, DL, VT, OtherOp, FalseOp);
54406 TrueOp = DAG.getNode(ISD::ADD, DL, VT, OtherOp, TrueOp);
54407 return DAG.getNode(X86ISD::CMOV, DL, VT, FalseOp, TrueOp, Cmov.getOperand(2),
54408 Cmov.getOperand(3));
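// Combine ISD::ADD: try PMADDWD/horizontal-add formation, push adds into a
// CMOV of constants, turn add of zext(vXi1) into sub of sext, and form ADC.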
54411 static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
54412 TargetLowering::DAGCombinerInfo &DCI,
54413 const X86Subtarget &Subtarget) {
54414 EVT VT = N->getValueType(0);
54415 SDValue Op0 = N->getOperand(0);
54416 SDValue Op1 = N->getOperand(1);
54417 SDLoc DL(N);
54419 if (SDValue Select = pushAddIntoCmovOfConsts(N, DAG, Subtarget))
54420 return Select;
54422 if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, DL, VT, Subtarget))
54423 return MAdd;
54424 if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, DL, VT, Subtarget))
54425 return MAdd;
54426 if (SDValue MAdd = combineAddOfPMADDWD(DAG, Op0, Op1, DL, VT))
54427 return MAdd;
54429 // Try to synthesize horizontal adds from adds of shuffles.
54430 if (SDValue V = combineToHorizontalAddSub(N, DAG, Subtarget))
54431 return V;
54433 // If vectors of i1 are legal, turn (add (zext (vXi1 X)), Y) into
54434 // (sub Y, (sext (vXi1 X))).
54435 // FIXME: We have the (sub Y, (zext (vXi1 X))) -> (add (sext (vXi1 X)), Y) in
54436 // generic DAG combine without a legal type check, but adding this there
54437 // caused regressions.
54438 if (VT.isVector()) {
54439 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
54440 if (Op0.getOpcode() == ISD::ZERO_EXTEND &&
54441 Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
54442 TLI.isTypeLegal(Op0.getOperand(0).getValueType())) {
54443 SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op0.getOperand(0));
54444 return DAG.getNode(ISD::SUB, DL, VT, Op1, SExt);
54447 if (Op1.getOpcode() == ISD::ZERO_EXTEND &&
54448 Op1.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
54449 TLI.isTypeLegal(Op1.getOperand(0).getValueType())) {
54450 SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op1.getOperand(0));
54451 return DAG.getNode(ISD::SUB, DL, VT, Op0, SExt);
54455 // Fold ADD(ADC(Y,0,W),X) -> ADC(X,Y,W)
54456 if (Op0.getOpcode() == X86ISD::ADC && Op0->hasOneUse() &&
54457 X86::isZeroNode(Op0.getOperand(1))) {
54458 assert(!Op0->hasAnyUseOfValue(1) && "Overflow bit in use");
54459 return DAG.getNode(X86ISD::ADC, SDLoc(Op0), Op0->getVTList(), Op1,
54460 Op0.getOperand(0), Op0.getOperand(2));
54463 return combineAddOrSubToADCOrSBB(N, DAG);
54466 // Try to fold (sub Y, cmovns X, -X) -> (add Y, cmovns -X, X) if the cmov
54467 // condition comes from the subtract node that produced -X. This matches the
54468 // cmov expansion for absolute value. By swapping the operands we convert abs
54469 // to nabs.
54470 static SDValue combineSubABS(SDNode *N, SelectionDAG &DAG) {
54471 SDValue N0 = N->getOperand(0);
54472 SDValue N1 = N->getOperand(1);
54474 if (N1.getOpcode() != X86ISD::CMOV || !N1.hasOneUse())
54475 return SDValue();
54477 X86::CondCode CC = (X86::CondCode)N1.getConstantOperandVal(2);
54478 if (CC != X86::COND_S && CC != X86::COND_NS)
54479 return SDValue();
54481 // Condition should come from a negate operation.
54482 SDValue Cond = N1.getOperand(3);
54483 if (Cond.getOpcode() != X86ISD::SUB || !isNullConstant(Cond.getOperand(0)))
54484 return SDValue();
54485 assert(Cond.getResNo() == 1 && "Unexpected result number");
54487 // Get the X and -X from the negate.
54488 SDValue NegX = Cond.getValue(0);
54489 SDValue X = Cond.getOperand(1);
54491 SDValue FalseOp = N1.getOperand(0);
54492 SDValue TrueOp = N1.getOperand(1);
54494 // Cmov operands should be X and NegX. Order doesn't matter.
54495 if (!(TrueOp == X && FalseOp == NegX) && !(TrueOp == NegX && FalseOp == X))
54496 return SDValue();
54498 // Build a new CMOV with the operands swapped.
54499 SDLoc DL(N);
54500 MVT VT = N->getSimpleValueType(0);
54501 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VT, TrueOp, FalseOp,
54502 N1.getOperand(2), Cond);
54503 // Convert sub to add.
54504 return DAG.getNode(ISD::ADD, DL, VT, N0, Cmov);
54507 static SDValue combineSubSetcc(SDNode *N, SelectionDAG &DAG) {
54508 SDValue Op0 = N->getOperand(0);
54509 SDValue Op1 = N->getOperand(1);
54511 // (sub C (zero_extend (setcc)))
54512 // =>
54513 // (add (zero_extend (setcc inverted)) C-1) if C is a nonzero immediate
54514 // Don't disturb (sub 0 setcc), which is easily done with neg.
54515 EVT VT = N->getValueType(0);
54516 auto *Op0C = dyn_cast<ConstantSDNode>(Op0);
54517 if (Op1.getOpcode() == ISD::ZERO_EXTEND && Op1.hasOneUse() && Op0C &&
54518 !Op0C->isZero() && Op1.getOperand(0).getOpcode() == X86ISD::SETCC &&
54519 Op1.getOperand(0).hasOneUse()) {
54520 SDValue SetCC = Op1.getOperand(0);
54521 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
54522 X86::CondCode NewCC = X86::GetOppositeBranchCondition(CC);
54523 APInt NewImm = Op0C->getAPIntValue() - 1;
54524 SDLoc DL(Op1);
54525 SDValue NewSetCC = getSETCC(NewCC, SetCC.getOperand(1), DL, DAG);
54526 NewSetCC = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, NewSetCC);
54527 return DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(VT, VT), NewSetCC,
54528 DAG.getConstant(NewImm, DL, VT));
54531 return SDValue();
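// Combine ISD::SUB: rewrite sub-from-constant of a one-use XOR as an add,
// form horizontal subs, handle the ABS/NABS cmov pattern, and fold into
// SBB/ADC nodes.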
54534 static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
54535 TargetLowering::DAGCombinerInfo &DCI,
54536 const X86Subtarget &Subtarget) {
54537 SDValue Op0 = N->getOperand(0);
54538 SDValue Op1 = N->getOperand(1);
54540 // TODO: Add NoOpaque handling to isConstantIntBuildVectorOrConstantInt.
54541 auto IsNonOpaqueConstant = [&](SDValue Op) {
54542 if (SDNode *C = DAG.isConstantIntBuildVectorOrConstantInt(Op)) {
54543 if (auto *Cst = dyn_cast<ConstantSDNode>(C))
54544 return !Cst->isOpaque();
54545 return true;
54547 return false;
54550 // X86 can't encode an immediate LHS of a sub. See if we can push the
54551 // negation into a preceding instruction. If the RHS of the sub is a XOR with
54552 // one use and a constant, invert the immediate, saving one register.
54553 // However, ignore cases where C1 is 0, as those will become a NEG.
54554 // sub(C1, xor(X, C2)) -> add(xor(X, ~C2), C1+1)
54555 if (Op1.getOpcode() == ISD::XOR && IsNonOpaqueConstant(Op0) &&
54556 !isNullConstant(Op0) && IsNonOpaqueConstant(Op1.getOperand(1)) &&
54557 Op1->hasOneUse()) {
54558 SDLoc DL(N);
54559 EVT VT = Op0.getValueType();
54560 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT, Op1.getOperand(0),
54561 DAG.getNOT(SDLoc(Op1), Op1.getOperand(1), VT));
54562 SDValue NewAdd =
54563 DAG.getNode(ISD::ADD, DL, VT, Op0, DAG.getConstant(1, DL, VT));
54564 return DAG.getNode(ISD::ADD, DL, VT, NewXor, NewAdd);
54567 if (SDValue V = combineSubABS(N, DAG))
54568 return V;
54570 // Try to synthesize horizontal subs from subs of shuffles.
54571 if (SDValue V = combineToHorizontalAddSub(N, DAG, Subtarget))
54572 return V;
54574 // Fold SUB(X,ADC(Y,0,W)) -> SBB(X,Y,W)
54575 if (Op1.getOpcode() == X86ISD::ADC && Op1->hasOneUse() &&
54576 X86::isZeroNode(Op1.getOperand(1))) {
54577 assert(!Op1->hasAnyUseOfValue(1) && "Overflow bit in use");
54578 return DAG.getNode(X86ISD::SBB, SDLoc(Op1), Op1->getVTList(), Op0,
54579 Op1.getOperand(0), Op1.getOperand(2));
54582 // Fold SUB(X,SBB(Y,Z,W)) -> SUB(ADC(X,Z,W),Y)
54583 // Don't fold to ADC(0,0,W)/SETCC_CARRY pattern which will prevent more folds.
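// SBB(Y,Z,W) computes Y - Z - carry(W), so
// X - (Y - Z - carry(W)) == (X + Z + carry(W)) - Y == SUB(ADC(X,Z,W),Y).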
54584 if (Op1.getOpcode() == X86ISD::SBB && Op1->hasOneUse() &&
54585 !(X86::isZeroNode(Op0) && X86::isZeroNode(Op1.getOperand(1)))) {
54586 assert(!Op1->hasAnyUseOfValue(1) && "Overflow bit in use");
54587 SDValue ADC = DAG.getNode(X86ISD::ADC, SDLoc(Op1), Op1->getVTList(), Op0,
54588 Op1.getOperand(1), Op1.getOperand(2));
54589 return DAG.getNode(ISD::SUB, SDLoc(N), Op0.getValueType(), ADC.getValue(0),
54590 Op1.getOperand(0));
54593 if (SDValue V = combineXorSubCTLZ(N, DAG, Subtarget))
54594 return V;
54596 if (SDValue V = combineAddOrSubToADCOrSBB(N, DAG))
54597 return V;
54599 return combineSubSetcc(N, DAG);
54602 static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
54603 const X86Subtarget &Subtarget) {
54604 MVT VT = N->getSimpleValueType(0);
54605 SDLoc DL(N);
54607 if (N->getOperand(0) == N->getOperand(1)) {
54608 if (N->getOpcode() == X86ISD::PCMPEQ)
54609 return DAG.getConstant(-1, DL, VT);
54610 if (N->getOpcode() == X86ISD::PCMPGT)
54611 return DAG.getConstant(0, DL, VT);
54614 return SDValue();
54617 /// Helper that combines an array of subvector ops as if they were the operands
54618 /// of an ISD::CONCAT_VECTORS node, but may have come from another source (e.g.
54619 /// ISD::INSERT_SUBVECTOR). The ops are assumed to be of the same type.
54620 static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
54621 ArrayRef<SDValue> Ops, SelectionDAG &DAG,
54622 TargetLowering::DAGCombinerInfo &DCI,
54623 const X86Subtarget &Subtarget) {
54624 assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors");
54625 unsigned EltSizeInBits = VT.getScalarSizeInBits();
54627 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
54628 return DAG.getUNDEF(VT);
54630 if (llvm::all_of(Ops, [](SDValue Op) {
54631 return ISD::isBuildVectorAllZeros(Op.getNode());
54633 return getZeroVector(VT, Subtarget, DAG, DL);
54635 SDValue Op0 = Ops[0];
54636 bool IsSplat = llvm::all_equal(Ops);
54637 unsigned NumOps = Ops.size();
54638 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
54639 LLVMContext &Ctx = *DAG.getContext();
54641 // Repeated subvectors.
54642 if (IsSplat &&
54643 (VT.is256BitVector() || (VT.is512BitVector() && Subtarget.hasAVX512()))) {
54644 // If this broadcast is inserted into both halves, use a larger broadcast.
54645 if (Op0.getOpcode() == X86ISD::VBROADCAST)
54646 return DAG.getNode(Op0.getOpcode(), DL, VT, Op0.getOperand(0));
54648 // concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
54649 if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
54650 (Subtarget.hasAVX2() ||
54651 X86::mayFoldLoadIntoBroadcastFromMem(Op0.getOperand(0),
54652 VT.getScalarType(), Subtarget)))
54653 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
54654 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
54655 Op0.getOperand(0),
54656 DAG.getIntPtrConstant(0, DL)));
54658 // concat_vectors(scalar_to_vector(x),scalar_to_vector(x)) -> broadcast(x)
54659 if (Op0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
54660 (Subtarget.hasAVX2() ||
54661 (EltSizeInBits >= 32 &&
54662 X86::mayFoldLoad(Op0.getOperand(0), Subtarget))) &&
54663 Op0.getOperand(0).getValueType() == VT.getScalarType())
54664 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Op0.getOperand(0));
54666 // concat_vectors(extract_subvector(broadcast(x)),
54667 // extract_subvector(broadcast(x))) -> broadcast(x)
54668 // concat_vectors(extract_subvector(subv_broadcast(x)),
54669 // extract_subvector(subv_broadcast(x))) -> subv_broadcast(x)
54670 if (Op0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
54671 Op0.getOperand(0).getValueType() == VT) {
54672 SDValue SrcVec = Op0.getOperand(0);
54673 if (SrcVec.getOpcode() == X86ISD::VBROADCAST ||
54674 SrcVec.getOpcode() == X86ISD::VBROADCAST_LOAD)
54675 return Op0.getOperand(0);
54676 if (SrcVec.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD &&
54677 Op0.getValueType() == cast<MemSDNode>(SrcVec)->getMemoryVT())
54678 return Op0.getOperand(0);
54681 // concat_vectors(permq(x),permq(x)) -> permq(concat_vectors(x,x))
54682 if (Op0.getOpcode() == X86ISD::VPERMI && Subtarget.useAVX512Regs() &&
54683 !X86::mayFoldLoad(Op0.getOperand(0), Subtarget))
54684 return DAG.getNode(Op0.getOpcode(), DL, VT,
54685 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
54686 Op0.getOperand(0), Op0.getOperand(0)),
54687 Op0.getOperand(1));
54690 // concat(extract_subvector(v0,c0), extract_subvector(v1,c1)) -> vperm2x128.
54691 // Only handle concats of subvector high halves, which vperm2x128 is best at.
54692 // TODO: This should go in combineX86ShufflesRecursively eventually.
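// With immediate 0x31, vperm2x128 places the high 128-bit lane of the first
// source in the low half of the result and the high lane of the second source
// in the high half.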
54693 if (VT.is256BitVector() && NumOps == 2) {
54694 SDValue Src0 = peekThroughBitcasts(Ops[0]);
54695 SDValue Src1 = peekThroughBitcasts(Ops[1]);
54696 if (Src0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
54697 Src1.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
54698 EVT SrcVT0 = Src0.getOperand(0).getValueType();
54699 EVT SrcVT1 = Src1.getOperand(0).getValueType();
54700 unsigned NumSrcElts0 = SrcVT0.getVectorNumElements();
54701 unsigned NumSrcElts1 = SrcVT1.getVectorNumElements();
54702 if (SrcVT0.is256BitVector() && SrcVT1.is256BitVector() &&
54703 Src0.getConstantOperandAPInt(1) == (NumSrcElts0 / 2) &&
54704 Src1.getConstantOperandAPInt(1) == (NumSrcElts1 / 2)) {
54705 return DAG.getNode(X86ISD::VPERM2X128, DL, VT,
54706 DAG.getBitcast(VT, Src0.getOperand(0)),
54707 DAG.getBitcast(VT, Src1.getOperand(0)),
54708 DAG.getTargetConstant(0x31, DL, MVT::i8));
54713 // Repeated opcode.
54714 // TODO - combineX86ShufflesRecursively should handle shuffle concatenation
54715 // but it currently struggles with different vector widths.
54716 if (llvm::all_of(Ops, [Op0](SDValue Op) {
54717 return Op.getOpcode() == Op0.getOpcode() && Op.hasOneUse();
54718 })) {
54719 auto ConcatSubOperand = [&](EVT VT, ArrayRef<SDValue> SubOps, unsigned I) {
54720 SmallVector<SDValue> Subs;
54721 for (SDValue SubOp : SubOps)
54722 Subs.push_back(SubOp.getOperand(I));
54723 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
54725 auto IsConcatFree = [](MVT VT, ArrayRef<SDValue> SubOps, unsigned Op) {
54726 bool AllConstants = true;
54727 bool AllSubVectors = true;
54728 for (unsigned I = 0, E = SubOps.size(); I != E; ++I) {
54729 SDValue Sub = SubOps[I].getOperand(Op);
54730 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
54731 SDValue BC = peekThroughBitcasts(Sub);
54732 AllConstants &= ISD::isBuildVectorOfConstantSDNodes(BC.getNode()) ||
54733 ISD::isBuildVectorOfConstantFPSDNodes(BC.getNode());
54734 AllSubVectors &= Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
54735 Sub.getOperand(0).getValueType() == VT &&
54736 Sub.getConstantOperandAPInt(1) == (I * NumSubElts);
54738 return AllConstants || AllSubVectors;
54741 switch (Op0.getOpcode()) {
54742 case X86ISD::VBROADCAST: {
54743 if (!IsSplat && llvm::all_of(Ops, [](SDValue Op) {
54744 return Op.getOperand(0).getValueType().is128BitVector();
54745 })) {
54746 if (VT == MVT::v4f64 || VT == MVT::v4i64)
54747 return DAG.getNode(X86ISD::UNPCKL, DL, VT,
54748 ConcatSubOperand(VT, Ops, 0),
54749 ConcatSubOperand(VT, Ops, 0));
54750 // TODO: Add pseudo v8i32 PSHUFD handling to AVX1Only targets.
54751 if (VT == MVT::v8f32 || (VT == MVT::v8i32 && Subtarget.hasInt256()))
54752 return DAG.getNode(VT == MVT::v8f32 ? X86ISD::VPERMILPI
54753 : X86ISD::PSHUFD,
54754 DL, VT, ConcatSubOperand(VT, Ops, 0),
54755 getV4X86ShuffleImm8ForMask({0, 0, 0, 0}, DL, DAG));
54757 break;
54759 case X86ISD::MOVDDUP:
54760 case X86ISD::MOVSHDUP:
54761 case X86ISD::MOVSLDUP: {
54762 if (!IsSplat)
54763 return DAG.getNode(Op0.getOpcode(), DL, VT,
54764 ConcatSubOperand(VT, Ops, 0));
54765 break;
54767 case X86ISD::SHUFP: {
54768 // Add SHUFPD support if/when necessary.
54769 if (!IsSplat && VT.getScalarType() == MVT::f32 &&
54770 llvm::all_of(Ops, [Op0](SDValue Op) {
54771 return Op.getOperand(2) == Op0.getOperand(2);
54772 })) {
54773 return DAG.getNode(Op0.getOpcode(), DL, VT,
54774 ConcatSubOperand(VT, Ops, 0),
54775 ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
54777 break;
54779 case X86ISD::UNPCKH:
54780 case X86ISD::UNPCKL: {
54781 // Don't concatenate build_vector patterns.
54782 if (!IsSplat && EltSizeInBits >= 32 &&
54783 ((VT.is256BitVector() && Subtarget.hasInt256()) ||
54784 (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
54785 none_of(Ops, [](SDValue Op) {
54786 return peekThroughBitcasts(Op.getOperand(0)).getOpcode() ==
54787 ISD::SCALAR_TO_VECTOR ||
54788 peekThroughBitcasts(Op.getOperand(1)).getOpcode() ==
54789 ISD::SCALAR_TO_VECTOR;
54790 })) {
54791 return DAG.getNode(Op0.getOpcode(), DL, VT,
54792 ConcatSubOperand(VT, Ops, 0),
54793 ConcatSubOperand(VT, Ops, 1));
54795 break;
54797 case X86ISD::PSHUFHW:
54798 case X86ISD::PSHUFLW:
54799 case X86ISD::PSHUFD:
54800 if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
54801 Subtarget.hasInt256() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
54802 return DAG.getNode(Op0.getOpcode(), DL, VT,
54803 ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
54805 [[fallthrough]];
54806 case X86ISD::VPERMILPI:
54807 if (!IsSplat && EltSizeInBits == 32 &&
54808 (VT.is256BitVector() ||
54809 (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
54810 all_of(Ops, [&Op0](SDValue Op) {
54811 return Op0.getOperand(1) == Op.getOperand(1);
54812 })) {
54813 MVT FloatVT = VT.changeVectorElementType(MVT::f32);
54814 SDValue Res = DAG.getBitcast(FloatVT, ConcatSubOperand(VT, Ops, 0));
54815 Res =
54816 DAG.getNode(X86ISD::VPERMILPI, DL, FloatVT, Res, Op0.getOperand(1));
54817 return DAG.getBitcast(VT, Res);
54819 if (!IsSplat && NumOps == 2 && VT == MVT::v4f64) {
54820 uint64_t Idx0 = Ops[0].getConstantOperandVal(1);
54821 uint64_t Idx1 = Ops[1].getConstantOperandVal(1);
54822 uint64_t Idx = ((Idx1 & 3) << 2) | (Idx0 & 3);
54823 return DAG.getNode(Op0.getOpcode(), DL, VT,
54824 ConcatSubOperand(VT, Ops, 0),
54825 DAG.getTargetConstant(Idx, DL, MVT::i8));
54827 break;
54828 case X86ISD::PSHUFB:
54829 case X86ISD::PSADBW:
54830 if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
54831 (VT.is512BitVector() && Subtarget.useBWIRegs()))) {
54832 MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
54833 SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
54834 NumOps * SrcVT.getVectorNumElements());
54835 return DAG.getNode(Op0.getOpcode(), DL, VT,
54836 ConcatSubOperand(SrcVT, Ops, 0),
54837 ConcatSubOperand(SrcVT, Ops, 1));
54839 break;
54840 case X86ISD::VPERMV:
54841 if (!IsSplat && NumOps == 2 &&
54842 (VT.is512BitVector() && Subtarget.useAVX512Regs())) {
54843 MVT OpVT = Op0.getSimpleValueType();
54844 int NumSrcElts = OpVT.getVectorNumElements();
54845 SmallVector<int, 64> ConcatMask;
54846 for (unsigned i = 0; i != NumOps; ++i) {
54847 SmallVector<int, 64> SubMask;
54848 SmallVector<SDValue, 2> SubOps;
54849 if (!getTargetShuffleMask(Ops[i].getNode(), OpVT, false, SubOps,
54850 SubMask))
54851 break;
54852 for (int M : SubMask) {
54853 if (0 <= M)
54854 M += i * NumSrcElts;
54855 ConcatMask.push_back(M);
54858 if (ConcatMask.size() == (NumOps * NumSrcElts)) {
54859 SDValue Src = concatSubVectors(Ops[0].getOperand(1),
54860 Ops[1].getOperand(1), DAG, DL);
54861 MVT IntMaskSVT = MVT::getIntegerVT(EltSizeInBits);
54862 MVT IntMaskVT = MVT::getVectorVT(IntMaskSVT, NumOps * NumSrcElts);
54863 SDValue Mask = getConstVector(ConcatMask, IntMaskVT, DAG, DL, true);
54864 return DAG.getNode(X86ISD::VPERMV, DL, VT, Mask, Src);
54867 break;
54868 case X86ISD::VPERMV3:
54869 if (!IsSplat && NumOps == 2 && VT.is512BitVector()) {
54870 MVT OpVT = Op0.getSimpleValueType();
54871 int NumSrcElts = OpVT.getVectorNumElements();
54872 SmallVector<int, 64> ConcatMask;
54873 for (unsigned i = 0; i != NumOps; ++i) {
54874 SmallVector<int, 64> SubMask;
54875 SmallVector<SDValue, 2> SubOps;
54876 if (!getTargetShuffleMask(Ops[i].getNode(), OpVT, false, SubOps,
54877 SubMask))
54878 break;
54879 for (int M : SubMask) {
54880 if (0 <= M) {
54881 M += M < NumSrcElts ? 0 : NumSrcElts;
54882 M += i * NumSrcElts;
54884 ConcatMask.push_back(M);
54887 if (ConcatMask.size() == (NumOps * NumSrcElts)) {
54888 SDValue Src0 = concatSubVectors(Ops[0].getOperand(0),
54889 Ops[1].getOperand(0), DAG, DL);
54890 SDValue Src1 = concatSubVectors(Ops[0].getOperand(2),
54891 Ops[1].getOperand(2), DAG, DL);
54892 MVT IntMaskSVT = MVT::getIntegerVT(EltSizeInBits);
54893 MVT IntMaskVT = MVT::getVectorVT(IntMaskSVT, NumOps * NumSrcElts);
54894 SDValue Mask = getConstVector(ConcatMask, IntMaskVT, DAG, DL, true);
54895 return DAG.getNode(X86ISD::VPERMV3, DL, VT, Src0, Mask, Src1);
54898 break;
54899 case X86ISD::VPERM2X128: {
54900 if (!IsSplat && VT.is512BitVector() && Subtarget.useAVX512Regs()) {
54901 assert(NumOps == 2 && "Bad concat_vectors operands");
54902 unsigned Imm0 = Ops[0].getConstantOperandVal(2);
54903 unsigned Imm1 = Ops[1].getConstantOperandVal(2);
54904 // TODO: Handle zero'd subvectors.
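// Each vperm2x128 selector nibble indexes a 128-bit lane of its concatenated
// source pair, so the two immediates can be replayed directly as a 4-lane
// SHUF128 mask over the concatenated sources.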
54905 if ((Imm0 & 0x88) == 0 && (Imm1 & 0x88) == 0) {
54906 int Mask[4] = {(int)(Imm0 & 0x03), (int)((Imm0 >> 4) & 0x3),
54907 (int)(Imm1 & 0x03), (int)((Imm1 >> 4) & 0x3)};
54908 MVT ShuffleVT = VT.isFloatingPoint() ? MVT::v8f64 : MVT::v8i64;
54909 SDValue LHS = concatSubVectors(Ops[0].getOperand(0),
54910 Ops[0].getOperand(1), DAG, DL);
54911 SDValue RHS = concatSubVectors(Ops[1].getOperand(0),
54912 Ops[1].getOperand(1), DAG, DL);
54913 SDValue Res = DAG.getNode(X86ISD::SHUF128, DL, ShuffleVT,
54914 DAG.getBitcast(ShuffleVT, LHS),
54915 DAG.getBitcast(ShuffleVT, RHS),
54916 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
54917 return DAG.getBitcast(VT, Res);
54920 break;
54922 case X86ISD::SHUF128: {
54923 if (!IsSplat && NumOps == 2 && VT.is512BitVector()) {
54924 unsigned Imm0 = Ops[0].getConstantOperandVal(2);
54925 unsigned Imm1 = Ops[1].getConstantOperandVal(2);
54926 unsigned Imm = ((Imm0 & 1) << 0) | ((Imm0 & 2) << 1) | 0x08 |
54927 ((Imm1 & 1) << 4) | ((Imm1 & 2) << 5) | 0x80;
54928 SDValue LHS = concatSubVectors(Ops[0].getOperand(0),
54929 Ops[0].getOperand(1), DAG, DL);
54930 SDValue RHS = concatSubVectors(Ops[1].getOperand(0),
54931 Ops[1].getOperand(1), DAG, DL);
54932 return DAG.getNode(X86ISD::SHUF128, DL, VT, LHS, RHS,
54933 DAG.getTargetConstant(Imm, DL, MVT::i8));
54935 break;
54937 case ISD::TRUNCATE:
54938 if (!IsSplat && NumOps == 2 && VT.is256BitVector()) {
54939 EVT SrcVT = Ops[0].getOperand(0).getValueType();
54940 if (SrcVT.is256BitVector() && SrcVT.isSimple() &&
54941 SrcVT == Ops[1].getOperand(0).getValueType() &&
54942 Subtarget.useAVX512Regs() &&
54943 Subtarget.getPreferVectorWidth() >= 512 &&
54944 (SrcVT.getScalarSizeInBits() > 16 || Subtarget.useBWIRegs())) {
54945 EVT NewSrcVT = SrcVT.getDoubleNumVectorElementsVT(Ctx);
54946 return DAG.getNode(ISD::TRUNCATE, DL, VT,
54947 ConcatSubOperand(NewSrcVT, Ops, 0));
54950 break;
54951 case X86ISD::VSHLI:
54952 case X86ISD::VSRLI:
54953 // Special case: on AVX1, SHL/SRL of v4i64 by 32 bits can be lowered as a shuffle.
54954 // TODO: Move this to LowerShiftByScalarImmediate?
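// For each i64 element (lo,hi), shifting left by 32 yields (0,lo) and shifting
// right by 32 yields (hi,0), which as v8i32 is a shuffle with a zero vector.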
54955 if (VT == MVT::v4i64 && !Subtarget.hasInt256() &&
54956 llvm::all_of(Ops, [](SDValue Op) {
54957 return Op.getConstantOperandAPInt(1) == 32;
54958 })) {
54959 SDValue Res = DAG.getBitcast(MVT::v8i32, ConcatSubOperand(VT, Ops, 0));
54960 SDValue Zero = getZeroVector(MVT::v8i32, Subtarget, DAG, DL);
54961 if (Op0.getOpcode() == X86ISD::VSHLI) {
54962 Res = DAG.getVectorShuffle(MVT::v8i32, DL, Res, Zero,
54963 {8, 0, 8, 2, 8, 4, 8, 6});
54964 } else {
54965 Res = DAG.getVectorShuffle(MVT::v8i32, DL, Res, Zero,
54966 {1, 8, 3, 8, 5, 8, 7, 8});
54968 return DAG.getBitcast(VT, Res);
54970 [[fallthrough]];
54971 case X86ISD::VSRAI:
54972 case X86ISD::VSHL:
54973 case X86ISD::VSRL:
54974 case X86ISD::VSRA:
54975 if (((VT.is256BitVector() && Subtarget.hasInt256()) ||
54976 (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
54977 (EltSizeInBits >= 32 || Subtarget.useBWIRegs()))) &&
54978 llvm::all_of(Ops, [Op0](SDValue Op) {
54979 return Op0.getOperand(1) == Op.getOperand(1);
54980 })) {
54981 return DAG.getNode(Op0.getOpcode(), DL, VT,
54982 ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
54984 break;
54985 case X86ISD::VPERMI:
54986 case X86ISD::VROTLI:
54987 case X86ISD::VROTRI:
54988 if (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
54989 llvm::all_of(Ops, [Op0](SDValue Op) {
54990 return Op0.getOperand(1) == Op.getOperand(1);
54991 })) {
54992 return DAG.getNode(Op0.getOpcode(), DL, VT,
54993 ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
54995 break;
54996 case ISD::AND:
54997 case ISD::OR:
54998 case ISD::XOR:
54999 case X86ISD::ANDNP:
55000 if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
55001 (VT.is512BitVector() && Subtarget.useAVX512Regs()))) {
55002 return DAG.getNode(Op0.getOpcode(), DL, VT,
55003 ConcatSubOperand(VT, Ops, 0),
55004 ConcatSubOperand(VT, Ops, 1));
55006 break;
55007 case X86ISD::PCMPEQ:
55008 case X86ISD::PCMPGT:
55009 if (!IsSplat && VT.is256BitVector() && Subtarget.hasInt256() &&
55010 (IsConcatFree(VT, Ops, 0) || IsConcatFree(VT, Ops, 1))) {
55011 return DAG.getNode(Op0.getOpcode(), DL, VT,
55012 ConcatSubOperand(VT, Ops, 0),
55013 ConcatSubOperand(VT, Ops, 1));
55015 break;
55016 case ISD::CTPOP:
55017 case ISD::CTTZ:
55018 case ISD::CTLZ:
55019 case ISD::CTTZ_ZERO_UNDEF:
55020 case ISD::CTLZ_ZERO_UNDEF:
55021 if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
55022 (VT.is512BitVector() && Subtarget.useBWIRegs()))) {
55023 return DAG.getNode(Op0.getOpcode(), DL, VT,
55024 ConcatSubOperand(VT, Ops, 0));
55026 break;
55027 case X86ISD::GF2P8AFFINEQB:
55028 if (!IsSplat &&
55029 (VT.is256BitVector() ||
55030 (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
55031 llvm::all_of(Ops, [Op0](SDValue Op) {
55032 return Op0.getOperand(2) == Op.getOperand(2);
55033 })) {
55034 return DAG.getNode(Op0.getOpcode(), DL, VT,
55035 ConcatSubOperand(VT, Ops, 0),
55036 ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
55038 break;
55039 case ISD::ADD:
55040 case ISD::SUB:
55041 case ISD::MUL:
55042 if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
55043 (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
55044 (EltSizeInBits >= 32 || Subtarget.useBWIRegs())))) {
55045 return DAG.getNode(Op0.getOpcode(), DL, VT,
55046 ConcatSubOperand(VT, Ops, 0),
55047 ConcatSubOperand(VT, Ops, 1));
55049 break;
55050 // VADD, VSUB and VMUL can execute on more ports than VINSERT and their
55051 // latencies are short, so we don't replace them here.
55052 case ISD::FDIV:
55053 if (!IsSplat && (VT.is256BitVector() ||
55054 (VT.is512BitVector() && Subtarget.useAVX512Regs()))) {
55055 return DAG.getNode(Op0.getOpcode(), DL, VT,
55056 ConcatSubOperand(VT, Ops, 0),
55057 ConcatSubOperand(VT, Ops, 1));
55059 break;
55060 case X86ISD::HADD:
55061 case X86ISD::HSUB:
55062 case X86ISD::FHADD:
55063 case X86ISD::FHSUB:
55064 if (!IsSplat && VT.is256BitVector() &&
55065 (VT.isFloatingPoint() || Subtarget.hasInt256())) {
55066 return DAG.getNode(Op0.getOpcode(), DL, VT,
55067 ConcatSubOperand(VT, Ops, 0),
55068 ConcatSubOperand(VT, Ops, 1));
55070 break;
55071 case X86ISD::PACKSS:
55072 case X86ISD::PACKUS:
55073 if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
55074 (VT.is512BitVector() && Subtarget.useBWIRegs()))) {
55075 MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
55076 SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
55077 NumOps * SrcVT.getVectorNumElements());
55078 return DAG.getNode(Op0.getOpcode(), DL, VT,
55079 ConcatSubOperand(SrcVT, Ops, 0),
55080 ConcatSubOperand(SrcVT, Ops, 1));
55082 break;
55083 case X86ISD::PALIGNR:
55084 if (!IsSplat &&
55085 ((VT.is256BitVector() && Subtarget.hasInt256()) ||
55086 (VT.is512BitVector() && Subtarget.useBWIRegs())) &&
55087 llvm::all_of(Ops, [Op0](SDValue Op) {
55088 return Op0.getOperand(2) == Op.getOperand(2);
55089 })) {
55090 return DAG.getNode(Op0.getOpcode(), DL, VT,
55091 ConcatSubOperand(VT, Ops, 0),
55092 ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
55094 break;
55095 case X86ISD::BLENDI:
55096 if (NumOps == 2 && VT.is512BitVector() && Subtarget.useBWIRegs()) {
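// Combine the two blend immediates into a single vXi1 mask and lower the
// concatenated blend as a vselect (a set bit selects the second operand).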
55097 uint64_t Mask0 = Ops[0].getConstantOperandVal(2);
55098 uint64_t Mask1 = Ops[1].getConstantOperandVal(2);
55099 uint64_t Mask = (Mask1 << (VT.getVectorNumElements() / 2)) | Mask0;
55100 MVT MaskSVT = MVT::getIntegerVT(VT.getVectorNumElements());
55101 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
55102 SDValue Sel =
55103 DAG.getBitcast(MaskVT, DAG.getConstant(Mask, DL, MaskSVT));
55104 return DAG.getSelect(DL, VT, Sel, ConcatSubOperand(VT, Ops, 1),
55105 ConcatSubOperand(VT, Ops, 0));
55107 break;
55108 case ISD::VSELECT:
55109 if (!IsSplat && Subtarget.hasAVX512() &&
55110 (VT.is256BitVector() ||
55111 (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
55112 (EltSizeInBits >= 32 || Subtarget.hasBWI())) {
55113 EVT SelVT = Ops[0].getOperand(0).getValueType();
55114 if (SelVT.getVectorElementType() == MVT::i1) {
55115 SelVT = EVT::getVectorVT(Ctx, MVT::i1,
55116 NumOps * SelVT.getVectorNumElements());
55117 if (TLI.isTypeLegal(SelVT))
55118 return DAG.getNode(Op0.getOpcode(), DL, VT,
55119 ConcatSubOperand(SelVT.getSimpleVT(), Ops, 0),
55120 ConcatSubOperand(VT, Ops, 1),
55121 ConcatSubOperand(VT, Ops, 2));
55124 [[fallthrough]];
55125 case X86ISD::BLENDV:
55126 if (!IsSplat && VT.is256BitVector() && NumOps == 2 &&
55127 (EltSizeInBits >= 32 || Subtarget.hasInt256()) &&
55128 IsConcatFree(VT, Ops, 1) && IsConcatFree(VT, Ops, 2)) {
55129 EVT SelVT = Ops[0].getOperand(0).getValueType();
55130 SelVT = SelVT.getDoubleNumVectorElementsVT(Ctx);
55131 if (TLI.isTypeLegal(SelVT))
55132 return DAG.getNode(Op0.getOpcode(), DL, VT,
55133 ConcatSubOperand(SelVT.getSimpleVT(), Ops, 0),
55134 ConcatSubOperand(VT, Ops, 1),
55135 ConcatSubOperand(VT, Ops, 2));
55137 break;
55141 // Fold subvector loads into one.
55142 // If needed, look through bitcasts to get to the load.
55143 if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(Op0))) {
55144 unsigned Fast;
55145 const X86TargetLowering *TLI = Subtarget.getTargetLowering();
55146 if (TLI->allowsMemoryAccess(Ctx, DAG.getDataLayout(), VT,
55147 *FirstLd->getMemOperand(), &Fast) &&
55148 Fast) {
55149 if (SDValue Ld =
55150 EltsFromConsecutiveLoads(VT, Ops, DL, DAG, Subtarget, false))
55151 return Ld;
55155 // Attempt to fold target constant loads.
55156 if (all_of(Ops, [](SDValue Op) { return getTargetConstantFromNode(Op); })) {
55157 SmallVector<APInt> EltBits;
55158 APInt UndefElts = APInt::getZero(VT.getVectorNumElements());
55159 for (unsigned I = 0; I != NumOps; ++I) {
55160 APInt OpUndefElts;
55161 SmallVector<APInt> OpEltBits;
55162 if (!getTargetConstantBitsFromNode(Ops[I], EltSizeInBits, OpUndefElts,
55163 OpEltBits, true, false))
55164 break;
55165 EltBits.append(OpEltBits);
55166 UndefElts.insertBits(OpUndefElts, I * OpUndefElts.getBitWidth());
55168 if (EltBits.size() == VT.getVectorNumElements()) {
55169 Constant *C = getConstantVector(VT, EltBits, UndefElts, Ctx);
55170 MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
55171 SDValue CV = DAG.getConstantPool(C, PVT);
55172 MachineFunction &MF = DAG.getMachineFunction();
55173 MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
55174 SDValue Ld = DAG.getLoad(VT, DL, DAG.getEntryNode(), CV, MPI);
55175 SDValue Sub = extractSubVector(Ld, 0, DAG, DL, Op0.getValueSizeInBits());
55176 DAG.ReplaceAllUsesOfValueWith(Op0, Sub);
55177 return Ld;
55181 // If this simple subvector or scalar/subvector broadcast_load is inserted
55182 // into both halves, use a larger broadcast_load. Update other uses to use
55183 // an extracted subvector.
55184 if (IsSplat &&
55185 (VT.is256BitVector() || (VT.is512BitVector() && Subtarget.hasAVX512()))) {
55186 if (ISD::isNormalLoad(Op0.getNode()) ||
55187 Op0.getOpcode() == X86ISD::VBROADCAST_LOAD ||
55188 Op0.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) {
55189 auto *Mem = cast<MemSDNode>(Op0);
55190 unsigned Opc = Op0.getOpcode() == X86ISD::VBROADCAST_LOAD
55191 ? X86ISD::VBROADCAST_LOAD
55192 : X86ISD::SUBV_BROADCAST_LOAD;
55193 if (SDValue BcastLd =
55194 getBROADCAST_LOAD(Opc, DL, VT, Mem->getMemoryVT(), Mem, 0, DAG)) {
55195 SDValue BcastSrc =
55196 extractSubVector(BcastLd, 0, DAG, DL, Op0.getValueSizeInBits());
55197 DAG.ReplaceAllUsesOfValueWith(Op0, BcastSrc);
55198 return BcastLd;
55203 // If we're splatting a 128-bit subvector to 512-bits, use SHUF128 directly.
55204 if (IsSplat && NumOps == 4 && VT.is512BitVector() &&
55205 Subtarget.useAVX512Regs()) {
55206 MVT ShuffleVT = VT.isFloatingPoint() ? MVT::v8f64 : MVT::v8i64;
55207 SDValue Res = widenSubVector(Op0, false, Subtarget, DAG, DL, 512);
55208 Res = DAG.getBitcast(ShuffleVT, Res);
55209 Res = DAG.getNode(X86ISD::SHUF128, DL, ShuffleVT, Res, Res,
55210 getV4X86ShuffleImm8ForMask({0, 0, 0, 0}, DL, DAG));
55211 return DAG.getBitcast(VT, Res);
55214 return SDValue();
55217 static SDValue combineCONCAT_VECTORS(SDNode *N, SelectionDAG &DAG,
55218 TargetLowering::DAGCombinerInfo &DCI,
55219 const X86Subtarget &Subtarget) {
55220 EVT VT = N->getValueType(0);
55221 EVT SrcVT = N->getOperand(0).getValueType();
55222 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55223 SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
55225 if (VT.getVectorElementType() == MVT::i1) {
55226 // Attempt to constant fold.
55227 unsigned SubSizeInBits = SrcVT.getSizeInBits();
55228 APInt Constant = APInt::getZero(VT.getSizeInBits());
55229 for (unsigned I = 0, E = Ops.size(); I != E; ++I) {
55230 auto *C = dyn_cast<ConstantSDNode>(peekThroughBitcasts(Ops[I]));
55231 if (!C) break;
55232 Constant.insertBits(C->getAPIntValue(), I * SubSizeInBits);
55233 if (I == (E - 1)) {
55234 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
55235 if (TLI.isTypeLegal(IntVT))
55236 return DAG.getBitcast(VT, DAG.getConstant(Constant, SDLoc(N), IntVT));
55240 // Don't do anything else for i1 vectors.
55241 return SDValue();
55244 if (Subtarget.hasAVX() && TLI.isTypeLegal(VT) && TLI.isTypeLegal(SrcVT)) {
55245 if (SDValue R = combineConcatVectorOps(SDLoc(N), VT.getSimpleVT(), Ops, DAG,
55246 DCI, Subtarget))
55247 return R;
55250 return SDValue();
55253 static SDValue combineINSERT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
55254 TargetLowering::DAGCombinerInfo &DCI,
55255 const X86Subtarget &Subtarget) {
55256 if (DCI.isBeforeLegalizeOps())
55257 return SDValue();
55259 MVT OpVT = N->getSimpleValueType(0);
55261 bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1;
55263 SDLoc dl(N);
55264 SDValue Vec = N->getOperand(0);
55265 SDValue SubVec = N->getOperand(1);
55267 uint64_t IdxVal = N->getConstantOperandVal(2);
55268 MVT SubVecVT = SubVec.getSimpleValueType();
55270 if (Vec.isUndef() && SubVec.isUndef())
55271 return DAG.getUNDEF(OpVT);
55273 // Inserting undefs/zeros into zeros/undefs is a zero vector.
55274 if ((Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
55275 (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
55276 return getZeroVector(OpVT, Subtarget, DAG, dl);
55278 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
55279 // If we're inserting into a zero vector and then into a larger zero vector,
55280 // just insert into the larger zero vector directly.
55281 if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
55282 ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) {
55283 uint64_t Idx2Val = SubVec.getConstantOperandVal(2);
55284 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
55285 getZeroVector(OpVT, Subtarget, DAG, dl),
55286 SubVec.getOperand(1),
55287 DAG.getIntPtrConstant(IdxVal + Idx2Val, dl));
55290 // If we're inserting into a zero vector and our input was extracted from an
55291 // insert into a zero vector of the same type, and the extraction was at
55292 // least as large as the original insertion, just insert the original
55293 // subvector into a zero vector.
55294 if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 &&
55295 isNullConstant(SubVec.getOperand(1)) &&
55296 SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) {
55297 SDValue Ins = SubVec.getOperand(0);
55298 if (isNullConstant(Ins.getOperand(2)) &&
55299 ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
55300 Ins.getOperand(1).getValueSizeInBits().getFixedValue() <=
55301 SubVecVT.getFixedSizeInBits())
55302 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
55303 getZeroVector(OpVT, Subtarget, DAG, dl),
55304 Ins.getOperand(1), N->getOperand(2));
55308 // Stop here if this is an i1 vector.
55309 if (IsI1Vector)
55310 return SDValue();
55312 // Eliminate an intermediate vector widening:
55313 // insert_subvector X, (insert_subvector undef, Y, 0), Idx -->
55314 // insert_subvector X, Y, Idx
55315 // TODO: This is a more general version of a DAGCombiner fold, can we move it
55316 // there?
55317 if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
55318 SubVec.getOperand(0).isUndef() && isNullConstant(SubVec.getOperand(2)))
55319 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Vec,
55320 SubVec.getOperand(1), N->getOperand(2));
55322 // If this is an insert of an extract, combine to a shuffle. Don't do this
55323 // if the insert or extract can be represented with a subregister operation.
55324 if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
55325 SubVec.getOperand(0).getSimpleValueType() == OpVT &&
55326 (IdxVal != 0 ||
55327 !(Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())))) {
55328 int ExtIdxVal = SubVec.getConstantOperandVal(1);
55329 if (ExtIdxVal != 0) {
55330 int VecNumElts = OpVT.getVectorNumElements();
55331 int SubVecNumElts = SubVecVT.getVectorNumElements();
55332 SmallVector<int, 64> Mask(VecNumElts);
55333 // First create an identity shuffle mask.
55334 for (int i = 0; i != VecNumElts; ++i)
55335 Mask[i] = i;
55336 // Now insert the extracted portion.
55337 for (int i = 0; i != SubVecNumElts; ++i)
55338 Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;
55340 return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
55344 // Match concat_vector style patterns.
55345 SmallVector<SDValue, 2> SubVectorOps;
55346 if (collectConcatOps(N, SubVectorOps, DAG)) {
55347 if (SDValue Fold =
55348 combineConcatVectorOps(dl, OpVT, SubVectorOps, DAG, DCI, Subtarget))
55349 return Fold;
55351 // If we're inserting all zeros into the upper half, change this to
55352 // a concat with zero. We will match this to a move
55353 // with implicit upper bit zeroing during isel.
55354 // We do this here because we don't want combineConcatVectorOps to
55355 // create INSERT_SUBVECTOR from CONCAT_VECTORS.
55356 if (SubVectorOps.size() == 2 &&
55357 ISD::isBuildVectorAllZeros(SubVectorOps[1].getNode()))
55358 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
55359 getZeroVector(OpVT, Subtarget, DAG, dl),
55360 SubVectorOps[0], DAG.getIntPtrConstant(0, dl));
55362 // Attempt to recursively combine to a shuffle.
55363 if (all_of(SubVectorOps, [](SDValue SubOp) {
55364 return isTargetShuffle(SubOp.getOpcode());
55365 })) {
55366 SDValue Op(N, 0);
55367 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
55368 return Res;
55372 // If this is a broadcast insert into an upper undef, use a larger broadcast.
55373 if (Vec.isUndef() && IdxVal != 0 && SubVec.getOpcode() == X86ISD::VBROADCAST)
55374 return DAG.getNode(X86ISD::VBROADCAST, dl, OpVT, SubVec.getOperand(0));
55376 // If this is a broadcast load inserted into an upper undef, use a larger
55377 // broadcast load.
55378 if (Vec.isUndef() && IdxVal != 0 && SubVec.hasOneUse() &&
55379 SubVec.getOpcode() == X86ISD::VBROADCAST_LOAD) {
55380 auto *MemIntr = cast<MemIntrinsicSDNode>(SubVec);
55381 SDVTList Tys = DAG.getVTList(OpVT, MVT::Other);
55382 SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
55383 SDValue BcastLd =
55384 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
55385 MemIntr->getMemoryVT(),
55386 MemIntr->getMemOperand());
55387 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
55388 return BcastLd;
55391 // If we're splatting the lower half subvector of a full vector load into the
55392 // upper half, attempt to create a subvector broadcast.
55393 if (IdxVal == (OpVT.getVectorNumElements() / 2) && SubVec.hasOneUse() &&
55394 Vec.getValueSizeInBits() == (2 * SubVec.getValueSizeInBits())) {
55395 auto *VecLd = dyn_cast<LoadSDNode>(Vec);
55396 auto *SubLd = dyn_cast<LoadSDNode>(SubVec);
55397 if (VecLd && SubLd &&
55398 DAG.areNonVolatileConsecutiveLoads(SubLd, VecLd,
55399 SubVec.getValueSizeInBits() / 8, 0))
55400 return getBROADCAST_LOAD(X86ISD::SUBV_BROADCAST_LOAD, dl, OpVT, SubVecVT,
55401 SubLd, 0, DAG);
55404 return SDValue();
55407 /// If we are extracting a subvector of a vector select and the select condition
55408 /// is composed of concatenated vectors, try to narrow the select width. This
55409 /// is a common pattern for AVX1 integer code because 256-bit selects may be
55410 /// legal, but there is almost no integer math/logic available for 256-bit.
55411 /// This function should only be called with legal types (otherwise, the calls
55412 /// to get simple value types will assert).
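/// For example, with a concatenated condition:
/// (v4i32 (extract_subvector (vselect (v8i32 C), X, Y), 4))
/// --> (vselect (extract_subvector C, 4), (extract_subvector X, 4),
/// (extract_subvector Y, 4))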
55413 static SDValue narrowExtractedVectorSelect(SDNode *Ext, SelectionDAG &DAG) {
55414 SDValue Sel = Ext->getOperand(0);
55415 if (Sel.getOpcode() != ISD::VSELECT ||
55416 !isFreeToSplitVector(Sel.getOperand(0).getNode(), DAG))
55417 return SDValue();
55419 // Note: We assume simple value types because this should only be called with
55420 // legal operations/types.
55421 // TODO: This can be extended to handle extraction to 256-bits.
55422 MVT VT = Ext->getSimpleValueType(0);
55423 if (!VT.is128BitVector())
55424 return SDValue();
55426 MVT SelCondVT = Sel.getOperand(0).getSimpleValueType();
55427 if (!SelCondVT.is256BitVector() && !SelCondVT.is512BitVector())
55428 return SDValue();
55430 MVT WideVT = Ext->getOperand(0).getSimpleValueType();
55431 MVT SelVT = Sel.getSimpleValueType();
55432 assert((SelVT.is256BitVector() || SelVT.is512BitVector()) &&
55433 "Unexpected vector type with legal operations");
55435 unsigned SelElts = SelVT.getVectorNumElements();
55436 unsigned CastedElts = WideVT.getVectorNumElements();
55437 unsigned ExtIdx = Ext->getConstantOperandVal(1);
55438 if (SelElts % CastedElts == 0) {
55439 // The select has the same or more (narrower) elements than the extract
55440 // operand. The extraction index gets scaled by that factor.
55441 ExtIdx *= (SelElts / CastedElts);
55442 } else if (CastedElts % SelElts == 0) {
55443 // The select has fewer (wider) elements than the extract operand. Make sure
55444 // that the extraction index can be divided evenly.
55445 unsigned IndexDivisor = CastedElts / SelElts;
55446 if (ExtIdx % IndexDivisor != 0)
55447 return SDValue();
55448 ExtIdx /= IndexDivisor;
55449 } else {
55450 llvm_unreachable("Element count of simple vector types are not divisible?");
55453 unsigned NarrowingFactor = WideVT.getSizeInBits() / VT.getSizeInBits();
55454 unsigned NarrowElts = SelElts / NarrowingFactor;
55455 MVT NarrowSelVT = MVT::getVectorVT(SelVT.getVectorElementType(), NarrowElts);
55456 SDLoc DL(Ext);
55457 SDValue ExtCond = extract128BitVector(Sel.getOperand(0), ExtIdx, DAG, DL);
55458 SDValue ExtT = extract128BitVector(Sel.getOperand(1), ExtIdx, DAG, DL);
55459 SDValue ExtF = extract128BitVector(Sel.getOperand(2), ExtIdx, DAG, DL);
55460 SDValue NarrowSel = DAG.getSelect(DL, NarrowSelVT, ExtCond, ExtT, ExtF);
55461 return DAG.getBitcast(VT, NarrowSel);
55464 static SDValue combineEXTRACT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
55465 TargetLowering::DAGCombinerInfo &DCI,
55466 const X86Subtarget &Subtarget) {
55467 // For AVX1 only, if we are extracting from a 256-bit and+not (which will
55468 // eventually get combined/lowered into ANDNP) with a concatenated operand,
55469 // split the 'and' into 128-bit ops to avoid the concatenate and extract.
55470 // We let generic combining take over from there to simplify the
55471 // insert/extract and 'not'.
55472 // This pattern emerges during AVX1 legalization. We handle it before lowering
55473 // to avoid complications like splitting constant vector loads.
55475 // Capture the original wide type in the likely case that we need to bitcast
55476 // back to this type.
55477 if (!N->getValueType(0).isSimple())
55478 return SDValue();
55480 MVT VT = N->getSimpleValueType(0);
55481 SDValue InVec = N->getOperand(0);
55482 unsigned IdxVal = N->getConstantOperandVal(1);
55483 SDValue InVecBC = peekThroughBitcasts(InVec);
55484 EVT InVecVT = InVec.getValueType();
55485 unsigned SizeInBits = VT.getSizeInBits();
55486 unsigned InSizeInBits = InVecVT.getSizeInBits();
55487 unsigned NumSubElts = VT.getVectorNumElements();
55488 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55490 if (Subtarget.hasAVX() && !Subtarget.hasAVX2() &&
55491 TLI.isTypeLegal(InVecVT) &&
55492 InSizeInBits == 256 && InVecBC.getOpcode() == ISD::AND) {
55493 auto isConcatenatedNot = [](SDValue V) {
55494 V = peekThroughBitcasts(V);
55495 if (!isBitwiseNot(V))
55496 return false;
55497 SDValue NotOp = V->getOperand(0);
55498 return peekThroughBitcasts(NotOp).getOpcode() == ISD::CONCAT_VECTORS;
55500 if (isConcatenatedNot(InVecBC.getOperand(0)) ||
55501 isConcatenatedNot(InVecBC.getOperand(1))) {
55502 // extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1
55503 SDValue Concat = splitVectorIntBinary(InVecBC, DAG);
55504 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
55505 DAG.getBitcast(InVecVT, Concat), N->getOperand(1));
55509 if (DCI.isBeforeLegalizeOps())
55510 return SDValue();
55512 if (SDValue V = narrowExtractedVectorSelect(N, DAG))
55513 return V;
55515 if (ISD::isBuildVectorAllZeros(InVec.getNode()))
55516 return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
55518 if (ISD::isBuildVectorAllOnes(InVec.getNode())) {
55519 if (VT.getScalarType() == MVT::i1)
55520 return DAG.getConstant(1, SDLoc(N), VT);
55521 return getOnesVector(VT, DAG, SDLoc(N));
55524 if (InVec.getOpcode() == ISD::BUILD_VECTOR)
55525 return DAG.getBuildVector(VT, SDLoc(N),
55526 InVec->ops().slice(IdxVal, NumSubElts));
55528 // If we are extracting from an insert into a larger vector, replace with a
55529 // smaller insert if we don't access less than the original subvector. Don't
55530 // do this for i1 vectors.
55531 // TODO: Relax the matching indices requirement?
55532 if (VT.getVectorElementType() != MVT::i1 &&
55533 InVec.getOpcode() == ISD::INSERT_SUBVECTOR && InVec.hasOneUse() &&
55534 IdxVal == InVec.getConstantOperandVal(2) &&
55535 InVec.getOperand(1).getValueSizeInBits() <= SizeInBits) {
55536 SDLoc DL(N);
55537 SDValue NewExt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT,
55538 InVec.getOperand(0), N->getOperand(1));
55539 unsigned NewIdxVal = InVec.getConstantOperandVal(2) - IdxVal;
55540 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, NewExt,
55541 InVec.getOperand(1),
55542 DAG.getVectorIdxConstant(NewIdxVal, DL));
55545 // If we're extracting an upper subvector from a broadcast, we should just
55546 // extract the lowest subvector instead, which should allow
55547 // SimplifyDemandedVectorElts to do more simplifications.
55548 if (IdxVal != 0 && (InVec.getOpcode() == X86ISD::VBROADCAST ||
55549 InVec.getOpcode() == X86ISD::VBROADCAST_LOAD ||
55550 DAG.isSplatValue(InVec, /*AllowUndefs*/ false)))
55551 return extractSubVector(InVec, 0, DAG, SDLoc(N), SizeInBits);
55553 // If we're extracting a broadcasted subvector, just use the lowest subvector.
55554 if (IdxVal != 0 && InVec.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD &&
55555 cast<MemIntrinsicSDNode>(InVec)->getMemoryVT() == VT)
55556 return extractSubVector(InVec, 0, DAG, SDLoc(N), SizeInBits);
55558 // Attempt to extract from the source of a shuffle vector.
55559 if ((InSizeInBits % SizeInBits) == 0 && (IdxVal % NumSubElts) == 0) {
55560 SmallVector<int, 32> ShuffleMask;
55561 SmallVector<int, 32> ScaledMask;
55562 SmallVector<SDValue, 2> ShuffleInputs;
55563 unsigned NumSubVecs = InSizeInBits / SizeInBits;
55564 // Decode the shuffle mask and scale it so it's shuffling subvectors.
55565 if (getTargetShuffleInputs(InVecBC, ShuffleInputs, ShuffleMask, DAG) &&
55566 scaleShuffleElements(ShuffleMask, NumSubVecs, ScaledMask)) {
55567 unsigned SubVecIdx = IdxVal / NumSubElts;
55568 if (ScaledMask[SubVecIdx] == SM_SentinelUndef)
55569 return DAG.getUNDEF(VT);
55570 if (ScaledMask[SubVecIdx] == SM_SentinelZero)
55571 return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
55572 SDValue Src = ShuffleInputs[ScaledMask[SubVecIdx] / NumSubVecs];
55573 if (Src.getValueSizeInBits() == InSizeInBits) {
55574 unsigned SrcSubVecIdx = ScaledMask[SubVecIdx] % NumSubVecs;
55575 unsigned SrcEltIdx = SrcSubVecIdx * NumSubElts;
55576 return extractSubVector(DAG.getBitcast(InVecVT, Src), SrcEltIdx, DAG,
55577 SDLoc(N), SizeInBits);
55582 // If we're extracting the lowest subvector and we're the only user,
55583 // we may be able to perform this with a smaller vector width.
55584 unsigned InOpcode = InVec.getOpcode();
55585 if (InVec.hasOneUse()) {
55586 if (IdxVal == 0 && VT == MVT::v2f64 && InVecVT == MVT::v4f64) {
55587 // v2f64 CVTDQ2PD(v4i32).
55588 if (InOpcode == ISD::SINT_TO_FP &&
55589 InVec.getOperand(0).getValueType() == MVT::v4i32) {
55590 return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), VT, InVec.getOperand(0));
55592 // v2f64 CVTUDQ2PD(v4i32).
55593 if (InOpcode == ISD::UINT_TO_FP && Subtarget.hasVLX() &&
55594 InVec.getOperand(0).getValueType() == MVT::v4i32) {
55595 return DAG.getNode(X86ISD::CVTUI2P, SDLoc(N), VT, InVec.getOperand(0));
55597 // v2f64 CVTPS2PD(v4f32).
55598 if (InOpcode == ISD::FP_EXTEND &&
55599 InVec.getOperand(0).getValueType() == MVT::v4f32) {
55600 return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), VT, InVec.getOperand(0));
55603 if (IdxVal == 0 &&
55604 (ISD::isExtOpcode(InOpcode) || ISD::isExtVecInRegOpcode(InOpcode)) &&
55605 (SizeInBits == 128 || SizeInBits == 256) &&
55606 InVec.getOperand(0).getValueSizeInBits() >= SizeInBits) {
55607 SDLoc DL(N);
55608 SDValue Ext = InVec.getOperand(0);
55609 if (Ext.getValueSizeInBits() > SizeInBits)
55610 Ext = extractSubVector(Ext, 0, DAG, DL, SizeInBits);
55611 unsigned ExtOp = DAG.getOpcode_EXTEND_VECTOR_INREG(InOpcode);
55612 return DAG.getNode(ExtOp, DL, VT, Ext);
55614 if (IdxVal == 0 && InOpcode == ISD::VSELECT &&
55615 InVec.getOperand(0).getValueType().is256BitVector() &&
55616 InVec.getOperand(1).getValueType().is256BitVector() &&
55617 InVec.getOperand(2).getValueType().is256BitVector()) {
55618 SDLoc DL(N);
55619 SDValue Ext0 = extractSubVector(InVec.getOperand(0), 0, DAG, DL, 128);
55620 SDValue Ext1 = extractSubVector(InVec.getOperand(1), 0, DAG, DL, 128);
55621 SDValue Ext2 = extractSubVector(InVec.getOperand(2), 0, DAG, DL, 128);
55622 return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1, Ext2);
55624 if (IdxVal == 0 && InOpcode == ISD::TRUNCATE && Subtarget.hasVLX() &&
55625 (VT.is128BitVector() || VT.is256BitVector())) {
55626 SDLoc DL(N);
55627 SDValue InVecSrc = InVec.getOperand(0);
55628 unsigned Scale = InVecSrc.getValueSizeInBits() / InSizeInBits;
55629 SDValue Ext = extractSubVector(InVecSrc, 0, DAG, DL, Scale * SizeInBits);
55630 return DAG.getNode(InOpcode, DL, VT, Ext);
55632 if (InOpcode == X86ISD::MOVDDUP &&
55633 (VT.is128BitVector() || VT.is256BitVector())) {
55634 SDLoc DL(N);
55635 SDValue Ext0 =
55636 extractSubVector(InVec.getOperand(0), IdxVal, DAG, DL, SizeInBits);
55637 return DAG.getNode(InOpcode, DL, VT, Ext0);
55641 // Always split vXi64 logical shifts where we're extracting the upper 32-bits
55642 // as this is very likely to fold into a shuffle/truncation.
55643 if ((InOpcode == X86ISD::VSHLI || InOpcode == X86ISD::VSRLI) &&
55644 InVecVT.getScalarSizeInBits() == 64 &&
55645 InVec.getConstantOperandAPInt(1) == 32) {
55646 SDLoc DL(N);
55647 SDValue Ext =
55648 extractSubVector(InVec.getOperand(0), IdxVal, DAG, DL, SizeInBits);
55649 return DAG.getNode(InOpcode, DL, VT, Ext, InVec.getOperand(1));
55652 return SDValue();
55655 static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
55656 EVT VT = N->getValueType(0);
55657 SDValue Src = N->getOperand(0);
55658 SDLoc DL(N);
55660 // If this is a scalar to vector to v1i1 from an AND with 1, bypass the and.
55661 // This occurs frequently in our masked scalar intrinsic code and our
55662 // floating point select lowering with AVX512.
55663 // TODO: SimplifyDemandedBits instead?
55664 if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse() &&
55665 isOneConstant(Src.getOperand(1)))
55666 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Src.getOperand(0));
55668 // Combine scalar_to_vector of an extract_vector_elt into an extract_subvec.
55669 if (VT == MVT::v1i1 && Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
55670 Src.hasOneUse() && Src.getOperand(0).getValueType().isVector() &&
55671 Src.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
55672 isNullConstant(Src.getOperand(1)))
55673 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src.getOperand(0),
55674 Src.getOperand(1));
55676 // Reduce v2i64 to v4i32 if we don't need the upper bits or are known zero.
55677 // TODO: Move to DAGCombine/SimplifyDemandedBits?
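// A 64-bit scalar that is any/zero-extended from at most 32 bits (or whose
// upper bits are known zero) only needs its low 32 bits materialized, so build
// a v4i32 instead (using VZEXT_MOVL to keep the upper bits zero) and bitcast
// back.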
55678 if ((VT == MVT::v2i64 || VT == MVT::v2f64) && Src.hasOneUse()) {
55679 auto IsExt64 = [&DAG](SDValue Op, bool IsZeroExt) {
55680 if (Op.getValueType() != MVT::i64)
55681 return SDValue();
55682 unsigned Opc = IsZeroExt ? ISD::ZERO_EXTEND : ISD::ANY_EXTEND;
55683 if (Op.getOpcode() == Opc &&
55684 Op.getOperand(0).getScalarValueSizeInBits() <= 32)
55685 return Op.getOperand(0);
55686 unsigned Ext = IsZeroExt ? ISD::ZEXTLOAD : ISD::EXTLOAD;
55687 if (auto *Ld = dyn_cast<LoadSDNode>(Op))
55688 if (Ld->getExtensionType() == Ext &&
55689 Ld->getMemoryVT().getScalarSizeInBits() <= 32)
55690 return Op;
55691 if (IsZeroExt) {
55692 KnownBits Known = DAG.computeKnownBits(Op);
55693 if (!Known.isConstant() && Known.countMinLeadingZeros() >= 32)
55694 return Op;
55696 return SDValue();
55699 if (SDValue AnyExt = IsExt64(peekThroughOneUseBitcasts(Src), false))
55700 return DAG.getBitcast(
55701 VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
55702 DAG.getAnyExtOrTrunc(AnyExt, DL, MVT::i32)));
55704 if (SDValue ZeroExt = IsExt64(peekThroughOneUseBitcasts(Src), true))
55705 return DAG.getBitcast(
55706 VT,
55707 DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v4i32,
55708 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
55709 DAG.getZExtOrTrunc(ZeroExt, DL, MVT::i32))));
55712 // Combine (v2i64 (scalar_to_vector (i64 (bitconvert (mmx))))) to MOVQ2DQ.
55713 if (VT == MVT::v2i64 && Src.getOpcode() == ISD::BITCAST &&
55714 Src.getOperand(0).getValueType() == MVT::x86mmx)
55715 return DAG.getNode(X86ISD::MOVQ2DQ, DL, VT, Src.getOperand(0));
55717 // See if we're broadcasting the scalar value, in which case just reuse that.
55718 // Ensure the same SDValue from the SDNode use is being used.
55719 if (VT.getScalarType() == Src.getValueType())
55720 for (SDNode *User : Src->uses())
55721 if (User->getOpcode() == X86ISD::VBROADCAST &&
55722 Src == User->getOperand(0)) {
55723 unsigned SizeInBits = VT.getFixedSizeInBits();
55724 unsigned BroadcastSizeInBits =
55725 User->getValueSizeInBits(0).getFixedValue();
55726 if (BroadcastSizeInBits == SizeInBits)
55727 return SDValue(User, 0);
55728 if (BroadcastSizeInBits > SizeInBits)
55729 return extractSubVector(SDValue(User, 0), 0, DAG, DL, SizeInBits);
55730 // TODO: Handle BroadcastSizeInBits < SizeInBits when we have test
55731 // coverage.
55734 return SDValue();
55737 // Simplify PMULDQ and PMULUDQ operations.
55738 static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
55739 TargetLowering::DAGCombinerInfo &DCI,
55740 const X86Subtarget &Subtarget) {
55741 SDValue LHS = N->getOperand(0);
55742 SDValue RHS = N->getOperand(1);
55744 // Canonicalize constant to RHS.
55745 if (DAG.isConstantIntBuildVectorOrConstantInt(LHS) &&
55746 !DAG.isConstantIntBuildVectorOrConstantInt(RHS))
55747 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
55749 // Multiply by zero.
55750 // Don't return RHS as it may contain UNDEFs.
55751 if (ISD::isBuildVectorAllZeros(RHS.getNode()))
55752 return DAG.getConstant(0, SDLoc(N), N->getValueType(0));
55754 // PMULDQ/PMULUDQ only use the lower 32 bits from each vector element.
55755 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55756 if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(64), DCI))
55757 return SDValue(N, 0);
55759 // If the input is an extend_invec and the SimplifyDemandedBits call didn't
55760 // convert it to any_extend_invec, due to the LegalOperations check, do the
55761 // conversion directly to a vector shuffle manually. This exposes combine
55762 // opportunities missed by combineEXTEND_VECTOR_INREG not calling
55763 // combineX86ShufflesRecursively on SSE4.1 targets.
55764 // FIXME: This is basically a hack around several other issues related to
55765 // ANY_EXTEND_VECTOR_INREG.
55766 if (N->getValueType(0) == MVT::v2i64 && LHS.hasOneUse() &&
55767 (LHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
55768 LHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
55769 LHS.getOperand(0).getValueType() == MVT::v4i32) {
55770 SDLoc dl(N);
55771 LHS = DAG.getVectorShuffle(MVT::v4i32, dl, LHS.getOperand(0),
55772 LHS.getOperand(0), { 0, -1, 1, -1 });
55773 LHS = DAG.getBitcast(MVT::v2i64, LHS);
55774 return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
55776 if (N->getValueType(0) == MVT::v2i64 && RHS.hasOneUse() &&
55777 (RHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
55778 RHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
55779 RHS.getOperand(0).getValueType() == MVT::v4i32) {
55780 SDLoc dl(N);
55781 RHS = DAG.getVectorShuffle(MVT::v4i32, dl, RHS.getOperand(0),
55782 RHS.getOperand(0), { 0, -1, 1, -1 });
55783 RHS = DAG.getBitcast(MVT::v2i64, RHS);
55784 return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
55787 return SDValue();
55790 // Simplify VPMADDUBSW/VPMADDWD operations.
55791 static SDValue combineVPMADD(SDNode *N, SelectionDAG &DAG,
55792 TargetLowering::DAGCombinerInfo &DCI) {
55793 EVT VT = N->getValueType(0);
55794 SDValue LHS = N->getOperand(0);
55795 SDValue RHS = N->getOperand(1);
55797 // Multiply by zero.
55798 // Don't return LHS/RHS as it may contain UNDEFs.
55799 if (ISD::isBuildVectorAllZeros(LHS.getNode()) ||
55800 ISD::isBuildVectorAllZeros(RHS.getNode()))
55801 return DAG.getConstant(0, SDLoc(N), VT);
55803 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55804 APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
55805 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
55806 return SDValue(N, 0);
55808 return SDValue();
55811 static SDValue combineEXTEND_VECTOR_INREG(SDNode *N, SelectionDAG &DAG,
55812 TargetLowering::DAGCombinerInfo &DCI,
55813 const X86Subtarget &Subtarget) {
55814 EVT VT = N->getValueType(0);
55815 SDValue In = N->getOperand(0);
55816 unsigned Opcode = N->getOpcode();
55817 unsigned InOpcode = In.getOpcode();
55818 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55819 SDLoc DL(N);
55821 // Try to merge vector loads and extend_inreg to an extload.
55822 if (!DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(In.getNode()) &&
55823 In.hasOneUse()) {
55824 auto *Ld = cast<LoadSDNode>(In);
55825 if (Ld->isSimple()) {
55826 MVT SVT = In.getSimpleValueType().getVectorElementType();
55827 ISD::LoadExtType Ext = Opcode == ISD::SIGN_EXTEND_VECTOR_INREG
55828 ? ISD::SEXTLOAD
55829 : ISD::ZEXTLOAD;
55830 EVT MemVT = VT.changeVectorElementType(SVT);
55831 if (TLI.isLoadExtLegal(Ext, VT, MemVT)) {
55832 SDValue Load = DAG.getExtLoad(
55833 Ext, DL, VT, Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
55834 MemVT, Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags());
55835 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
55836 return Load;
55841 // Fold EXTEND_VECTOR_INREG(EXTEND_VECTOR_INREG(X)) -> EXTEND_VECTOR_INREG(X).
55842 if (Opcode == InOpcode)
55843 return DAG.getNode(Opcode, DL, VT, In.getOperand(0));
55845 // Fold EXTEND_VECTOR_INREG(EXTRACT_SUBVECTOR(EXTEND(X),0))
55846 // -> EXTEND_VECTOR_INREG(X).
55847 // TODO: Handle non-zero subvector indices.
55848 if (InOpcode == ISD::EXTRACT_SUBVECTOR && In.getConstantOperandVal(1) == 0 &&
55849 In.getOperand(0).getOpcode() == DAG.getOpcode_EXTEND(Opcode) &&
55850 In.getOperand(0).getOperand(0).getValueSizeInBits() ==
55851 In.getValueSizeInBits())
55852 return DAG.getNode(Opcode, DL, VT, In.getOperand(0).getOperand(0));
55854 // Fold EXTEND_VECTOR_INREG(BUILD_VECTOR(X,Y,?,?)) -> BUILD_VECTOR(X,0,Y,0).
55855 // TODO: Move to DAGCombine?
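// e.g. (v4i32 zero_extend_vector_inreg (v8i16 build_vector a,b,c,d,...))
// -> (bitcast (v8i16 build_vector a,0,b,0,c,0,d,0)) on little-endian x86.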
55856 if (!DCI.isBeforeLegalizeOps() && Opcode == ISD::ZERO_EXTEND_VECTOR_INREG &&
55857 In.getOpcode() == ISD::BUILD_VECTOR && In.hasOneUse() &&
55858 In.getValueSizeInBits() == VT.getSizeInBits()) {
55859 unsigned NumElts = VT.getVectorNumElements();
55860 unsigned Scale = VT.getScalarSizeInBits() / In.getScalarValueSizeInBits();
55861 EVT EltVT = In.getOperand(0).getValueType();
55862 SmallVector<SDValue> Elts(Scale * NumElts, DAG.getConstant(0, DL, EltVT));
55863 for (unsigned I = 0; I != NumElts; ++I)
55864 Elts[I * Scale] = In.getOperand(I);
55865 return DAG.getBitcast(VT, DAG.getBuildVector(In.getValueType(), DL, Elts));
55868 // Attempt to combine as a shuffle on SSE41+ targets.
55869 if (Subtarget.hasSSE41()) {
55870 SDValue Op(N, 0);
55871 if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
55872 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
55873 return Res;
55876 return SDValue();
55879 static SDValue combineKSHIFT(SDNode *N, SelectionDAG &DAG,
55880 TargetLowering::DAGCombinerInfo &DCI) {
55881 EVT VT = N->getValueType(0);
55883 if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
55884 return DAG.getConstant(0, SDLoc(N), VT);
55886 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55887 APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
55888 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
55889 return SDValue(N, 0);
55891 return SDValue();
55894 // Optimize (fp16_to_fp (fp_to_fp16 X)) to VCVTPS2PH followed by VCVTPH2PS.
55895 // Done as a combine because the lowerings for fp16_to_fp and fp_to_fp16 produce
55896 // extra instructions between the conversions due to going to scalar and back.
55897 static SDValue combineFP16_TO_FP(SDNode *N, SelectionDAG &DAG,
55898 const X86Subtarget &Subtarget) {
55899 if (Subtarget.useSoftFloat() || !Subtarget.hasF16C())
55900 return SDValue();
55902 if (N->getOperand(0).getOpcode() != ISD::FP_TO_FP16)
55903 return SDValue();
55905 if (N->getValueType(0) != MVT::f32 ||
55906 N->getOperand(0).getOperand(0).getValueType() != MVT::f32)
55907 return SDValue();
55909 SDLoc dl(N);
55910 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32,
55911 N->getOperand(0).getOperand(0));
55912 Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
55913 DAG.getTargetConstant(4, dl, MVT::i32));
55914 Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
55915 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
55916 DAG.getIntPtrConstant(0, dl));
55919 static SDValue combineFP_EXTEND(SDNode *N, SelectionDAG &DAG,
55920 const X86Subtarget &Subtarget) {
55921 if (!Subtarget.hasF16C() || Subtarget.useSoftFloat())
55922 return SDValue();
55924 if (Subtarget.hasFP16())
55925 return SDValue();
55927 bool IsStrict = N->isStrictFPOpcode();
55928 EVT VT = N->getValueType(0);
55929 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
55930 EVT SrcVT = Src.getValueType();
55932 if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::f16)
55933 return SDValue();
55935 if (VT.getVectorElementType() != MVT::f32 &&
55936 VT.getVectorElementType() != MVT::f64)
55937 return SDValue();
55939 unsigned NumElts = VT.getVectorNumElements();
55940 if (NumElts == 1 || !isPowerOf2_32(NumElts))
55941 return SDValue();
55943 SDLoc dl(N);
55945 // Convert the input to vXi16.
55946 EVT IntVT = SrcVT.changeVectorElementTypeToInteger();
55947 Src = DAG.getBitcast(IntVT, Src);
55949 // Widen to at least 8 input elements.
55950 if (NumElts < 8) {
55951 unsigned NumConcats = 8 / NumElts;
55952 SDValue Fill = NumElts == 4 ? DAG.getUNDEF(IntVT)
55953 : DAG.getConstant(0, dl, IntVT);
55954 SmallVector<SDValue, 4> Ops(NumConcats, Fill);
55955 Ops[0] = Src;
55956 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, Ops);
55959 // Destination is vXf32 with at least 4 elements.
55960 EVT CvtVT = EVT::getVectorVT(*DAG.getContext(), MVT::f32,
55961 std::max(4U, NumElts));
55962 SDValue Cvt, Chain;
55963 if (IsStrict) {
55964 Cvt = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {CvtVT, MVT::Other},
55965 {N->getOperand(0), Src});
55966 Chain = Cvt.getValue(1);
55967 } else {
55968 Cvt = DAG.getNode(X86ISD::CVTPH2PS, dl, CvtVT, Src);
55971 if (NumElts < 4) {
55972 assert(NumElts == 2 && "Unexpected size");
55973 Cvt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2f32, Cvt,
55974 DAG.getIntPtrConstant(0, dl));
55977 if (IsStrict) {
55978 // Extend to the original VT if necessary.
55979 if (Cvt.getValueType() != VT) {
55980 Cvt = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {VT, MVT::Other},
55981 {Chain, Cvt});
55982 Chain = Cvt.getValue(1);
55984 return DAG.getMergeValues({Cvt, Chain}, dl);
55987 // Extend to the original VT if necessary.
55988 return DAG.getNode(ISD::FP_EXTEND, dl, VT, Cvt);
55989 }
55991 // Try to find a larger VBROADCAST_LOAD/SUBV_BROADCAST_LOAD that we can extract
55992 // from. Limit this to cases where the loads have the same input chain and the
55993 // output chains are unused. This avoids any memory ordering issues.
55994 static SDValue combineBROADCAST_LOAD(SDNode *N, SelectionDAG &DAG,
55995 TargetLowering::DAGCombinerInfo &DCI) {
55996 assert((N->getOpcode() == X86ISD::VBROADCAST_LOAD ||
55997 N->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) &&
55998 "Unknown broadcast load type");
56000 // Only do this if the chain result is unused.
56001 if (N->hasAnyUseOfValue(1))
56002 return SDValue();
56004 auto *MemIntrin = cast<MemIntrinsicSDNode>(N);
56006 SDValue Ptr = MemIntrin->getBasePtr();
56007 SDValue Chain = MemIntrin->getChain();
56008 EVT VT = N->getSimpleValueType(0);
56009 EVT MemVT = MemIntrin->getMemoryVT();
56011 // Look at other users of our base pointer and try to find a wider broadcast.
56012 // The input chain and the size of the memory VT must match.
56013 for (SDNode *User : Ptr->uses())
56014 if (User != N && User->getOpcode() == N->getOpcode() &&
56015 cast<MemIntrinsicSDNode>(User)->getBasePtr() == Ptr &&
56016 cast<MemIntrinsicSDNode>(User)->getChain() == Chain &&
56017 cast<MemIntrinsicSDNode>(User)->getMemoryVT().getSizeInBits() ==
56018 MemVT.getSizeInBits() &&
56019 !User->hasAnyUseOfValue(1) &&
56020 User->getValueSizeInBits(0).getFixedValue() > VT.getFixedSizeInBits()) {
56021 SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, SDLoc(N),
56022 VT.getSizeInBits());
56023 Extract = DAG.getBitcast(VT, Extract);
56024 return DCI.CombineTo(N, Extract, SDValue(User, 1));
56025 }
56027 return SDValue();
56028 }
56030 static SDValue combineFP_ROUND(SDNode *N, SelectionDAG &DAG,
56031 const X86Subtarget &Subtarget) {
56032 if (!Subtarget.hasF16C() || Subtarget.useSoftFloat())
56033 return SDValue();
56035 bool IsStrict = N->isStrictFPOpcode();
56036 EVT VT = N->getValueType(0);
56037 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
56038 EVT SrcVT = Src.getValueType();
56040 if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
56041 SrcVT.getVectorElementType() != MVT::f32)
56042 return SDValue();
56044 SDLoc dl(N);
56046 SDValue Cvt, Chain;
56047 unsigned NumElts = VT.getVectorNumElements();
56048 if (Subtarget.hasFP16()) {
56049 // Combine (v8f16 fp_round(concat_vectors(v4f32 (xint_to_fp v4i64), ..)))
56050 // into (v8f16 vector_shuffle(v8f16 (CVTXI2P v4i64), ..))
56051 if (NumElts == 8 && Src.getOpcode() == ISD::CONCAT_VECTORS) {
56052 SDValue Cvt0, Cvt1;
56053 SDValue Op0 = Src.getOperand(0);
56054 SDValue Op1 = Src.getOperand(1);
56055 bool IsOp0Strict = Op0->isStrictFPOpcode();
56056 if (Op0.getOpcode() != Op1.getOpcode() ||
56057 Op0.getOperand(IsOp0Strict ? 1 : 0).getValueType() != MVT::v4i64 ||
56058 Op1.getOperand(IsOp0Strict ? 1 : 0).getValueType() != MVT::v4i64) {
56059 return SDValue();
56061 int Mask[8] = {0, 1, 2, 3, 8, 9, 10, 11};
56062 if (IsStrict) {
56063 assert(IsOp0Strict && "Op0 must be strict node");
56064 unsigned Opc = Op0.getOpcode() == ISD::STRICT_SINT_TO_FP
56065 ? X86ISD::STRICT_CVTSI2P
56066 : X86ISD::STRICT_CVTUI2P;
56067 Cvt0 = DAG.getNode(Opc, dl, {MVT::v8f16, MVT::Other},
56068 {Op0.getOperand(0), Op0.getOperand(1)});
56069 Cvt1 = DAG.getNode(Opc, dl, {MVT::v8f16, MVT::Other},
56070 {Op1.getOperand(0), Op1.getOperand(1)});
56071 Cvt = DAG.getVectorShuffle(MVT::v8f16, dl, Cvt0, Cvt1, Mask);
56072 return DAG.getMergeValues({Cvt, Cvt0.getValue(1)}, dl);
56074 unsigned Opc = Op0.getOpcode() == ISD::SINT_TO_FP ? X86ISD::CVTSI2P
56075 : X86ISD::CVTUI2P;
56076 Cvt0 = DAG.getNode(Opc, dl, MVT::v8f16, Op0.getOperand(0));
56077 Cvt1 = DAG.getNode(Opc, dl, MVT::v8f16, Op1.getOperand(0));
56078 return Cvt = DAG.getVectorShuffle(MVT::v8f16, dl, Cvt0, Cvt1, Mask);
56080 return SDValue();
56083 if (NumElts == 1 || !isPowerOf2_32(NumElts))
56084 return SDValue();
56086 // Widen to at least 4 input elements.
56087 if (NumElts < 4)
56088 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
56089 DAG.getConstantFP(0.0, dl, SrcVT));
56091 // Destination is v8i16 with at least 8 elements.
56092 EVT CvtVT =
56093 EVT::getVectorVT(*DAG.getContext(), MVT::i16, std::max(8U, NumElts));
56094 SDValue Rnd = DAG.getTargetConstant(4, dl, MVT::i32);
56095 if (IsStrict) {
56096 Cvt = DAG.getNode(X86ISD::STRICT_CVTPS2PH, dl, {CvtVT, MVT::Other},
56097 {N->getOperand(0), Src, Rnd});
56098 Chain = Cvt.getValue(1);
56099 } else {
56100 Cvt = DAG.getNode(X86ISD::CVTPS2PH, dl, CvtVT, Src, Rnd);
56103 // Extract down to real number of elements.
56104 if (NumElts < 8) {
56105 EVT IntVT = VT.changeVectorElementTypeToInteger();
56106 Cvt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, IntVT, Cvt,
56107 DAG.getIntPtrConstant(0, dl));
56110 Cvt = DAG.getBitcast(VT, Cvt);
56112 if (IsStrict)
56113 return DAG.getMergeValues({Cvt, Chain}, dl);
56115 return Cvt;
56116 }
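// Combine X86ISD::MOVDQ2Q: currently this only folds a MOVDQ2Q of a one-use
// simple vector load into a direct x86mmx load.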
56118 static SDValue combineMOVDQ2Q(SDNode *N, SelectionDAG &DAG) {
56119 SDValue Src = N->getOperand(0);
56121 // Turn MOVDQ2Q+simple_load into an mmx load.
56122 if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
56123 LoadSDNode *LN = cast<LoadSDNode>(Src.getNode());
56125 if (LN->isSimple()) {
56126 SDValue NewLd = DAG.getLoad(MVT::x86mmx, SDLoc(N), LN->getChain(),
56127 LN->getBasePtr(),
56128 LN->getPointerInfo(),
56129 LN->getOriginalAlign(),
56130 LN->getMemOperand()->getFlags());
56131 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), NewLd.getValue(1));
56132 return NewLd;
56133 }
56134 }
56136 return SDValue();
56137 }
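// Combine X86ISD::PDEP: let demanded-bits simplification trim the node based
// on which source bits the PDEP mask actually deposits into the result.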
56139 static SDValue combinePDEP(SDNode *N, SelectionDAG &DAG,
56140 TargetLowering::DAGCombinerInfo &DCI) {
56141 unsigned NumBits = N->getSimpleValueType(0).getSizeInBits();
56142 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
56143 if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumBits), DCI))
56144 return SDValue(N, 0);
56146 return SDValue();
56147 }
56149 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
56150 DAGCombinerInfo &DCI) const {
56151 SelectionDAG &DAG = DCI.DAG;
56152 switch (N->getOpcode()) {
56153 default: break;
56154 case ISD::SCALAR_TO_VECTOR:
56155 return combineScalarToVector(N, DAG);
56156 case ISD::EXTRACT_VECTOR_ELT:
56157 case X86ISD::PEXTRW:
56158 case X86ISD::PEXTRB:
56159 return combineExtractVectorElt(N, DAG, DCI, Subtarget);
56160 case ISD::CONCAT_VECTORS:
56161 return combineCONCAT_VECTORS(N, DAG, DCI, Subtarget);
56162 case ISD::INSERT_SUBVECTOR:
56163 return combineINSERT_SUBVECTOR(N, DAG, DCI, Subtarget);
56164 case ISD::EXTRACT_SUBVECTOR:
56165 return combineEXTRACT_SUBVECTOR(N, DAG, DCI, Subtarget);
56166 case ISD::VSELECT:
56167 case ISD::SELECT:
56168 case X86ISD::BLENDV: return combineSelect(N, DAG, DCI, Subtarget);
56169 case ISD::BITCAST: return combineBitcast(N, DAG, DCI, Subtarget);
56170 case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
56171 case X86ISD::CMP: return combineCMP(N, DAG, Subtarget);
56172 case ISD::ADD: return combineAdd(N, DAG, DCI, Subtarget);
56173 case ISD::SUB: return combineSub(N, DAG, DCI, Subtarget);
56174 case X86ISD::ADD:
56175 case X86ISD::SUB: return combineX86AddSub(N, DAG, DCI);
56176 case X86ISD::SBB: return combineSBB(N, DAG);
56177 case X86ISD::ADC: return combineADC(N, DAG, DCI);
56178 case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget);
56179 case ISD::SHL: return combineShiftLeft(N, DAG);
56180 case ISD::SRA: return combineShiftRightArithmetic(N, DAG, Subtarget);
56181 case ISD::SRL: return combineShiftRightLogical(N, DAG, DCI, Subtarget);
56182 case ISD::AND: return combineAnd(N, DAG, DCI, Subtarget);
56183 case ISD::OR: return combineOr(N, DAG, DCI, Subtarget);
56184 case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget);
56185 case ISD::BITREVERSE: return combineBITREVERSE(N, DAG, DCI, Subtarget);
56186 case X86ISD::BEXTR:
56187 case X86ISD::BEXTRI: return combineBEXTR(N, DAG, DCI, Subtarget);
56188 case ISD::LOAD: return combineLoad(N, DAG, DCI, Subtarget);
56189 case ISD::MLOAD: return combineMaskedLoad(N, DAG, DCI, Subtarget);
56190 case ISD::STORE: return combineStore(N, DAG, DCI, Subtarget);
56191 case ISD::MSTORE: return combineMaskedStore(N, DAG, DCI, Subtarget);
56192 case X86ISD::VEXTRACT_STORE:
56193 return combineVEXTRACT_STORE(N, DAG, DCI, Subtarget);
56194 case ISD::SINT_TO_FP:
56195 case ISD::STRICT_SINT_TO_FP:
56196 return combineSIntToFP(N, DAG, DCI, Subtarget);
56197 case ISD::UINT_TO_FP:
56198 case ISD::STRICT_UINT_TO_FP:
56199 return combineUIntToFP(N, DAG, Subtarget);
56200 case ISD::FADD:
56201 case ISD::FSUB: return combineFaddFsub(N, DAG, Subtarget);
56202 case X86ISD::VFCMULC:
56203 case X86ISD::VFMULC: return combineFMulcFCMulc(N, DAG, Subtarget);
56204 case ISD::FNEG: return combineFneg(N, DAG, DCI, Subtarget);
56205 case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget);
56206 case X86ISD::VTRUNC: return combineVTRUNC(N, DAG, DCI);
56207 case X86ISD::ANDNP: return combineAndnp(N, DAG, DCI, Subtarget);
56208 case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget);
56209 case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget);
56210 case X86ISD::FXOR:
56211 case X86ISD::FOR: return combineFOr(N, DAG, DCI, Subtarget);
56212 case X86ISD::FMIN:
56213 case X86ISD::FMAX: return combineFMinFMax(N, DAG);
56214 case ISD::FMINNUM:
56215 case ISD::FMAXNUM: return combineFMinNumFMaxNum(N, DAG, Subtarget);
56216 case X86ISD::CVTSI2P:
56217 case X86ISD::CVTUI2P: return combineX86INT_TO_FP(N, DAG, DCI);
56218 case X86ISD::CVTP2SI:
56219 case X86ISD::CVTP2UI:
56220 case X86ISD::STRICT_CVTTP2SI:
56221 case X86ISD::CVTTP2SI:
56222 case X86ISD::STRICT_CVTTP2UI:
56223 case X86ISD::CVTTP2UI:
56224 return combineCVTP2I_CVTTP2I(N, DAG, DCI);
56225 case X86ISD::STRICT_CVTPH2PS:
56226 case X86ISD::CVTPH2PS: return combineCVTPH2PS(N, DAG, DCI);
56227 case X86ISD::BT: return combineBT(N, DAG, DCI);
56228 case ISD::ANY_EXTEND:
56229 case ISD::ZERO_EXTEND: return combineZext(N, DAG, DCI, Subtarget);
56230 case ISD::SIGN_EXTEND: return combineSext(N, DAG, DCI, Subtarget);
56231 case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
56232 case ISD::ANY_EXTEND_VECTOR_INREG:
56233 case ISD::SIGN_EXTEND_VECTOR_INREG:
56234 case ISD::ZERO_EXTEND_VECTOR_INREG:
56235 return combineEXTEND_VECTOR_INREG(N, DAG, DCI, Subtarget);
56236 case ISD::SETCC: return combineSetCC(N, DAG, DCI, Subtarget);
56237 case X86ISD::SETCC: return combineX86SetCC(N, DAG, Subtarget);
56238 case X86ISD::BRCOND: return combineBrCond(N, DAG, Subtarget);
56239 case X86ISD::PACKSS:
56240 case X86ISD::PACKUS: return combineVectorPack(N, DAG, DCI, Subtarget);
56241 case X86ISD::HADD:
56242 case X86ISD::HSUB:
56243 case X86ISD::FHADD:
56244 case X86ISD::FHSUB: return combineVectorHADDSUB(N, DAG, DCI, Subtarget);
56245 case X86ISD::VSHL:
56246 case X86ISD::VSRA:
56247 case X86ISD::VSRL:
56248 return combineVectorShiftVar(N, DAG, DCI, Subtarget);
56249 case X86ISD::VSHLI:
56250 case X86ISD::VSRAI:
56251 case X86ISD::VSRLI:
56252 return combineVectorShiftImm(N, DAG, DCI, Subtarget);
56253 case ISD::INSERT_VECTOR_ELT:
56254 case X86ISD::PINSRB:
56255 case X86ISD::PINSRW: return combineVectorInsert(N, DAG, DCI, Subtarget);
56256 case X86ISD::SHUFP: // Handle all target specific shuffles
56257 case X86ISD::INSERTPS:
56258 case X86ISD::EXTRQI:
56259 case X86ISD::INSERTQI:
56260 case X86ISD::VALIGN:
56261 case X86ISD::PALIGNR:
56262 case X86ISD::VSHLDQ:
56263 case X86ISD::VSRLDQ:
56264 case X86ISD::BLENDI:
56265 case X86ISD::UNPCKH:
56266 case X86ISD::UNPCKL:
56267 case X86ISD::MOVHLPS:
56268 case X86ISD::MOVLHPS:
56269 case X86ISD::PSHUFB:
56270 case X86ISD::PSHUFD:
56271 case X86ISD::PSHUFHW:
56272 case X86ISD::PSHUFLW:
56273 case X86ISD::MOVSHDUP:
56274 case X86ISD::MOVSLDUP:
56275 case X86ISD::MOVDDUP:
56276 case X86ISD::MOVSS:
56277 case X86ISD::MOVSD:
56278 case X86ISD::MOVSH:
56279 case X86ISD::VBROADCAST:
56280 case X86ISD::VPPERM:
56281 case X86ISD::VPERMI:
56282 case X86ISD::VPERMV:
56283 case X86ISD::VPERMV3:
56284 case X86ISD::VPERMIL2:
56285 case X86ISD::VPERMILPI:
56286 case X86ISD::VPERMILPV:
56287 case X86ISD::VPERM2X128:
56288 case X86ISD::SHUF128:
56289 case X86ISD::VZEXT_MOVL:
56290 case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI, Subtarget);
56291 case X86ISD::FMADD_RND:
56292 case X86ISD::FMSUB:
56293 case X86ISD::STRICT_FMSUB:
56294 case X86ISD::FMSUB_RND:
56295 case X86ISD::FNMADD:
56296 case X86ISD::STRICT_FNMADD:
56297 case X86ISD::FNMADD_RND:
56298 case X86ISD::FNMSUB:
56299 case X86ISD::STRICT_FNMSUB:
56300 case X86ISD::FNMSUB_RND:
56301 case ISD::FMA:
56302 case ISD::STRICT_FMA: return combineFMA(N, DAG, DCI, Subtarget);
56303 case X86ISD::FMADDSUB_RND:
56304 case X86ISD::FMSUBADD_RND:
56305 case X86ISD::FMADDSUB:
56306 case X86ISD::FMSUBADD: return combineFMADDSUB(N, DAG, DCI);
56307 case X86ISD::MOVMSK: return combineMOVMSK(N, DAG, DCI, Subtarget);
56308 case X86ISD::TESTP: return combineTESTP(N, DAG, DCI, Subtarget);
56309 case X86ISD::MGATHER:
56310 case X86ISD::MSCATTER: return combineX86GatherScatter(N, DAG, DCI);
56311 case ISD::MGATHER:
56312 case ISD::MSCATTER: return combineGatherScatter(N, DAG, DCI);
56313 case X86ISD::PCMPEQ:
56314 case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
56315 case X86ISD::PMULDQ:
56316 case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI, Subtarget);
56317 case X86ISD::VPMADDUBSW:
56318 case X86ISD::VPMADDWD: return combineVPMADD(N, DAG, DCI);
56319 case X86ISD::KSHIFTL:
56320 case X86ISD::KSHIFTR: return combineKSHIFT(N, DAG, DCI);
56321 case ISD::FP16_TO_FP: return combineFP16_TO_FP(N, DAG, Subtarget);
56322 case ISD::STRICT_FP_EXTEND:
56323 case ISD::FP_EXTEND: return combineFP_EXTEND(N, DAG, Subtarget);
56324 case ISD::STRICT_FP_ROUND:
56325 case ISD::FP_ROUND: return combineFP_ROUND(N, DAG, Subtarget);
56326 case X86ISD::VBROADCAST_LOAD:
56327 case X86ISD::SUBV_BROADCAST_LOAD: return combineBROADCAST_LOAD(N, DAG, DCI);
56328 case X86ISD::MOVDQ2Q: return combineMOVDQ2Q(N, DAG);
56329 case X86ISD::PDEP: return combinePDEP(N, DAG, DCI);
56330 }
56332 return SDValue();
56333 }
56335 bool X86TargetLowering::preferABDSToABSWithNSW(EVT VT) const {
56336 return false;
56337 }
56339 // Prefer (non-AVX512) vector TRUNCATE(SIGN_EXTEND_INREG(X)) to use of PACKSS.
56340 bool X86TargetLowering::preferSextInRegOfTruncate(EVT TruncVT, EVT VT,
56341 EVT ExtVT) const {
56342 return Subtarget.hasAVX512() || !VT.isVector();
56343 }
56345 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
56346 if (!isTypeLegal(VT))
56347 return false;
56349 // There are no vXi8 shifts.
56350 if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
56351 return false;
56353 // TODO: Almost no 8-bit ops are desirable because they have no actual
56354 // size/speed advantages vs. 32-bit ops, but they do have a major
56355 // potential disadvantage by causing partial register stalls.
56357 // 8-bit multiply/shl is probably not cheaper than 32-bit multiply/shl, and
56358 // we have specializations to turn 32-bit multiply/shl into LEA or other ops.
56359 // Also, see the comment in "IsDesirableToPromoteOp" - where we additionally
56360 // check for a constant operand to the multiply.
56361 if ((Opc == ISD::MUL || Opc == ISD::SHL) && VT == MVT::i8)
56362 return false;
56364 // i16 instruction encodings are longer and some i16 instructions are slow,
56365 // so those are not desirable.
56366 if (VT == MVT::i16) {
56367 switch (Opc) {
56368 default:
56369 break;
56370 case ISD::LOAD:
56371 case ISD::SIGN_EXTEND:
56372 case ISD::ZERO_EXTEND:
56373 case ISD::ANY_EXTEND:
56374 case ISD::SHL:
56375 case ISD::SRA:
56376 case ISD::SRL:
56377 case ISD::SUB:
56378 case ISD::ADD:
56379 case ISD::MUL:
56380 case ISD::AND:
56381 case ISD::OR:
56382 case ISD::XOR:
56383 return false;
56384 }
56385 }
56387 // Any legal type not explicitly accounted for above here is desirable.
56388 return true;
56389 }
56391 SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc &dl,
56392 SDValue Value, SDValue Addr,
56393 int JTI,
56394 SelectionDAG &DAG) const {
56395 const Module *M = DAG.getMachineFunction().getMMI().getModule();
56396 Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
56397 if (IsCFProtectionSupported) {
56398 // When control-flow branch protection is enabled, we need to add a
56399 // notrack prefix to the indirect branch.
56400 // To do that we create an NT_BRIND SDNode.
56401 // Upon ISel, the pattern will convert it to a jmp with the NoTrack prefix.
56402 SDValue JTInfo = DAG.getJumpTableDebugInfo(JTI, Value, dl);
56403 return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, JTInfo, Addr);
56404 }
56406 return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, JTI, DAG);
56407 }
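// Decide which and/or-of-setcc folds are desirable: for vector compares allow
// the NotAnd form (plus ABS when it is legal for the operand type); for scalar
// integer compares prefer AddAnd (see the comment below).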
56409 TargetLowering::AndOrSETCCFoldKind
56410 X86TargetLowering::isDesirableToCombineLogicOpOfSETCC(
56411 const SDNode *LogicOp, const SDNode *SETCC0, const SDNode *SETCC1) const {
56412 using AndOrSETCCFoldKind = TargetLowering::AndOrSETCCFoldKind;
56413 EVT VT = LogicOp->getValueType(0);
56414 EVT OpVT = SETCC0->getOperand(0).getValueType();
56415 if (!VT.isInteger())
56416 return AndOrSETCCFoldKind::None;
56418 if (VT.isVector())
56419 return AndOrSETCCFoldKind(AndOrSETCCFoldKind::NotAnd |
56420 (isOperationLegal(ISD::ABS, OpVT)
56421 ? AndOrSETCCFoldKind::ABS
56422 : AndOrSETCCFoldKind::None));
56424 // Don't use `NotAnd` as even though `not` is generally shorter code size than
56425 // `add`, `add` can lower to LEA which can save moves / spills. Any case where
56426 // `NotAnd` applies, `AddAnd` does as well.
56427 // TODO: Currently we lower (icmp eq/ne (and ~X, Y), 0) -> `test (not X), Y`;
56428 // if we change that to `andn Y, X` it may be worth preferring `NotAnd` here.
56429 return AndOrSETCCFoldKind::AddAnd;
56430 }
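// Return true if promoting this narrow operation to i32 is expected to be
// profitable; only i16 ops and i8 multiplies by a constant are considered, and
// promotion is skipped when it would break load folding or RMW patterns.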
56432 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
56433 EVT VT = Op.getValueType();
56434 bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL &&
56435 isa<ConstantSDNode>(Op.getOperand(1));
56437 // i16 is legal, but undesirable since i16 instruction encodings are longer
56438 // and some i16 instructions are slow.
56439 // 8-bit multiply-by-constant can usually be expanded to something cheaper
56440 // using LEA and/or other ALU ops.
56441 if (VT != MVT::i16 && !Is8BitMulByConstant)
56442 return false;
56444 auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
56445 if (!Op.hasOneUse())
56446 return false;
56447 SDNode *User = *Op->use_begin();
56448 if (!ISD::isNormalStore(User))
56449 return false;
56450 auto *Ld = cast<LoadSDNode>(Load);
56451 auto *St = cast<StoreSDNode>(User);
56452 return Ld->getBasePtr() == St->getBasePtr();
56453 };
56455 auto IsFoldableAtomicRMW = [](SDValue Load, SDValue Op) {
56456 if (!Load.hasOneUse() || Load.getOpcode() != ISD::ATOMIC_LOAD)
56457 return false;
56458 if (!Op.hasOneUse())
56459 return false;
56460 SDNode *User = *Op->use_begin();
56461 if (User->getOpcode() != ISD::ATOMIC_STORE)
56462 return false;
56463 auto *Ld = cast<AtomicSDNode>(Load);
56464 auto *St = cast<AtomicSDNode>(User);
56465 return Ld->getBasePtr() == St->getBasePtr();
56466 };
56468 bool Commute = false;
56469 switch (Op.getOpcode()) {
56470 default: return false;
56471 case ISD::SIGN_EXTEND:
56472 case ISD::ZERO_EXTEND:
56473 case ISD::ANY_EXTEND:
56474 break;
56475 case ISD::SHL:
56476 case ISD::SRA:
56477 case ISD::SRL: {
56478 SDValue N0 = Op.getOperand(0);
56479 // Look out for (store (shl (load), x)).
56480 if (X86::mayFoldLoad(N0, Subtarget) && IsFoldableRMW(N0, Op))
56481 return false;
56482 break;
56483 }
56484 case ISD::ADD:
56485 case ISD::MUL:
56486 case ISD::AND:
56487 case ISD::OR:
56488 case ISD::XOR:
56489 Commute = true;
56490 [[fallthrough]];
56491 case ISD::SUB: {
56492 SDValue N0 = Op.getOperand(0);
56493 SDValue N1 = Op.getOperand(1);
56494 // Avoid disabling potential load folding opportunities.
56495 if (X86::mayFoldLoad(N1, Subtarget) &&
56496 (!Commute || !isa<ConstantSDNode>(N0) ||
56497 (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op))))
56498 return false;
56499 if (X86::mayFoldLoad(N0, Subtarget) &&
56500 ((Commute && !isa<ConstantSDNode>(N1)) ||
56501 (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op))))
56502 return false;
56503 if (IsFoldableAtomicRMW(N0, Op) ||
56504 (Commute && IsFoldableAtomicRMW(N1, Op)))
56505 return false;
56506 }
56507 }
56509 PVT = MVT::i32;
56510 return true;
56511 }
56513 //===----------------------------------------------------------------------===//
56514 // X86 Inline Assembly Support
56515 //===----------------------------------------------------------------------===//
56517 // Helper to match a string separated by whitespace.
56518 static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
56519 S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.
56521 for (StringRef Piece : Pieces) {
56522 if (!S.starts_with(Piece)) // Check if the piece matches.
56523 return false;
56525 S = S.substr(Piece.size());
56526 StringRef::size_type Pos = S.find_first_not_of(" \t");
56527 if (Pos == 0) // We matched a prefix.
56528 return false;
56530 S = S.substr(Pos);
56531 }
56533 return S.empty();
56534 }
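// Return true if the inline asm clobber list names the flag registers:
// "~{cc}", "~{flags}" and "~{fpsr}" (plus "~{dirflag}" for 4-entry lists).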
56536 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
56538 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
56539 if (llvm::is_contained(AsmPieces, "~{cc}") &&
56540 llvm::is_contained(AsmPieces, "~{flags}") &&
56541 llvm::is_contained(AsmPieces, "~{fpsr}")) {
56543 if (AsmPieces.size() == 3)
56544 return true;
56545 else if (llvm::is_contained(AsmPieces, "~{dirflag}"))
56546 return true;
56547 }
56548 }
56549 return false;
56550 }
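// Try to replace trivial byte-swapping inline asm (e.g. "bswap $0" or the
// "rorw $$8, ${0:w}" idiom) with a call to the llvm.bswap intrinsic.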
56552 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
56553 InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());
56555 const std::string &AsmStr = IA->getAsmString();
56557 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
56558 if (!Ty || Ty->getBitWidth() % 16 != 0)
56559 return false;
56561 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
56562 SmallVector<StringRef, 4> AsmPieces;
56563 SplitString(AsmStr, AsmPieces, ";\n");
56565 switch (AsmPieces.size()) {
56566 default: return false;
56567 case 1:
56568 // FIXME: this should verify that we are targeting a 486 or better. If not,
56569 // we will turn this bswap into something that will be lowered to logical
56570 // ops instead of emitting the bswap asm. For now, we don't support 486 or
56571 // lower so don't worry about this.
56572 // bswap $0
56573 if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
56574 matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
56575 matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
56576 matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
56577 matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
56578 matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
56579 // No need to check constraints, nothing other than the equivalent of
56580 // "=r,0" would be valid here.
56581 return IntrinsicLowering::LowerToByteSwap(CI);
56584 // rorw $$8, ${0:w} --> llvm.bswap.i16
56585 if (CI->getType()->isIntegerTy(16) &&
56586 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
56587 (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
56588 matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
56589 AsmPieces.clear();
56590 StringRef ConstraintsStr = IA->getConstraintString();
56591 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
56592 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
56593 if (clobbersFlagRegisters(AsmPieces))
56594 return IntrinsicLowering::LowerToByteSwap(CI);
56596 break;
56597 case 3:
56598 if (CI->getType()->isIntegerTy(32) &&
56599 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
56600 matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
56601 matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
56602 matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
56603 AsmPieces.clear();
56604 StringRef ConstraintsStr = IA->getConstraintString();
56605 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
56606 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
56607 if (clobbersFlagRegisters(AsmPieces))
56608 return IntrinsicLowering::LowerToByteSwap(CI);
56611 if (CI->getType()->isIntegerTy(64)) {
56612 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
56613 if (Constraints.size() >= 2 &&
56614 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
56615 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
56616 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
56617 if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
56618 matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
56619 matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
56620 return IntrinsicLowering::LowerToByteSwap(CI);
56621 }
56622 }
56623 break;
56624 }
56625 return false;
56626 }
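// Map an inline asm flag-output constraint such as "{@ccz}" (the internal form
// of the "=@ccz" output constraint) to the corresponding X86 condition code, or
// COND_INVALID if the string is not a flag-output constraint.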
56628 static X86::CondCode parseConstraintCode(llvm::StringRef Constraint) {
56629 X86::CondCode Cond = StringSwitch<X86::CondCode>(Constraint)
56630 .Case("{@cca}", X86::COND_A)
56631 .Case("{@ccae}", X86::COND_AE)
56632 .Case("{@ccb}", X86::COND_B)
56633 .Case("{@ccbe}", X86::COND_BE)
56634 .Case("{@ccc}", X86::COND_B)
56635 .Case("{@cce}", X86::COND_E)
56636 .Case("{@ccz}", X86::COND_E)
56637 .Case("{@ccg}", X86::COND_G)
56638 .Case("{@ccge}", X86::COND_GE)
56639 .Case("{@ccl}", X86::COND_L)
56640 .Case("{@ccle}", X86::COND_LE)
56641 .Case("{@ccna}", X86::COND_BE)
56642 .Case("{@ccnae}", X86::COND_B)
56643 .Case("{@ccnb}", X86::COND_AE)
56644 .Case("{@ccnbe}", X86::COND_A)
56645 .Case("{@ccnc}", X86::COND_AE)
56646 .Case("{@ccne}", X86::COND_NE)
56647 .Case("{@ccnz}", X86::COND_NE)
56648 .Case("{@ccng}", X86::COND_LE)
56649 .Case("{@ccnge}", X86::COND_L)
56650 .Case("{@ccnl}", X86::COND_GE)
56651 .Case("{@ccnle}", X86::COND_G)
56652 .Case("{@ccno}", X86::COND_NO)
56653 .Case("{@ccnp}", X86::COND_NP)
56654 .Case("{@ccns}", X86::COND_NS)
56655 .Case("{@cco}", X86::COND_O)
56656 .Case("{@ccp}", X86::COND_P)
56657 .Case("{@ccs}", X86::COND_S)
56658 .Default(X86::COND_INVALID);
56659 return Cond;
56660 }
56662 /// Given a constraint letter, return the type of constraint for this target.
56663 X86TargetLowering::ConstraintType
56664 X86TargetLowering::getConstraintType(StringRef Constraint) const {
56665 if (Constraint.size() == 1) {
56666 switch (Constraint[0]) {
56667 case 'R':
56668 case 'q':
56669 case 'Q':
56670 case 'f':
56671 case 't':
56672 case 'u':
56673 case 'y':
56674 case 'x':
56675 case 'v':
56676 case 'l':
56677 case 'k': // AVX512 masking registers.
56678 return C_RegisterClass;
56679 case 'a':
56680 case 'b':
56681 case 'c':
56682 case 'd':
56683 case 'S':
56684 case 'D':
56685 case 'A':
56686 return C_Register;
56687 case 'I':
56688 case 'J':
56689 case 'K':
56690 case 'N':
56691 case 'G':
56692 case 'L':
56693 case 'M':
56694 return C_Immediate;
56695 case 'C':
56696 case 'e':
56697 case 'Z':
56698 return C_Other;
56699 default:
56700 break;
56703 else if (Constraint.size() == 2) {
56704 switch (Constraint[0]) {
56705 default:
56706 break;
56707 case 'W':
56708 if (Constraint[1] != 's')
56709 break;
56710 return C_Other;
56711 case 'Y':
56712 switch (Constraint[1]) {
56713 default:
56714 break;
56715 case 'z':
56716 return C_Register;
56717 case 'i':
56718 case 'm':
56719 case 'k':
56720 case 't':
56721 case '2':
56722 return C_RegisterClass;
56725 } else if (parseConstraintCode(Constraint) != X86::COND_INVALID)
56726 return C_Other;
56727 return TargetLowering::getConstraintType(Constraint);
56730 /// Examine constraint type and operand type and determine a weight value.
56731 /// This object must already have been set up with the operand type
56732 /// and the current alternative constraint selected.
56733 TargetLowering::ConstraintWeight
56734 X86TargetLowering::getSingleConstraintMatchWeight(
56735 AsmOperandInfo &Info, const char *Constraint) const {
56736 ConstraintWeight Wt = CW_Invalid;
56737 Value *CallOperandVal = Info.CallOperandVal;
56738 // If we don't have a value, we can't do a match,
56739 // but allow it at the lowest weight.
56740 if (!CallOperandVal)
56741 return CW_Default;
56742 Type *Ty = CallOperandVal->getType();
56743 // Look at the constraint type.
56744 switch (*Constraint) {
56745 default:
56746 Wt = TargetLowering::getSingleConstraintMatchWeight(Info, Constraint);
56747 [[fallthrough]];
56748 case 'R':
56749 case 'q':
56750 case 'Q':
56751 case 'a':
56752 case 'b':
56753 case 'c':
56754 case 'd':
56755 case 'S':
56756 case 'D':
56757 case 'A':
56758 if (CallOperandVal->getType()->isIntegerTy())
56759 Wt = CW_SpecificReg;
56760 break;
56761 case 'f':
56762 case 't':
56763 case 'u':
56764 if (Ty->isFloatingPointTy())
56765 Wt = CW_SpecificReg;
56766 break;
56767 case 'y':
56768 if (Ty->isX86_MMXTy() && Subtarget.hasMMX())
56769 Wt = CW_SpecificReg;
56770 break;
56771 case 'Y':
56772 if (StringRef(Constraint).size() != 2)
56773 break;
56774 switch (Constraint[1]) {
56775 default:
56776 return CW_Invalid;
56777 // XMM0
56778 case 'z':
56779 if (((Ty->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
56780 ((Ty->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()) ||
56781 ((Ty->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512()))
56782 return CW_SpecificReg;
56783 return CW_Invalid;
56784 // Conditional OpMask regs (AVX512)
56785 case 'k':
56786 if ((Ty->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
56787 return CW_Register;
56788 return CW_Invalid;
56789 // Any MMX reg
56790 case 'm':
56791 if (Ty->isX86_MMXTy() && Subtarget.hasMMX())
56792 return Wt;
56793 return CW_Invalid;
56794 // Any SSE reg when ISA >= SSE2, same as 'x'
56795 case 'i':
56796 case 't':
56797 case '2':
56798 if (!Subtarget.hasSSE2())
56799 return CW_Invalid;
56800 break;
56802 break;
56803 case 'v':
56804 if ((Ty->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
56805 Wt = CW_Register;
56806 [[fallthrough]];
56807 case 'x':
56808 if (((Ty->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
56809 ((Ty->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
56810 Wt = CW_Register;
56811 break;
56812 case 'k':
56813 // Enable conditional vector operations using %k<#> registers.
56814 if ((Ty->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
56815 Wt = CW_Register;
56816 break;
56817 case 'I':
56818 if (auto *C = dyn_cast<ConstantInt>(Info.CallOperandVal))
56819 if (C->getZExtValue() <= 31)
56820 Wt = CW_Constant;
56821 break;
56822 case 'J':
56823 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56824 if (C->getZExtValue() <= 63)
56825 Wt = CW_Constant;
56826 break;
56827 case 'K':
56828 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56829 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
56830 Wt = CW_Constant;
56831 break;
56832 case 'L':
56833 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56834 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
56835 Wt = CW_Constant;
56836 break;
56837 case 'M':
56838 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56839 if (C->getZExtValue() <= 3)
56840 Wt = CW_Constant;
56841 break;
56842 case 'N':
56843 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56844 if (C->getZExtValue() <= 0xff)
56845 Wt = CW_Constant;
56846 break;
56847 case 'G':
56848 case 'C':
56849 if (isa<ConstantFP>(CallOperandVal))
56850 Wt = CW_Constant;
56851 break;
56852 case 'e':
56853 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56854 if ((C->getSExtValue() >= -0x80000000LL) &&
56855 (C->getSExtValue() <= 0x7fffffffLL))
56856 Wt = CW_Constant;
56857 break;
56858 case 'Z':
56859 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56860 if (C->getZExtValue() <= 0xffffffff)
56861 Wt = CW_Constant;
56862 break;
56863 }
56864 return Wt;
56865 }
56867 /// Try to replace an X constraint, which matches anything, with another that
56868 /// has more specific requirements based on the type of the corresponding
56869 /// operand.
56870 const char *X86TargetLowering::
56871 LowerXConstraint(EVT ConstraintVT) const {
56872 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
56873 // 'f' like normal targets.
56874 if (ConstraintVT.isFloatingPoint()) {
56875 if (Subtarget.hasSSE1())
56876 return "x";
56879 return TargetLowering::LowerXConstraint(ConstraintVT);
56882 // Lower @cc targets via setcc.
56883 SDValue X86TargetLowering::LowerAsmOutputForConstraint(
56884 SDValue &Chain, SDValue &Glue, const SDLoc &DL,
56885 const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const {
56886 X86::CondCode Cond = parseConstraintCode(OpInfo.ConstraintCode);
56887 if (Cond == X86::COND_INVALID)
56888 return SDValue();
56889 // Check that return type is valid.
56890 if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
56891 OpInfo.ConstraintVT.getSizeInBits() < 8)
56892 report_fatal_error("Glue output operand is of invalid type");
56894 // Get EFLAGS register. Only update chain when copyfrom is glued.
56895 if (Glue.getNode()) {
56896 Glue = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32, Glue);
56897 Chain = Glue.getValue(1);
56898 } else
56899 Glue = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32);
56900 // Extract CC code.
56901 SDValue CC = getSETCC(Cond, Glue, DL, DAG);
56902 // Extend to 32-bits
56903 SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, DL, OpInfo.ConstraintVT, CC);
56905 return Result;
56906 }
56908 /// Lower the specified operand into the Ops vector.
56909 /// If it is invalid, don't add anything to Ops.
56910 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
56911 StringRef Constraint,
56912 std::vector<SDValue> &Ops,
56913 SelectionDAG &DAG) const {
56914 SDValue Result;
56915 char ConstraintLetter = Constraint[0];
56916 switch (ConstraintLetter) {
56917 default: break;
56918 case 'I':
56919 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56920 if (C->getZExtValue() <= 31) {
56921 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
56922 Op.getValueType());
56923 break;
56926 return;
56927 case 'J':
56928 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56929 if (C->getZExtValue() <= 63) {
56930 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
56931 Op.getValueType());
56932 break;
56935 return;
56936 case 'K':
56937 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56938 if (isInt<8>(C->getSExtValue())) {
56939 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
56940 Op.getValueType());
56941 break;
56944 return;
56945 case 'L':
56946 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56947 if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
56948 (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
56949 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
56950 Op.getValueType());
56951 break;
56954 return;
56955 case 'M':
56956 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56957 if (C->getZExtValue() <= 3) {
56958 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
56959 Op.getValueType());
56960 break;
56963 return;
56964 case 'N':
56965 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56966 if (C->getZExtValue() <= 255) {
56967 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
56968 Op.getValueType());
56969 break;
56972 return;
56973 case 'O':
56974 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56975 if (C->getZExtValue() <= 127) {
56976 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
56977 Op.getValueType());
56978 break;
56981 return;
56982 case 'e': {
56983 // 32-bit signed value
56984 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56985 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
56986 C->getSExtValue())) {
56987 // Widen to 64 bits here to get it sign extended.
56988 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
56989 break;
56991 // FIXME gcc accepts some relocatable values here too, but only in certain
56992 // memory models; it's complicated.
56994 return;
56996 case 'W': {
56997 assert(Constraint[1] == 's');
56998 // Op is a BlockAddressSDNode or a GlobalAddressSDNode with an optional
56999 // offset.
57000 if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
57001 Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
57002 BA->getValueType(0)));
57003 } else {
57004 int64_t Offset = 0;
57005 if (Op->getOpcode() == ISD::ADD &&
57006 isa<ConstantSDNode>(Op->getOperand(1))) {
57007 Offset = cast<ConstantSDNode>(Op->getOperand(1))->getSExtValue();
57008 Op = Op->getOperand(0);
57010 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
57011 Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
57012 GA->getValueType(0), Offset));
57014 return;
57016 case 'Z': {
57017 // 32-bit unsigned value
57018 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57019 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
57020 C->getZExtValue())) {
57021 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
57022 Op.getValueType());
57023 break;
57026 // FIXME gcc accepts some relocatable values here too, but only in certain
57027 // memory models; it's complicated.
57028 return;
57030 case 'i': {
57031 // Literal immediates are always ok.
57032 if (auto *CST = dyn_cast<ConstantSDNode>(Op)) {
57033 bool IsBool = CST->getConstantIntValue()->getBitWidth() == 1;
57034 BooleanContent BCont = getBooleanContents(MVT::i64);
57035 ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
57036 : ISD::SIGN_EXTEND;
57037 int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? CST->getZExtValue()
57038 : CST->getSExtValue();
57039 Result = DAG.getTargetConstant(ExtVal, SDLoc(Op), MVT::i64);
57040 break;
57043 // In any sort of PIC mode addresses need to be computed at runtime by
57044 // adding in a register or some sort of table lookup. These can't
57045 // be used as immediates. BlockAddresses and BasicBlocks are fine though.
57046 if ((Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC()) &&
57047 !(isa<BlockAddressSDNode>(Op) || isa<BasicBlockSDNode>(Op)))
57048 return;
57050 // If we are in non-pic codegen mode, we allow the address of a global (with
57051 // an optional displacement) to be used with 'i'.
57052 if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
57053 // If we require an extra load to get this address, as in PIC mode, we
57054 // can't accept it.
57055 if (isGlobalStubReference(
57056 Subtarget.classifyGlobalReference(GA->getGlobal())))
57057 return;
57058 break;
57062 if (Result.getNode()) {
57063 Ops.push_back(Result);
57064 return;
57065 }
57066 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
57067 }
57069 /// Check if \p RC is a general purpose register class.
57070 /// I.e., GR* or one of their variant.
57071 static bool isGRClass(const TargetRegisterClass &RC) {
57072 return RC.hasSuperClassEq(&X86::GR8RegClass) ||
57073 RC.hasSuperClassEq(&X86::GR16RegClass) ||
57074 RC.hasSuperClassEq(&X86::GR32RegClass) ||
57075 RC.hasSuperClassEq(&X86::GR64RegClass) ||
57076 RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
57077 }
57079 /// Check if \p RC is a vector register class.
57080 /// I.e., FR* / VR* or one of their variant.
57081 static bool isFRClass(const TargetRegisterClass &RC) {
57082 return RC.hasSuperClassEq(&X86::FR16XRegClass) ||
57083 RC.hasSuperClassEq(&X86::FR32XRegClass) ||
57084 RC.hasSuperClassEq(&X86::FR64XRegClass) ||
57085 RC.hasSuperClassEq(&X86::VR128XRegClass) ||
57086 RC.hasSuperClassEq(&X86::VR256XRegClass) ||
57087 RC.hasSuperClassEq(&X86::VR512RegClass);
57088 }
57090 /// Check if \p RC is a mask register class.
57091 /// I.e., VK* or one of their variant.
57092 static bool isVKClass(const TargetRegisterClass &RC) {
57093 return RC.hasSuperClassEq(&X86::VK1RegClass) ||
57094 RC.hasSuperClassEq(&X86::VK2RegClass) ||
57095 RC.hasSuperClassEq(&X86::VK4RegClass) ||
57096 RC.hasSuperClassEq(&X86::VK8RegClass) ||
57097 RC.hasSuperClassEq(&X86::VK16RegClass) ||
57098 RC.hasSuperClassEq(&X86::VK32RegClass) ||
57099 RC.hasSuperClassEq(&X86::VK64RegClass);
57100 }
57102 std::pair<unsigned, const TargetRegisterClass *>
57103 X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
57104 StringRef Constraint,
57105 MVT VT) const {
57106 // First, see if this is a constraint that directly corresponds to an LLVM
57107 // register class.
57108 if (Constraint.size() == 1) {
57109 // GCC Constraint Letters
57110 switch (Constraint[0]) {
57111 default: break;
57112 // 'A' means [ER]AX + [ER]DX.
57113 case 'A':
57114 if (Subtarget.is64Bit())
57115 return std::make_pair(X86::RAX, &X86::GR64_ADRegClass);
57116 assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
57117 "Expecting 64, 32 or 16 bit subtarget");
57118 return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
57120 // TODO: Slight differences here in allocation order and leaving
57121 // RIP in the class. Do they matter any more here than they do
57122 // in the normal allocation?
57123 case 'k':
57124 if (Subtarget.hasAVX512()) {
57125 if (VT == MVT::v1i1 || VT == MVT::i1)
57126 return std::make_pair(0U, &X86::VK1RegClass);
57127 if (VT == MVT::v8i1 || VT == MVT::i8)
57128 return std::make_pair(0U, &X86::VK8RegClass);
57129 if (VT == MVT::v16i1 || VT == MVT::i16)
57130 return std::make_pair(0U, &X86::VK16RegClass);
57132 if (Subtarget.hasBWI()) {
57133 if (VT == MVT::v32i1 || VT == MVT::i32)
57134 return std::make_pair(0U, &X86::VK32RegClass);
57135 if (VT == MVT::v64i1 || VT == MVT::i64)
57136 return std::make_pair(0U, &X86::VK64RegClass);
57138 break;
57139 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
57140 if (Subtarget.is64Bit()) {
57141 if (VT == MVT::i8 || VT == MVT::i1)
57142 return std::make_pair(0U, &X86::GR8_NOREX2RegClass);
57143 if (VT == MVT::i16)
57144 return std::make_pair(0U, &X86::GR16_NOREX2RegClass);
57145 if (VT == MVT::i32 || VT == MVT::f32)
57146 return std::make_pair(0U, &X86::GR32_NOREX2RegClass);
57147 if (VT != MVT::f80 && !VT.isVector())
57148 return std::make_pair(0U, &X86::GR64_NOREX2RegClass);
57149 break;
57151 [[fallthrough]];
57152 // 32-bit fallthrough
57153 case 'Q': // Q_REGS
57154 if (VT == MVT::i8 || VT == MVT::i1)
57155 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
57156 if (VT == MVT::i16)
57157 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
57158 if (VT == MVT::i32 || VT == MVT::f32 ||
57159 (!VT.isVector() && !Subtarget.is64Bit()))
57160 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
57161 if (VT != MVT::f80 && !VT.isVector())
57162 return std::make_pair(0U, &X86::GR64_ABCDRegClass);
57163 break;
57164 case 'r': // GENERAL_REGS
57165 case 'l': // INDEX_REGS
57166 if (VT == MVT::i8 || VT == MVT::i1)
57167 return std::make_pair(0U, &X86::GR8_NOREX2RegClass);
57168 if (VT == MVT::i16)
57169 return std::make_pair(0U, &X86::GR16_NOREX2RegClass);
57170 if (VT == MVT::i32 || VT == MVT::f32 ||
57171 (!VT.isVector() && !Subtarget.is64Bit()))
57172 return std::make_pair(0U, &X86::GR32_NOREX2RegClass);
57173 if (VT != MVT::f80 && !VT.isVector())
57174 return std::make_pair(0U, &X86::GR64_NOREX2RegClass);
57175 break;
57176 case 'R': // LEGACY_REGS
57177 if (VT == MVT::i8 || VT == MVT::i1)
57178 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
57179 if (VT == MVT::i16)
57180 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
57181 if (VT == MVT::i32 || VT == MVT::f32 ||
57182 (!VT.isVector() && !Subtarget.is64Bit()))
57183 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
57184 if (VT != MVT::f80 && !VT.isVector())
57185 return std::make_pair(0U, &X86::GR64_NOREXRegClass);
57186 break;
57187 case 'f': // FP Stack registers.
57188 // If SSE is enabled for this VT, use f80 to ensure the isel moves the
57189 // value to the correct fpstack register class.
57190 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
57191 return std::make_pair(0U, &X86::RFP32RegClass);
57192 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
57193 return std::make_pair(0U, &X86::RFP64RegClass);
57194 if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f80)
57195 return std::make_pair(0U, &X86::RFP80RegClass);
57196 break;
57197 case 'y': // MMX_REGS if MMX allowed.
57198 if (!Subtarget.hasMMX()) break;
57199 return std::make_pair(0U, &X86::VR64RegClass);
57200 case 'v':
57201 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
57202 if (!Subtarget.hasSSE1()) break;
57203 bool VConstraint = (Constraint[0] == 'v');
57205 switch (VT.SimpleTy) {
57206 default: break;
57207 // Scalar SSE types.
57208 case MVT::f16:
57209 if (VConstraint && Subtarget.hasFP16())
57210 return std::make_pair(0U, &X86::FR16XRegClass);
57211 break;
57212 case MVT::f32:
57213 case MVT::i32:
57214 if (VConstraint && Subtarget.hasVLX())
57215 return std::make_pair(0U, &X86::FR32XRegClass);
57216 return std::make_pair(0U, &X86::FR32RegClass);
57217 case MVT::f64:
57218 case MVT::i64:
57219 if (VConstraint && Subtarget.hasVLX())
57220 return std::make_pair(0U, &X86::FR64XRegClass);
57221 return std::make_pair(0U, &X86::FR64RegClass);
57222 case MVT::i128:
57223 if (Subtarget.is64Bit()) {
57224 if (VConstraint && Subtarget.hasVLX())
57225 return std::make_pair(0U, &X86::VR128XRegClass);
57226 return std::make_pair(0U, &X86::VR128RegClass);
57228 break;
57229 // Vector types and fp128.
57230 case MVT::v8f16:
57231 if (!Subtarget.hasFP16())
57232 break;
57233 if (VConstraint)
57234 return std::make_pair(0U, &X86::VR128XRegClass);
57235 return std::make_pair(0U, &X86::VR128RegClass);
57236 case MVT::v8bf16:
57237 if (!Subtarget.hasBF16() || !Subtarget.hasVLX())
57238 break;
57239 if (VConstraint)
57240 return std::make_pair(0U, &X86::VR128XRegClass);
57241 return std::make_pair(0U, &X86::VR128RegClass);
57242 case MVT::f128:
57243 case MVT::v16i8:
57244 case MVT::v8i16:
57245 case MVT::v4i32:
57246 case MVT::v2i64:
57247 case MVT::v4f32:
57248 case MVT::v2f64:
57249 if (VConstraint && Subtarget.hasVLX())
57250 return std::make_pair(0U, &X86::VR128XRegClass);
57251 return std::make_pair(0U, &X86::VR128RegClass);
57252 // AVX types.
57253 case MVT::v16f16:
57254 if (!Subtarget.hasFP16())
57255 break;
57256 if (VConstraint)
57257 return std::make_pair(0U, &X86::VR256XRegClass);
57258 return std::make_pair(0U, &X86::VR256RegClass);
57259 case MVT::v16bf16:
57260 if (!Subtarget.hasBF16() || !Subtarget.hasVLX())
57261 break;
57262 if (VConstraint)
57263 return std::make_pair(0U, &X86::VR256XRegClass);
57264 return std::make_pair(0U, &X86::VR256RegClass);
57265 case MVT::v32i8:
57266 case MVT::v16i16:
57267 case MVT::v8i32:
57268 case MVT::v4i64:
57269 case MVT::v8f32:
57270 case MVT::v4f64:
57271 if (VConstraint && Subtarget.hasVLX())
57272 return std::make_pair(0U, &X86::VR256XRegClass);
57273 if (Subtarget.hasAVX())
57274 return std::make_pair(0U, &X86::VR256RegClass);
57275 break;
57276 case MVT::v32f16:
57277 if (!Subtarget.hasFP16())
57278 break;
57279 if (VConstraint)
57280 return std::make_pair(0U, &X86::VR512RegClass);
57281 return std::make_pair(0U, &X86::VR512_0_15RegClass);
57282 case MVT::v32bf16:
57283 if (!Subtarget.hasBF16())
57284 break;
57285 if (VConstraint)
57286 return std::make_pair(0U, &X86::VR512RegClass);
57287 return std::make_pair(0U, &X86::VR512_0_15RegClass);
57288 case MVT::v64i8:
57289 case MVT::v32i16:
57290 case MVT::v8f64:
57291 case MVT::v16f32:
57292 case MVT::v16i32:
57293 case MVT::v8i64:
57294 if (!Subtarget.hasAVX512()) break;
57295 if (VConstraint)
57296 return std::make_pair(0U, &X86::VR512RegClass);
57297 return std::make_pair(0U, &X86::VR512_0_15RegClass);
57298 }
57299 break;
57300 }
57301 } else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
57302 switch (Constraint[1]) {
57303 default:
57304 break;
57305 case 'i':
57306 case 't':
57307 case '2':
57308 return getRegForInlineAsmConstraint(TRI, "x", VT);
57309 case 'm':
57310 if (!Subtarget.hasMMX()) break;
57311 return std::make_pair(0U, &X86::VR64RegClass);
57312 case 'z':
57313 if (!Subtarget.hasSSE1()) break;
57314 switch (VT.SimpleTy) {
57315 default: break;
57316 // Scalar SSE types.
57317 case MVT::f16:
57318 if (!Subtarget.hasFP16())
57319 break;
57320 return std::make_pair(X86::XMM0, &X86::FR16XRegClass);
57321 case MVT::f32:
57322 case MVT::i32:
57323 return std::make_pair(X86::XMM0, &X86::FR32RegClass);
57324 case MVT::f64:
57325 case MVT::i64:
57326 return std::make_pair(X86::XMM0, &X86::FR64RegClass);
57327 case MVT::v8f16:
57328 if (!Subtarget.hasFP16())
57329 break;
57330 return std::make_pair(X86::XMM0, &X86::VR128RegClass);
57331 case MVT::v8bf16:
57332 if (!Subtarget.hasBF16() || !Subtarget.hasVLX())
57333 break;
57334 return std::make_pair(X86::XMM0, &X86::VR128RegClass);
57335 case MVT::f128:
57336 case MVT::v16i8:
57337 case MVT::v8i16:
57338 case MVT::v4i32:
57339 case MVT::v2i64:
57340 case MVT::v4f32:
57341 case MVT::v2f64:
57342 return std::make_pair(X86::XMM0, &X86::VR128RegClass);
57343 // AVX types.
57344 case MVT::v16f16:
57345 if (!Subtarget.hasFP16())
57346 break;
57347 return std::make_pair(X86::YMM0, &X86::VR256RegClass);
57348 case MVT::v16bf16:
57349 if (!Subtarget.hasBF16() || !Subtarget.hasVLX())
57350 break;
57351 return std::make_pair(X86::YMM0, &X86::VR256RegClass);
57352 case MVT::v32i8:
57353 case MVT::v16i16:
57354 case MVT::v8i32:
57355 case MVT::v4i64:
57356 case MVT::v8f32:
57357 case MVT::v4f64:
57358 if (Subtarget.hasAVX())
57359 return std::make_pair(X86::YMM0, &X86::VR256RegClass);
57360 break;
57361 case MVT::v32f16:
57362 if (!Subtarget.hasFP16())
57363 break;
57364 return std::make_pair(X86::ZMM0, &X86::VR512_0_15RegClass);
57365 case MVT::v32bf16:
57366 if (!Subtarget.hasBF16())
57367 break;
57368 return std::make_pair(X86::ZMM0, &X86::VR512_0_15RegClass);
57369 case MVT::v64i8:
57370 case MVT::v32i16:
57371 case MVT::v8f64:
57372 case MVT::v16f32:
57373 case MVT::v16i32:
57374 case MVT::v8i64:
57375 if (Subtarget.hasAVX512())
57376 return std::make_pair(X86::ZMM0, &X86::VR512_0_15RegClass);
57377 break;
57378 }
57379 break;
57380 case 'k':
57381 // This register class doesn't allocate k0 for masked vector operations.
57382 if (Subtarget.hasAVX512()) {
57383 if (VT == MVT::v1i1 || VT == MVT::i1)
57384 return std::make_pair(0U, &X86::VK1WMRegClass);
57385 if (VT == MVT::v8i1 || VT == MVT::i8)
57386 return std::make_pair(0U, &X86::VK8WMRegClass);
57387 if (VT == MVT::v16i1 || VT == MVT::i16)
57388 return std::make_pair(0U, &X86::VK16WMRegClass);
57390 if (Subtarget.hasBWI()) {
57391 if (VT == MVT::v32i1 || VT == MVT::i32)
57392 return std::make_pair(0U, &X86::VK32WMRegClass);
57393 if (VT == MVT::v64i1 || VT == MVT::i64)
57394 return std::make_pair(0U, &X86::VK64WMRegClass);
57396 break;
57397 }
57398 }
57400 if (parseConstraintCode(Constraint) != X86::COND_INVALID)
57401 return std::make_pair(0U, &X86::GR32RegClass);
57403 // Use the default implementation in TargetLowering to convert the register
57404 // constraint into a member of a register class.
57405 std::pair<Register, const TargetRegisterClass*> Res;
57406 Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
57408 // Not found as a standard register?
57409 if (!Res.second) {
57410 // Only match x87 registers if the VT is one SelectionDAGBuilder can convert
57411 // to/from f80.
57412 if (VT == MVT::Other || VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f80) {
57413 // Map st(0) -> st(7) -> ST0
57414 if (Constraint.size() == 7 && Constraint[0] == '{' &&
57415 tolower(Constraint[1]) == 's' && tolower(Constraint[2]) == 't' &&
57416 Constraint[3] == '(' &&
57417 (Constraint[4] >= '0' && Constraint[4] <= '7') &&
57418 Constraint[5] == ')' && Constraint[6] == '}') {
57419 // st(7) is not allocatable and thus not a member of RFP80. Return
57420 // singleton class in cases where we have a reference to it.
57421 if (Constraint[4] == '7')
57422 return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
57423 return std::make_pair(X86::FP0 + Constraint[4] - '0',
57424 &X86::RFP80RegClass);
57427 // GCC allows "st(0)" to be called just plain "st".
57428 if (StringRef("{st}").equals_insensitive(Constraint))
57429 return std::make_pair(X86::FP0, &X86::RFP80RegClass);
57432 // flags -> EFLAGS
57433 if (StringRef("{flags}").equals_insensitive(Constraint))
57434 return std::make_pair(X86::EFLAGS, &X86::CCRRegClass);
57436 // dirflag -> DF
57437 // Only allow for clobber.
57438 if (StringRef("{dirflag}").equals_insensitive(Constraint) &&
57439 VT == MVT::Other)
57440 return std::make_pair(X86::DF, &X86::DFCCRRegClass);
57442 // fpsr -> FPSW
57443 // Only allow for clobber.
57444 if (StringRef("{fpsr}").equals_insensitive(Constraint) && VT == MVT::Other)
57445 return std::make_pair(X86::FPSW, &X86::FPCCRRegClass);
57447 return Res;
57450 // Make sure it isn't a register that requires 64-bit mode.
57451 if (!Subtarget.is64Bit() &&
57452 (isFRClass(*Res.second) || isGRClass(*Res.second)) &&
57453 TRI->getEncodingValue(Res.first) >= 8) {
57454 // Register requires REX prefix, but we're in 32-bit mode.
57455 return std::make_pair(0, nullptr);
57458 // Make sure it isn't a register that requires AVX512.
57459 if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
57460 TRI->getEncodingValue(Res.first) & 0x10) {
57461 // Register requires EVEX prefix.
57462 return std::make_pair(0, nullptr);
57465 // Otherwise, check to see if this is a register class of the wrong value
57466 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
57467 // turn into {ax},{dx}.
57468 // MVT::Other is used to specify clobber names.
57469 if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
57470 return Res; // Correct type already, nothing to do.
57472 // Get a matching integer of the correct size. i.e. "ax" with MVT::i32 should
57473 // return "eax". This should even work for things like getting 64-bit integer
57474 // registers when given an f64 type.
57475 const TargetRegisterClass *Class = Res.second;
57476 // The generic code will match the first register class that contains the
57477 // given register. Thus, based on the ordering of the tablegened file,
57478 // the "plain" GR classes might not come first.
57479 // Therefore, use a helper method.
57480 if (isGRClass(*Class)) {
57481 unsigned Size = VT.getSizeInBits();
57482 if (Size == 1) Size = 8;
57483 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
57484 return std::make_pair(0, nullptr);
57485 Register DestReg = getX86SubSuperRegister(Res.first, Size);
57486 if (DestReg.isValid()) {
57487 bool is64Bit = Subtarget.is64Bit();
57488 const TargetRegisterClass *RC =
57489 Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
57490 : Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
57491 : Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
57492 : /*Size == 64*/ (is64Bit ? &X86::GR64RegClass : nullptr);
57493 if (Size == 64 && !is64Bit) {
57494 // Model GCC's behavior here and select a fixed pair of 32-bit
57495 // registers.
57496 switch (DestReg) {
57497 case X86::RAX:
57498 return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
57499 case X86::RDX:
57500 return std::make_pair(X86::EDX, &X86::GR32_DCRegClass);
57501 case X86::RCX:
57502 return std::make_pair(X86::ECX, &X86::GR32_CBRegClass);
57503 case X86::RBX:
57504 return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass);
57505 case X86::RSI:
57506 return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass);
57507 case X86::RDI:
57508 return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass);
57509 case X86::RBP:
57510 return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass);
57511 default:
57512 return std::make_pair(0, nullptr);
57515 if (RC && RC->contains(DestReg))
57516 return std::make_pair(DestReg, RC);
57517 return Res;
57519 // No register found/type mismatch.
57520 return std::make_pair(0, nullptr);
57521 } else if (isFRClass(*Class)) {
57522 // Handle references to XMM physical registers that got mapped into the
57523 // wrong class. This can happen with constraints like {xmm0} where the
57524 // target independent register mapper will just pick the first match it can
57525 // find, ignoring the required type.
    // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
    if (VT == MVT::f16)
      Res.second = &X86::FR16XRegClass;
    else if (VT == MVT::f32 || VT == MVT::i32)
      Res.second = &X86::FR32XRegClass;
    else if (VT == MVT::f64 || VT == MVT::i64)
      Res.second = &X86::FR64XRegClass;
    else if (TRI->isTypeLegalForClass(X86::VR128XRegClass, VT))
      Res.second = &X86::VR128XRegClass;
    else if (TRI->isTypeLegalForClass(X86::VR256XRegClass, VT))
      Res.second = &X86::VR256XRegClass;
    else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
      Res.second = &X86::VR512RegClass;
    else {
      // Type mismatch and not a clobber: return an error.
      Res.first = 0;
      Res.second = nullptr;
    }
  } else if (isVKClass(*Class)) {
    if (VT == MVT::v1i1 || VT == MVT::i1)
      Res.second = &X86::VK1RegClass;
    else if (VT == MVT::v8i1 || VT == MVT::i8)
      Res.second = &X86::VK8RegClass;
    else if (VT == MVT::v16i1 || VT == MVT::i16)
      Res.second = &X86::VK16RegClass;
    else if (VT == MVT::v32i1 || VT == MVT::i32)
      Res.second = &X86::VK32RegClass;
    else if (VT == MVT::v64i1 || VT == MVT::i64)
      Res.second = &X86::VK64RegClass;
    else {
      // Type mismatch and not a clobber: return an error.
      Res.first = 0;
      Res.second = nullptr;
    }
  }

  return Res;
}

bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
  // Integer division on x86 is expensive. However, when aggressively optimizing
  // for code size, we prefer to use a div instruction, as it is usually smaller
  // than the alternative sequence.
  // The exception to this is vector division. Since x86 doesn't have vector
  // integer division, leaving the division as-is is a loss even in terms of
  // size, because it will have to be scalarized, while the alternative code
  // sequence can be performed in vector form.
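  // Illustrative effect (not in the original source): under minsize,
  // "sdiv i32 %x, 10" is kept as a hardware idiv instead of being expanded
  // into a multiply/shift sequence; a vector sdiv is still expanded, since
  // scalarizing it into individual idivs would be larger.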
  bool OptSize = Attr.hasFnAttr(Attribute::MinSize);
  return OptSize && !VT.isVector();
}

void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  if (!Subtarget.is64Bit())
    return;

  // Update IsSplitCSR in X86MachineFunctionInfo.
  X86MachineFunctionInfo *AFI =
      Entry->getParent()->getInfo<X86MachineFunctionInfo>();
  AFI->setIsSplitCSR(true);
}

void X86TargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (X86::GR64RegClass.contains(*I))
      RC = &X86::GR64RegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    Register NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions; it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(
        Entry->getParent()->getFunction().hasFnAttribute(Attribute::NoUnwind) &&
        "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, MIMetadata(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), MIMetadata(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

bool X86TargetLowering::supportSwiftError() const {
  return Subtarget.is64Bit();
}

MachineInstr *
X86TargetLowering::EmitKCFICheck(MachineBasicBlock &MBB,
                                 MachineBasicBlock::instr_iterator &MBBI,
                                 const TargetInstrInfo *TII) const {
  assert(MBBI->isCall() && MBBI->getCFIType() &&
         "Invalid call instruction for a KCFI check");

  MachineFunction &MF = *MBB.getParent();
  // If the call target is a memory operand, unfold it and use R11 for the
  // call, so KCFI_CHECK won't have to recompute the address.
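  // Illustrative lowering (not in the original source): a call such as
  //   call qword ptr [rax]
  // is rewritten to
  //   mov r11, qword ptr [rax]
  //   call r11
  // so the KCFI_CHECK emitted below only needs the register holding the
  // call target.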
  switch (MBBI->getOpcode()) {
  case X86::CALL64m:
  case X86::CALL64m_NT:
  case X86::TAILJMPm64:
  case X86::TAILJMPm64_REX: {
    MachineBasicBlock::instr_iterator OrigCall = MBBI;
    SmallVector<MachineInstr *, 2> NewMIs;
    if (!TII->unfoldMemoryOperand(MF, *OrigCall, X86::R11, /*UnfoldLoad=*/true,
                                  /*UnfoldStore=*/false, NewMIs))
      report_fatal_error("Failed to unfold memory operand for a KCFI check");
    for (auto *NewMI : NewMIs)
      MBBI = MBB.insert(OrigCall, NewMI);
    assert(MBBI->isCall() &&
           "Unexpected instruction after memory operand unfolding");
    if (OrigCall->shouldUpdateCallSiteInfo())
      MF.moveCallSiteInfo(&*OrigCall, &*MBBI);
    MBBI->setCFIType(MF, OrigCall->getCFIType());
    OrigCall->eraseFromParent();
    break;
  }
  default:
    break;
  }

  MachineOperand &Target = MBBI->getOperand(0);
  Register TargetReg;
  switch (MBBI->getOpcode()) {
  case X86::CALL64r:
  case X86::CALL64r_NT:
  case X86::TAILJMPr64:
  case X86::TAILJMPr64_REX:
    assert(Target.isReg() && "Unexpected target operand for an indirect call");
    Target.setIsRenamable(false);
    TargetReg = Target.getReg();
    break;
  case X86::CALL64pcrel32:
  case X86::TAILJMPd64:
    assert(Target.isSymbol() && "Unexpected target operand for a direct call");
    // X86TargetLowering::EmitLoweredIndirectThunk always uses r11 for
    // 64-bit indirect thunk calls.
    assert(StringRef(Target.getSymbolName()).ends_with("_r11") &&
           "Unexpected register for an indirect thunk call");
    TargetReg = X86::R11;
    break;
  default:
    llvm_unreachable("Unexpected CFI call opcode");
    break;
  }

  return BuildMI(MBB, MBBI, MIMetadata(*MBBI), TII->get(X86::KCFI_CHECK))
      .addReg(TargetReg)
      .addImm(MBBI->getCFIType())
      .getInstr();
}

/// Returns true if stack probing through a function call is requested.
bool X86TargetLowering::hasStackProbeSymbol(const MachineFunction &MF) const {
  return !getStackProbeSymbolName(MF).empty();
}

/// Returns true if stack probing through inline assembly is requested.
bool X86TargetLowering::hasInlineStackProbe(const MachineFunction &MF) const {
  // No inline stack probes on Windows; it has its own mechanism.
  if (Subtarget.isOSWindows() ||
      MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
    return false;

  // If the function specifically requests inline stack probes, emit them.
  if (MF.getFunction().hasFnAttribute("probe-stack"))
    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
           "inline-asm";
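
  // Illustrative IR (not in the original source): a function defined as
  //   define void @f() "probe-stack"="inline-asm" { ... }
  // takes the branch above and gets inline stack probes even when the
  // target is not Windows.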

  return false;
}

/// Returns the name of the symbol used to emit stack probes or the empty
/// string if not applicable.
StringRef
X86TargetLowering::getStackProbeSymbolName(const MachineFunction &MF) const {
  // Inline stack probes disable the stack probe call.
  if (hasInlineStackProbe(MF))
    return "";

  // If the function specifically requests stack probes, emit them.
  if (MF.getFunction().hasFnAttribute("probe-stack"))
    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
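
  // Illustrative IR (not in the original source):
  //   "probe-stack"="__rust_probestack"
  // on the function makes frame lowering call that symbol instead of the
  // default chkstk-style helpers chosen below.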

  // Generally, if we aren't on Windows, the platform ABI does not include
  // support for stack probes, so don't emit them.
  if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
      MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
    return "";

  // We need a stack probe to conform to the Windows ABI. Choose the right
  // symbol.
  if (Subtarget.is64Bit())
    return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
  return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";
}

unsigned
X86TargetLowering::getStackProbeSize(const MachineFunction &MF) const {
  // The default stack probe size is 4096 if the function has no
  // "stack-probe-size" attribute.
  return MF.getFunction().getFnAttributeAsParsedInteger("stack-probe-size",
                                                        4096);
}
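
// Illustrative IR (not in the original source): "stack-probe-size"="8192" on
// the function raises the probe interval for that function to 8 KiB.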

Align X86TargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  if (ML && ML->isInnermost() &&
      ExperimentalPrefInnermostLoopAlignment.getNumOccurrences())
    return Align(1ULL << ExperimentalPrefInnermostLoopAlignment);
  return TargetLowering::getPrefLoopAlignment();
}
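
// Illustrative use (command line, not in the original source):
//   llc -x86-experimental-pref-innermost-loop-alignment=5 ...
// aligns innermost loops to 1 << 5 = 32 bytes, overriding the default
// preferred loop alignment.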