1 //===-- X86InstrSSE.td - SSE Instruction Set ---------------*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file describes the X86 SSE instruction set, defining the instructions
10 // and the properties of the instructions that are needed for code generation,
11 // machine code emission, and analysis.
13 //===----------------------------------------------------------------------===//
15 //===----------------------------------------------------------------------===//
16 // SSE 1 & 2 Instruction Classes
17 //===----------------------------------------------------------------------===//
19 /// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
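/// Each instantiation expands to a commutable register-register "rr" form and a
/// load-folding "rm" form that share the given scheduling class. As a purely
/// illustrative (hypothetical) example, a use such as
///   defm FOO : sse12_fp_scalar<0x58, "addss", fadd, FR32, f32mem,
///                              SSEPackedSingle, SchedWriteFAdd.Scl>;
/// would produce FOOrr and FOOrm; the actual instantiations appear later in
/// this file.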
20 multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
21 RegisterClass RC, X86MemOperand x86memop,
22 Domain d, X86FoldableSchedWrite sched,
24 let isCommutable = 1 in {
25 def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
27 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
28 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
29 [(set RC:$dst, (OpNode RC:$src1, RC:$src2))], d>,
32 def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
34 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
35 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
36 [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))], d>,
37 Sched<[sched.Folded, sched.ReadAfterFold]>;
40 /// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
41 multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr,
42 SDPatternOperator OpNode, RegisterClass RC,
43 ValueType VT, string asm, Operand memopr,
44 ComplexPattern mem_cpat, Domain d,
45 X86FoldableSchedWrite sched, bit Is2Addr = 1> {
46 let isCodeGenOnly = 1, hasSideEffects = 0 in {
47 def rr_Int : SI_Int<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
49 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
50 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
51 [(set RC:$dst, (VT (OpNode RC:$src1, RC:$src2)))], d>,
54 def rm_Int : SI_Int<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
56 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
57 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
58 [(set RC:$dst, (VT (OpNode RC:$src1, mem_cpat:$src2)))], d>,
59 Sched<[sched.Folded, sched.ReadAfterFold]>;
63 /// sse12_fp_packed - SSE 1 & 2 packed instructions class
64 multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
65 RegisterClass RC, ValueType vt,
66 X86MemOperand x86memop, PatFrag mem_frag,
67 Domain d, X86FoldableSchedWrite sched,
69 let isCommutable = 1 in
70 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
72 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
73 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
74 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>,
77 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
79 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
80 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
81 [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))],
83 Sched<[sched.Folded, sched.ReadAfterFold]>;
86 /// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
87 multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
88 string OpcodeStr, X86MemOperand x86memop,
89 X86FoldableSchedWrite sched,
90 list<dag> pat_rr, list<dag> pat_rm,
92 let isCommutable = 1, hasSideEffects = 0 in
93 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
95 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
96 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
99 let hasSideEffects = 0, mayLoad = 1 in
100 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
102 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
103 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
105 Sched<[sched.Folded, sched.ReadAfterFold]>;
109 // Alias instructions that map fld0 to xorps for sse or vxorps for avx.
110 // This is expanded by ExpandPostRAPseudos.
111 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
112 isPseudo = 1, SchedRW = [WriteZero] in {
113 def FsFLD0SS : I<0, Pseudo, (outs FR32:$dst), (ins), "",
114 [(set FR32:$dst, fp32imm0)]>, Requires<[HasSSE1, NoAVX512]>;
115 def FsFLD0SD : I<0, Pseudo, (outs FR64:$dst), (ins), "",
116 [(set FR64:$dst, fpimm0)]>, Requires<[HasSSE2, NoAVX512]>;
119 //===----------------------------------------------------------------------===//
120 // AVX & SSE - Zero/One Vectors
121 //===----------------------------------------------------------------------===//
123 // Alias instruction that maps zero vector to pxor / xorp* for sse.
124 // This is expanded by ExpandPostRAPseudos to an xorps / vxorps, and then
125 // swizzled by ExecutionDomainFix to pxor.
126 // We set canFoldAsLoad because this can be converted to a constant-pool
127 // load of an all-zeros value if folding it would be beneficial.
128 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
129 isPseudo = 1, SchedRW = [WriteZero] in {
130 def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "",
131 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
134 let Predicates = [NoAVX512] in
135 def : Pat<(v4i32 immAllZerosV), (V_SET0)>;
138 // The same as done above, but for AVX. The 256-bit AVX1 ISA doesn't support PI
139 // (packed integer) instructions, and doesn't need them, because on Sandy Bridge
140 // the register is zeroed at the rename stage without using any execution unit,
141 // so SET0PSY and SET0PDY can be used for vector integer instructions without penalty.
142 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
143 isPseudo = 1, Predicates = [NoAVX512], SchedRW = [WriteZero] in {
144 def AVX_SET0 : I<0, Pseudo, (outs VR256:$dst), (ins), "",
145 [(set VR256:$dst, (v8i32 immAllZerosV))]>;
148 // We set canFoldAsLoad because this can be converted to a constant-pool
149 // load of an all-ones value if folding it would be beneficial.
150 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
151 isPseudo = 1, SchedRW = [WriteZero] in {
152 def V_SETALLONES : I<0, Pseudo, (outs VR128:$dst), (ins), "",
153 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
154 let Predicates = [HasAVX1Only, OptForMinSize] in {
155 def AVX1_SETALLONES: I<0, Pseudo, (outs VR256:$dst), (ins), "",
156 [(set VR256:$dst, (v8i32 immAllOnesV))]>;
158 let Predicates = [HasAVX2] in
159 def AVX2_SETALLONES : I<0, Pseudo, (outs VR256:$dst), (ins), "",
160 [(set VR256:$dst, (v8i32 immAllOnesV))]>;
163 //===----------------------------------------------------------------------===//
164 // SSE 1 & 2 - Move FP Scalar Instructions
166 // Move Instructions. Register-to-register movss/movsd is not used for FR32/64
167 // register copies because it's a partial register update; register-to-register
168 // movss/movsd is not modeled as an INSERT_SUBREG because INSERT_SUBREG requires
169 // that the insert be implementable in terms of a copy, and, as just mentioned,
170 // we don't use movss/movsd for copies.
171 //===----------------------------------------------------------------------===//
173 multiclass sse12_move_rr<SDNode OpNode, ValueType vt,
174 X86MemOperand x86memop, string base_opc,
175 string asm_opr, Domain d, string Name> {
176 let isCommutable = 1 in
177 def rr : SI<0x10, MRMSrcReg, (outs VR128:$dst),
178 (ins VR128:$src1, VR128:$src2),
179 !strconcat(base_opc, asm_opr),
180 [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))], d>,
181 Sched<[SchedWriteFShuffle.XMM]>;
183 // For the disassembler
184 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
185 def rr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
186 (ins VR128:$src1, VR128:$src2),
187 !strconcat(base_opc, asm_opr), []>,
188 Sched<[SchedWriteFShuffle.XMM]>, FoldGenData<Name#rr>;
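/// sse12_move - Wrapper that instantiates both the VEX-encoded AVX form
/// (prefixed with "V") and the legacy SSE form of sse12_move_rr above, adds the
/// corresponding register-to-memory store instructions, and declares the ".s"
/// assembler aliases for the reversed-operand (MRMDestReg) encodings.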
191 multiclass sse12_move<RegisterClass RC, SDNode OpNode, ValueType vt,
192 X86MemOperand x86memop, string OpcodeStr,
193 Domain d, string Name, Predicate pred> {
195 let Predicates = [UseAVX, OptForSize] in
196 defm V#NAME : sse12_move_rr<OpNode, vt, x86memop, OpcodeStr,
197 "\t{$src2, $src1, $dst|$dst, $src1, $src2}", d,
199 VEX_4V, VEX_LIG, VEX_WIG;
201 def V#NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
202 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
203 [(store RC:$src, addr:$dst)], d>,
204 VEX, VEX_LIG, Sched<[WriteFStore]>, VEX_WIG;
206 let Constraints = "$src1 = $dst" in {
207 let Predicates = [pred, NoSSE41_Or_OptForSize] in
208 defm NAME : sse12_move_rr<OpNode, vt, x86memop, OpcodeStr,
209 "\t{$src2, $dst|$dst, $src2}", d, Name>;
212 def NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
213 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
214 [(store RC:$src, addr:$dst)], d>,
215 Sched<[WriteFStore]>;
217 def : InstAlias<"v"#OpcodeStr#".s\t{$src2, $src1, $dst|$dst, $src1, $src2}",
218 (!cast<Instruction>("V"#NAME#"rr_REV")
219 VR128:$dst, VR128:$src1, VR128:$src2), 0>;
220 def : InstAlias<OpcodeStr#".s\t{$src2, $dst|$dst, $src2}",
221 (!cast<Instruction>(NAME#"rr_REV")
222 VR128:$dst, VR128:$src2), 0>;
225 // Loading from memory automatically zeroes the upper bits.
226 multiclass sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
227 PatFrag mem_pat, string OpcodeStr, Domain d> {
228 def V#NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
229 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
230 [(set RC:$dst, (mem_pat addr:$src))], d>,
231 VEX, VEX_LIG, Sched<[WriteFLoad]>, VEX_WIG;
232 def NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
233 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
234 [(set RC:$dst, (mem_pat addr:$src))], d>,
238 defm MOVSS : sse12_move<FR32, X86Movss, v4f32, f32mem, "movss",
239 SSEPackedSingle, "MOVSS", UseSSE1>, XS;
240 defm MOVSD : sse12_move<FR64, X86Movsd, v2f64, f64mem, "movsd",
241 SSEPackedDouble, "MOVSD", UseSSE2>, XD;
243 let canFoldAsLoad = 1, isReMaterializable = 1 in {
244 defm MOVSS : sse12_move_rm<FR32, f32mem, loadf32, "movss",
245 SSEPackedSingle>, XS;
246 defm MOVSD : sse12_move_rm<FR64, f64mem, loadf64, "movsd",
247 SSEPackedDouble>, XD;
251 let Predicates = [UseAVX] in {
252 // MOVSSrm zeros the high parts of the register; represent this
253 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
254 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
255 (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
256 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
257 (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
258 def : Pat<(v4f32 (X86vzload addr:$src)),
259 (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
261 // MOVSDrm zeros the high parts of the register; represent this
262 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
263 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
264 (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
265 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
266 (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
267 def : Pat<(v2f64 (X86vzload addr:$src)),
268 (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
270 // Represent the same patterns above but in the form they appear for
271 // 256-bit types.
272 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
273 (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
274 (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
275 def : Pat<(v8f32 (X86vzload addr:$src)),
276 (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
277 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
278 (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
279 (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;
280 def : Pat<(v4f64 (X86vzload addr:$src)),
281 (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;
283 // Extract and store.
284 def : Pat<(store (f32 (extractelt (v4f32 VR128:$src), (iPTR 0))),
286 (VMOVSSmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32))>;
289 let Predicates = [UseAVX, OptForSize] in {
290 // Move scalar to XMM zero-extended, zeroing a VR128 then do a
291 // MOVSS to the lower bits.
292 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
293 (VMOVSSrr (v4f32 (V_SET0)), VR128:$src)>;
294 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
295 (VMOVSSrr (v4i32 (V_SET0)), VR128:$src)>;
297 // Move low f32 and clear high bits.
298 def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
299 (SUBREG_TO_REG (i32 0),
300 (v4f32 (VMOVSSrr (v4f32 (V_SET0)),
301 (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm)))), sub_xmm)>;
302 def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
303 (SUBREG_TO_REG (i32 0),
304 (v4i32 (VMOVSSrr (v4i32 (V_SET0)),
305 (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm)))), sub_xmm)>;
307 def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
308 (SUBREG_TO_REG (i32 0),
309 (v2f64 (VMOVSDrr (v2f64 (V_SET0)),
310 (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm)))),
312 def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),
313 (SUBREG_TO_REG (i32 0),
314 (v2i64 (VMOVSDrr (v2i64 (V_SET0)),
315 (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm)))),
319 let Predicates = [UseSSE1] in {
320 let Predicates = [UseSSE1, NoSSE41_Or_OptForSize] in {
321 // Move scalar to XMM zero-extended, zeroing a VR128 then do a
322 // MOVSS to the lower bits.
323 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
324 (MOVSSrr (v4f32 (V_SET0)), VR128:$src)>;
325 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
326 (MOVSSrr (v4i32 (V_SET0)), VR128:$src)>;
329 // MOVSSrm already zeros the high parts of the register.
330 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
331 (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
332 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
333 (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
334 def : Pat<(v4f32 (X86vzload addr:$src)),
335 (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
337 // Extract and store.
338 def : Pat<(store (f32 (extractelt (v4f32 VR128:$src), (iPTR 0))),
340 (MOVSSmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR32))>;
343 let Predicates = [UseSSE2] in {
344 // MOVSDrm already zeros the high parts of the register.
345 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
346 (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
347 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
348 (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
349 def : Pat<(v2f64 (X86vzload addr:$src)),
350 (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
353 // Aliases to help the assembler pick two-byte VEX encodings by swapping the
354 // operands relative to the normal instructions to use VEX.R instead of VEX.B.
355 def : InstAlias<"vmovss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
356 (VMOVSSrr_REV VR128L:$dst, VR128:$src1, VR128H:$src2), 0>;
357 def : InstAlias<"vmovsd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
358 (VMOVSDrr_REV VR128L:$dst, VR128:$src1, VR128H:$src2), 0>;
360 //===----------------------------------------------------------------------===//
361 // SSE 1 & 2 - Move Aligned/Unaligned FP Instructions
362 //===----------------------------------------------------------------------===//
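/// sse12_mov_packed - Common class for the aligned/unaligned packed FP moves
/// (MOVAPS/MOVUPS and MOVAPD/MOVUPD). The register-to-register form carries no
/// pattern and is tagged as a pure register move; the load form is foldable and
/// rematerializable.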
364 multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
365 X86MemOperand x86memop, PatFrag ld_frag,
366 string asm, Domain d,
367 X86SchedWriteMoveLS sched> {
368 let hasSideEffects = 0, isMoveReg = 1 in
369 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
370 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>,
372 let canFoldAsLoad = 1, isReMaterializable = 1 in
373 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
374 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
375 [(set RC:$dst, (ld_frag addr:$src))], d>,
379 let Predicates = [HasAVX, NoVLX] in {
380 defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32, "movaps",
381 SSEPackedSingle, SchedWriteFMoveLS.XMM>,
383 defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64, "movapd",
384 SSEPackedDouble, SchedWriteFMoveLS.XMM>,
386 defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32, "movups",
387 SSEPackedSingle, SchedWriteFMoveLS.XMM>,
389 defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64, "movupd",
390 SSEPackedDouble, SchedWriteFMoveLS.XMM>,
393 defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32, "movaps",
394 SSEPackedSingle, SchedWriteFMoveLS.YMM>,
395 PS, VEX, VEX_L, VEX_WIG;
396 defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64, "movapd",
397 SSEPackedDouble, SchedWriteFMoveLS.YMM>,
398 PD, VEX, VEX_L, VEX_WIG;
399 defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32, "movups",
400 SSEPackedSingle, SchedWriteFMoveLS.YMM>,
401 PS, VEX, VEX_L, VEX_WIG;
402 defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64, "movupd",
403 SSEPackedDouble, SchedWriteFMoveLS.YMM>,
404 PD, VEX, VEX_L, VEX_WIG;
407 let Predicates = [UseSSE1] in {
408 defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32, "movaps",
409 SSEPackedSingle, SchedWriteFMoveLS.XMM>,
411 defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32, "movups",
412 SSEPackedSingle, SchedWriteFMoveLS.XMM>,
415 let Predicates = [UseSSE2] in {
416 defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64, "movapd",
417 SSEPackedDouble, SchedWriteFMoveLS.XMM>,
419 defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64, "movupd",
420 SSEPackedDouble, SchedWriteFMoveLS.XMM>,
424 let Predicates = [HasAVX, NoVLX] in {
425 let SchedRW = [SchedWriteFMoveLS.XMM.MR] in {
426 def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
427 "movaps\t{$src, $dst|$dst, $src}",
428 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>,
430 def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
431 "movapd\t{$src, $dst|$dst, $src}",
432 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>,
434 def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
435 "movups\t{$src, $dst|$dst, $src}",
436 [(store (v4f32 VR128:$src), addr:$dst)]>,
438 def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
439 "movupd\t{$src, $dst|$dst, $src}",
440 [(store (v2f64 VR128:$src), addr:$dst)]>,
444 let SchedRW = [SchedWriteFMoveLS.YMM.MR] in {
445 def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
446 "movaps\t{$src, $dst|$dst, $src}",
447 [(alignedstore (v8f32 VR256:$src), addr:$dst)]>,
449 def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
450 "movapd\t{$src, $dst|$dst, $src}",
451 [(alignedstore (v4f64 VR256:$src), addr:$dst)]>,
453 def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
454 "movups\t{$src, $dst|$dst, $src}",
455 [(store (v8f32 VR256:$src), addr:$dst)]>,
457 def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
458 "movupd\t{$src, $dst|$dst, $src}",
459 [(store (v4f64 VR256:$src), addr:$dst)]>,
465 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
467 let SchedRW = [SchedWriteFMoveLS.XMM.RR] in {
468 def VMOVAPSrr_REV : VPSI<0x29, MRMDestReg, (outs VR128:$dst),
470 "movaps\t{$src, $dst|$dst, $src}", []>,
471 VEX, VEX_WIG, FoldGenData<"VMOVAPSrr">;
472 def VMOVAPDrr_REV : VPDI<0x29, MRMDestReg, (outs VR128:$dst),
474 "movapd\t{$src, $dst|$dst, $src}", []>,
475 VEX, VEX_WIG, FoldGenData<"VMOVAPDrr">;
476 def VMOVUPSrr_REV : VPSI<0x11, MRMDestReg, (outs VR128:$dst),
478 "movups\t{$src, $dst|$dst, $src}", []>,
479 VEX, VEX_WIG, FoldGenData<"VMOVUPSrr">;
480 def VMOVUPDrr_REV : VPDI<0x11, MRMDestReg, (outs VR128:$dst),
482 "movupd\t{$src, $dst|$dst, $src}", []>,
483 VEX, VEX_WIG, FoldGenData<"VMOVUPDrr">;
486 let SchedRW = [SchedWriteFMoveLS.YMM.RR] in {
487 def VMOVAPSYrr_REV : VPSI<0x29, MRMDestReg, (outs VR256:$dst),
489 "movaps\t{$src, $dst|$dst, $src}", []>,
490 VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVAPSYrr">;
491 def VMOVAPDYrr_REV : VPDI<0x29, MRMDestReg, (outs VR256:$dst),
493 "movapd\t{$src, $dst|$dst, $src}", []>,
494 VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVAPDYrr">;
495 def VMOVUPSYrr_REV : VPSI<0x11, MRMDestReg, (outs VR256:$dst),
497 "movups\t{$src, $dst|$dst, $src}", []>,
498 VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVUPSYrr">;
499 def VMOVUPDYrr_REV : VPDI<0x11, MRMDestReg, (outs VR256:$dst),
501 "movupd\t{$src, $dst|$dst, $src}", []>,
502 VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVUPDYrr">;
506 // Aliases to help the assembler pick two-byte VEX encodings by swapping the
507 // operands relative to the normal instructions to use VEX.R instead of VEX.B.
508 def : InstAlias<"vmovaps\t{$src, $dst|$dst, $src}",
509 (VMOVAPSrr_REV VR128L:$dst, VR128H:$src), 0>;
510 def : InstAlias<"vmovapd\t{$src, $dst|$dst, $src}",
511 (VMOVAPDrr_REV VR128L:$dst, VR128H:$src), 0>;
512 def : InstAlias<"vmovups\t{$src, $dst|$dst, $src}",
513 (VMOVUPSrr_REV VR128L:$dst, VR128H:$src), 0>;
514 def : InstAlias<"vmovupd\t{$src, $dst|$dst, $src}",
515 (VMOVUPDrr_REV VR128L:$dst, VR128H:$src), 0>;
516 def : InstAlias<"vmovaps\t{$src, $dst|$dst, $src}",
517 (VMOVAPSYrr_REV VR256L:$dst, VR256H:$src), 0>;
518 def : InstAlias<"vmovapd\t{$src, $dst|$dst, $src}",
519 (VMOVAPDYrr_REV VR256L:$dst, VR256H:$src), 0>;
520 def : InstAlias<"vmovups\t{$src, $dst|$dst, $src}",
521 (VMOVUPSYrr_REV VR256L:$dst, VR256H:$src), 0>;
522 def : InstAlias<"vmovupd\t{$src, $dst|$dst, $src}",
523 (VMOVUPDYrr_REV VR256L:$dst, VR256H:$src), 0>;
525 // Reversed version with ".s" suffix for GAS compatibility.
526 def : InstAlias<"vmovaps.s\t{$src, $dst|$dst, $src}",
527 (VMOVAPSrr_REV VR128:$dst, VR128:$src), 0>;
528 def : InstAlias<"vmovapd.s\t{$src, $dst|$dst, $src}",
529 (VMOVAPDrr_REV VR128:$dst, VR128:$src), 0>;
530 def : InstAlias<"vmovups.s\t{$src, $dst|$dst, $src}",
531 (VMOVUPSrr_REV VR128:$dst, VR128:$src), 0>;
532 def : InstAlias<"vmovupd.s\t{$src, $dst|$dst, $src}",
533 (VMOVUPDrr_REV VR128:$dst, VR128:$src), 0>;
534 def : InstAlias<"vmovaps.s\t{$src, $dst|$dst, $src}",
535 (VMOVAPSYrr_REV VR256:$dst, VR256:$src), 0>;
536 def : InstAlias<"vmovapd.s\t{$src, $dst|$dst, $src}",
537 (VMOVAPDYrr_REV VR256:$dst, VR256:$src), 0>;
538 def : InstAlias<"vmovups.s\t{$src, $dst|$dst, $src}",
539 (VMOVUPSYrr_REV VR256:$dst, VR256:$src), 0>;
540 def : InstAlias<"vmovupd.s\t{$src, $dst|$dst, $src}",
541 (VMOVUPDYrr_REV VR256:$dst, VR256:$src), 0>;
543 let SchedRW = [SchedWriteFMoveLS.XMM.MR] in {
544 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
545 "movaps\t{$src, $dst|$dst, $src}",
546 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
547 def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
548 "movapd\t{$src, $dst|$dst, $src}",
549 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
550 def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
551 "movups\t{$src, $dst|$dst, $src}",
552 [(store (v4f32 VR128:$src), addr:$dst)]>;
553 def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
554 "movupd\t{$src, $dst|$dst, $src}",
555 [(store (v2f64 VR128:$src), addr:$dst)]>;
559 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
560 isMoveReg = 1, SchedRW = [SchedWriteFMoveLS.XMM.RR] in {
561 def MOVAPSrr_REV : PSI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
562 "movaps\t{$src, $dst|$dst, $src}", []>,
563 FoldGenData<"MOVAPSrr">;
564 def MOVAPDrr_REV : PDI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
565 "movapd\t{$src, $dst|$dst, $src}", []>,
566 FoldGenData<"MOVAPDrr">;
567 def MOVUPSrr_REV : PSI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
568 "movups\t{$src, $dst|$dst, $src}", []>,
569 FoldGenData<"MOVUPSrr">;
570 def MOVUPDrr_REV : PDI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
571 "movupd\t{$src, $dst|$dst, $src}", []>,
572 FoldGenData<"MOVUPDrr">;
575 // Reversed version with ".s" suffix for GAS compatibility.
576 def : InstAlias<"movaps.s\t{$src, $dst|$dst, $src}",
577 (MOVAPSrr_REV VR128:$dst, VR128:$src), 0>;
578 def : InstAlias<"movapd.s\t{$src, $dst|$dst, $src}",
579 (MOVAPDrr_REV VR128:$dst, VR128:$src), 0>;
580 def : InstAlias<"movups.s\t{$src, $dst|$dst, $src}",
581 (MOVUPSrr_REV VR128:$dst, VR128:$src), 0>;
582 def : InstAlias<"movupd.s\t{$src, $dst|$dst, $src}",
583 (MOVUPDrr_REV VR128:$dst, VR128:$src), 0>;
585 let Predicates = [HasAVX, NoVLX] in {
586 // 256-bit loads/stores need to use floating-point load/store instructions in
587 // case we don't have AVX2. Execution domain fixing will convert them to integer
588 // if AVX2 is available and changing the domain is beneficial.
589 def : Pat<(alignedloadv4i64 addr:$src),
590 (VMOVAPSYrm addr:$src)>;
591 def : Pat<(alignedloadv8i32 addr:$src),
592 (VMOVAPSYrm addr:$src)>;
593 def : Pat<(alignedloadv16i16 addr:$src),
594 (VMOVAPSYrm addr:$src)>;
595 def : Pat<(alignedloadv32i8 addr:$src),
596 (VMOVAPSYrm addr:$src)>;
597 def : Pat<(loadv4i64 addr:$src),
598 (VMOVUPSYrm addr:$src)>;
599 def : Pat<(loadv8i32 addr:$src),
600 (VMOVUPSYrm addr:$src)>;
601 def : Pat<(loadv16i16 addr:$src),
602 (VMOVUPSYrm addr:$src)>;
603 def : Pat<(loadv32i8 addr:$src),
604 (VMOVUPSYrm addr:$src)>;
606 def : Pat<(alignedstore (v4i64 VR256:$src), addr:$dst),
607 (VMOVAPSYmr addr:$dst, VR256:$src)>;
608 def : Pat<(alignedstore (v8i32 VR256:$src), addr:$dst),
609 (VMOVAPSYmr addr:$dst, VR256:$src)>;
610 def : Pat<(alignedstore (v16i16 VR256:$src), addr:$dst),
611 (VMOVAPSYmr addr:$dst, VR256:$src)>;
612 def : Pat<(alignedstore (v32i8 VR256:$src), addr:$dst),
613 (VMOVAPSYmr addr:$dst, VR256:$src)>;
614 def : Pat<(store (v4i64 VR256:$src), addr:$dst),
615 (VMOVUPSYmr addr:$dst, VR256:$src)>;
616 def : Pat<(store (v8i32 VR256:$src), addr:$dst),
617 (VMOVUPSYmr addr:$dst, VR256:$src)>;
618 def : Pat<(store (v16i16 VR256:$src), addr:$dst),
619 (VMOVUPSYmr addr:$dst, VR256:$src)>;
620 def : Pat<(store (v32i8 VR256:$src), addr:$dst),
621 (VMOVUPSYmr addr:$dst, VR256:$src)>;
624 // Use movaps / movups for SSE integer load / store (one byte shorter).
625 // The instructions selected below are then converted to MOVDQA/MOVDQU
626 // during the SSE domain pass.
627 let Predicates = [UseSSE1] in {
628 def : Pat<(alignedloadv2i64 addr:$src),
629 (MOVAPSrm addr:$src)>;
630 def : Pat<(alignedloadv4i32 addr:$src),
631 (MOVAPSrm addr:$src)>;
632 def : Pat<(alignedloadv8i16 addr:$src),
633 (MOVAPSrm addr:$src)>;
634 def : Pat<(alignedloadv16i8 addr:$src),
635 (MOVAPSrm addr:$src)>;
636 def : Pat<(loadv2i64 addr:$src),
637 (MOVUPSrm addr:$src)>;
638 def : Pat<(loadv4i32 addr:$src),
639 (MOVUPSrm addr:$src)>;
640 def : Pat<(loadv8i16 addr:$src),
641 (MOVUPSrm addr:$src)>;
642 def : Pat<(loadv16i8 addr:$src),
643 (MOVUPSrm addr:$src)>;
645 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
646 (MOVAPSmr addr:$dst, VR128:$src)>;
647 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
648 (MOVAPSmr addr:$dst, VR128:$src)>;
649 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
650 (MOVAPSmr addr:$dst, VR128:$src)>;
651 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
652 (MOVAPSmr addr:$dst, VR128:$src)>;
653 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
654 (MOVUPSmr addr:$dst, VR128:$src)>;
655 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
656 (MOVUPSmr addr:$dst, VR128:$src)>;
657 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
658 (MOVUPSmr addr:$dst, VR128:$src)>;
659 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
660 (MOVUPSmr addr:$dst, VR128:$src)>;
663 //===----------------------------------------------------------------------===//
664 // SSE 1 & 2 - Move Low packed FP Instructions
665 //===----------------------------------------------------------------------===//
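/// sse12_mov_hilo_packed_base - Shared skeleton for the MOVLP/MOVHP family: a
/// "PS" memory form with no pattern (those are selected separately) and a "PD"
/// memory form that merges the loaded f64 into the destination register via the
/// given SDNode.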
667 multiclass sse12_mov_hilo_packed_base<bits<8>opc, SDNode pdnode,
668 string base_opc, string asm_opr> {
669 // No pattern, as they need to be special-cased between high and low.
670 let hasSideEffects = 0, mayLoad = 1 in
671 def PSrm : PI<opc, MRMSrcMem,
672 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
673 !strconcat(base_opc, "s", asm_opr),
674 [], SSEPackedSingle>, PS,
675 Sched<[SchedWriteFShuffle.XMM.Folded, SchedWriteFShuffle.XMM.ReadAfterFold]>;
677 def PDrm : PI<opc, MRMSrcMem,
678 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
679 !strconcat(base_opc, "d", asm_opr),
680 [(set VR128:$dst, (v2f64 (pdnode VR128:$src1,
681 (scalar_to_vector (loadf64 addr:$src2)))))],
682 SSEPackedDouble>, PD,
683 Sched<[SchedWriteFShuffle.XMM.Folded, SchedWriteFShuffle.XMM.ReadAfterFold]>;
686 multiclass sse12_mov_hilo_packed<bits<8>opc, SDPatternOperator pdnode,
688 let Predicates = [UseAVX] in
689 defm V#NAME : sse12_mov_hilo_packed_base<opc, pdnode, base_opc,
690 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">,
693 let Constraints = "$src1 = $dst" in
694 defm NAME : sse12_mov_hilo_packed_base<opc, pdnode, base_opc,
695 "\t{$src2, $dst|$dst, $src2}">;
698 defm MOVL : sse12_mov_hilo_packed<0x12, X86Movsd, "movlp">;
700 let SchedRW = [WriteFStore] in {
701 let Predicates = [UseAVX] in {
702 def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
703 "movlps\t{$src, $dst|$dst, $src}",
704 [(store (f64 (extractelt (bc_v2f64 (v4f32 VR128:$src)),
705 (iPTR 0))), addr:$dst)]>,
707 def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
708 "movlpd\t{$src, $dst|$dst, $src}",
709 [(store (f64 (extractelt (v2f64 VR128:$src),
710 (iPTR 0))), addr:$dst)]>,
713 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
714 "movlps\t{$src, $dst|$dst, $src}",
715 [(store (f64 (extractelt (bc_v2f64 (v4f32 VR128:$src)),
716 (iPTR 0))), addr:$dst)]>;
717 def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
718 "movlpd\t{$src, $dst|$dst, $src}",
719 [(store (f64 (extractelt (v2f64 VR128:$src),
720 (iPTR 0))), addr:$dst)]>;
723 let Predicates = [UseSSE1] in {
724 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
725 def : Pat<(store (i64 (extractelt (bc_v2i64 (v4f32 VR128:$src2)),
726 (iPTR 0))), addr:$src1),
727 (MOVLPSmr addr:$src1, VR128:$src2)>;
729 // This pattern helps select MOVLPS on SSE1-only targets. With SSE2 we'll
730 // end up with a movsd or blend instead of shufp.
731 // No need for an aligned load; we're only loading 64 bits.
732 def : Pat<(X86Shufp (loadv4f32 addr:$src2), VR128:$src1, (i8 -28)),
733 (MOVLPSrm VR128:$src1, addr:$src2)>;
736 //===----------------------------------------------------------------------===//
737 // SSE 1 & 2 - Move Hi packed FP Instructions
738 //===----------------------------------------------------------------------===//
740 defm MOVH : sse12_mov_hilo_packed<0x16, X86Unpckl, "movhp">;
742 let SchedRW = [WriteFStore] in {
743 // v2f64 extract element 1 is always custom lowered to unpack high to low
744 // and extract element 0, so the non-store version isn't too horrible.
745 let Predicates = [UseAVX] in {
746 def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
747 "movhps\t{$src, $dst|$dst, $src}",
748 [(store (f64 (extractelt
749 (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
750 (bc_v2f64 (v4f32 VR128:$src))),
751 (iPTR 0))), addr:$dst)]>, VEX, VEX_WIG;
752 def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
753 "movhpd\t{$src, $dst|$dst, $src}",
754 [(store (f64 (extractelt
755 (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
756 (iPTR 0))), addr:$dst)]>, VEX, VEX_WIG;
758 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
759 "movhps\t{$src, $dst|$dst, $src}",
760 [(store (f64 (extractelt
761 (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
762 (bc_v2f64 (v4f32 VR128:$src))),
763 (iPTR 0))), addr:$dst)]>;
764 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
765 "movhpd\t{$src, $dst|$dst, $src}",
766 [(store (f64 (extractelt
767 (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
768 (iPTR 0))), addr:$dst)]>;
771 let Predicates = [UseAVX] in {
772 // Also handle an i64 load because that may get selected as a faster way to
773 // load f64.
774 def : Pat<(v2f64 (X86Unpckl VR128:$src1,
775 (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
776 (VMOVHPDrm VR128:$src1, addr:$src2)>;
778 def : Pat<(store (f64 (extractelt
779 (v2f64 (X86VPermilpi VR128:$src, (i8 1))),
780 (iPTR 0))), addr:$dst),
781 (VMOVHPDmr addr:$dst, VR128:$src)>;
784 let Predicates = [UseSSE1] in {
785 // This pattern helps select MOVHPS on SSE1-only targets. With SSE2 we'll
786 // end up with a movsd or blend instead of shufp.
787 // No need for an aligned load; we're only loading 64 bits.
788 def : Pat<(X86Movlhps VR128:$src1, (loadv4f32 addr:$src2)),
789 (MOVHPSrm VR128:$src1, addr:$src2)>;
792 let Predicates = [UseSSE2] in {
795 // Also handle an i64 load because that may get selected as a faster way to
796 // load f64.
797 def : Pat<(v2f64 (X86Unpckl VR128:$src1,
798 (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
799 (MOVHPDrm VR128:$src1, addr:$src2)>;
801 def : Pat<(store (f64 (extractelt
802 (v2f64 (X86Shufp VR128:$src, VR128:$src, (i8 1))),
803 (iPTR 0))), addr:$dst),
804 (MOVHPDmr addr:$dst, VR128:$src)>;
807 //===----------------------------------------------------------------------===//
808 // SSE 1 & 2 - Move Low to High and High to Low packed FP Instructions
809 //===----------------------------------------------------------------------===//
811 let Predicates = [UseAVX] in {
812 def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
813 (ins VR128:$src1, VR128:$src2),
814 "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
816 (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))]>,
817 VEX_4V, Sched<[SchedWriteFShuffle.XMM]>, VEX_WIG;
818 let isCommutable = 1 in
819 def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
820 (ins VR128:$src1, VR128:$src2),
821 "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
823 (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))]>,
824 VEX_4V, Sched<[SchedWriteFShuffle.XMM]>, VEX_WIG,
827 let Constraints = "$src1 = $dst" in {
828 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
829 (ins VR128:$src1, VR128:$src2),
830 "movlhps\t{$src2, $dst|$dst, $src2}",
832 (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))]>,
833 Sched<[SchedWriteFShuffle.XMM]>;
834 let isCommutable = 1 in
835 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
836 (ins VR128:$src1, VR128:$src2),
837 "movhlps\t{$src2, $dst|$dst, $src2}",
839 (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))]>,
840 Sched<[SchedWriteFShuffle.XMM]>, NotMemoryFoldable;
843 //===----------------------------------------------------------------------===//
844 // SSE 1 & 2 - Conversion Instructions
845 //===----------------------------------------------------------------------===//
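/// sse12_cvt_s - Scalar FP<->integer conversions on plain register classes
/// (cvttss2si, cvtsi2ss, etc.): a register form plus a load-folding memory
/// form, both driven by the given SDNode and load fragment.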
847 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
848 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
849 string asm, X86FoldableSchedWrite sched> {
850 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
851 [(set DstRC:$dst, (OpNode SrcRC:$src))]>,
853 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
854 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>,
855 Sched<[sched.Folded]>;
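/// sse12_cvt_p - Packed integer-to-FP conversion (used below for cvtdq2ps):
/// register and memory forms with sint_to_fp patterns in the given domain.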
858 multiclass sse12_cvt_p<bits<8> opc, RegisterClass RC, X86MemOperand x86memop,
859 ValueType DstTy, ValueType SrcTy, PatFrag ld_frag,
860 string asm, Domain d, X86FoldableSchedWrite sched> {
861 let hasSideEffects = 0 in {
862 def rr : I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src), asm,
863 [(set RC:$dst, (DstTy (sint_to_fp (SrcTy RC:$src))))], d>,
866 def rm : I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src), asm,
867 [(set RC:$dst, (DstTy (sint_to_fp
868 (SrcTy (ld_frag addr:$src)))))], d>,
869 Sched<[sched.Folded]>;
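/// sse12_vcvt_avx - AVX scalar integer-to-FP conversions (vcvtsi2ss/vcvtsi2sd).
/// The extra register source supplies the upper bits of the destination, so
/// these carry no patterns here and are selected through the explicit Pat<>
/// entries below.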
873 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
874 X86MemOperand x86memop, string asm,
875 X86FoldableSchedWrite sched> {
876 let hasSideEffects = 0, Predicates = [UseAVX] in {
877 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
878 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
881 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
882 (ins DstRC:$src1, x86memop:$src),
883 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
884 Sched<[sched.Folded, sched.ReadAfterFold]>;
885 } // hasSideEffects = 0
888 let Predicates = [UseAVX] in {
889 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
890 "cvttss2si\t{$src, $dst|$dst, $src}",
893 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
894 "cvttss2si\t{$src, $dst|$dst, $src}",
896 XS, VEX, VEX_W, VEX_LIG;
897 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
898 "cvttsd2si\t{$src, $dst|$dst, $src}",
901 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
902 "cvttsd2si\t{$src, $dst|$dst, $src}",
904 XD, VEX, VEX_W, VEX_LIG;
906 def : InstAlias<"vcvttss2si{l}\t{$src, $dst|$dst, $src}",
907 (VCVTTSS2SIrr GR32:$dst, FR32:$src), 0, "att">;
908 def : InstAlias<"vcvttss2si{l}\t{$src, $dst|$dst, $src}",
909 (VCVTTSS2SIrm GR32:$dst, f32mem:$src), 0, "att">;
910 def : InstAlias<"vcvttsd2si{l}\t{$src, $dst|$dst, $src}",
911 (VCVTTSD2SIrr GR32:$dst, FR64:$src), 0, "att">;
912 def : InstAlias<"vcvttsd2si{l}\t{$src, $dst|$dst, $src}",
913 (VCVTTSD2SIrm GR32:$dst, f64mem:$src), 0, "att">;
914 def : InstAlias<"vcvttss2si{q}\t{$src, $dst|$dst, $src}",
915 (VCVTTSS2SI64rr GR64:$dst, FR32:$src), 0, "att">;
916 def : InstAlias<"vcvttss2si{q}\t{$src, $dst|$dst, $src}",
917 (VCVTTSS2SI64rm GR64:$dst, f32mem:$src), 0, "att">;
918 def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
919 (VCVTTSD2SI64rr GR64:$dst, FR64:$src), 0, "att">;
920 def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
921 (VCVTTSD2SI64rm GR64:$dst, f64mem:$src), 0, "att">;
923 // The assembler can recognize rr 64-bit instructions by seeing an rxx
924 // register, but the same isn't true when only using memory operands, so we
925 // provide other assembly "l" and "q" forms to address this explicitly
926 // where appropriate.
927 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss{l}",
928 WriteCvtI2SS>, XS, VEX_4V, VEX_LIG;
929 defm VCVTSI642SS : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}",
930 WriteCvtI2SS>, XS, VEX_4V, VEX_W, VEX_LIG;
931 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}",
932 WriteCvtI2SD>, XD, VEX_4V, VEX_LIG;
933 defm VCVTSI642SD : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}",
934 WriteCvtI2SD>, XD, VEX_4V, VEX_W, VEX_LIG;
936 let Predicates = [UseAVX] in {
937 def : InstAlias<"vcvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
938 (VCVTSI2SSrm FR64:$dst, FR64:$src1, i32mem:$src), 0, "att">;
939 def : InstAlias<"vcvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}",
940 (VCVTSI2SDrm FR64:$dst, FR64:$src1, i32mem:$src), 0, "att">;
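// The VEX-encoded converters take an extra source register whose upper
// elements pass through to the destination. Plain scalar sint_to_fp has no
// meaningful value for those bits, so the patterns below feed an IMPLICIT_DEF.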
942 def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
943 (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
944 def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
945 (VCVTSI642SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
946 def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
947 (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
948 def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
949 (VCVTSI642SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
951 def : Pat<(f32 (sint_to_fp GR32:$src)),
952 (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
953 def : Pat<(f32 (sint_to_fp GR64:$src)),
954 (VCVTSI642SSrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
955 def : Pat<(f64 (sint_to_fp GR32:$src)),
956 (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
957 def : Pat<(f64 (sint_to_fp GR64:$src)),
958 (VCVTSI642SDrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
961 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
962 "cvttss2si\t{$src, $dst|$dst, $src}",
964 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
965 "cvttss2si\t{$src, $dst|$dst, $src}",
966 WriteCvtSS2I>, XS, REX_W;
967 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
968 "cvttsd2si\t{$src, $dst|$dst, $src}",
970 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
971 "cvttsd2si\t{$src, $dst|$dst, $src}",
972 WriteCvtSD2I>, XD, REX_W;
973 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
974 "cvtsi2ss{l}\t{$src, $dst|$dst, $src}",
976 defm CVTSI642SS : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
977 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
978 WriteCvtI2SS>, XS, REX_W;
979 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
980 "cvtsi2sd{l}\t{$src, $dst|$dst, $src}",
982 defm CVTSI642SD : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
983 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
984 WriteCvtI2SD>, XD, REX_W;
986 def : InstAlias<"cvttss2si{l}\t{$src, $dst|$dst, $src}",
987 (CVTTSS2SIrr GR32:$dst, FR32:$src), 0, "att">;
988 def : InstAlias<"cvttss2si{l}\t{$src, $dst|$dst, $src}",
989 (CVTTSS2SIrm GR32:$dst, f32mem:$src), 0, "att">;
990 def : InstAlias<"cvttsd2si{l}\t{$src, $dst|$dst, $src}",
991 (CVTTSD2SIrr GR32:$dst, FR64:$src), 0, "att">;
992 def : InstAlias<"cvttsd2si{l}\t{$src, $dst|$dst, $src}",
993 (CVTTSD2SIrm GR32:$dst, f64mem:$src), 0, "att">;
994 def : InstAlias<"cvttss2si{q}\t{$src, $dst|$dst, $src}",
995 (CVTTSS2SI64rr GR64:$dst, FR32:$src), 0, "att">;
996 def : InstAlias<"cvttss2si{q}\t{$src, $dst|$dst, $src}",
997 (CVTTSS2SI64rm GR64:$dst, f32mem:$src), 0, "att">;
998 def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
999 (CVTTSD2SI64rr GR64:$dst, FR64:$src), 0, "att">;
1000 def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1001 (CVTTSD2SI64rm GR64:$dst, f64mem:$src), 0, "att">;
1003 def : InstAlias<"cvtsi2ss\t{$src, $dst|$dst, $src}",
1004 (CVTSI2SSrm FR64:$dst, i32mem:$src), 0, "att">;
1005 def : InstAlias<"cvtsi2sd\t{$src, $dst|$dst, $src}",
1006 (CVTSI2SDrm FR64:$dst, i32mem:$src), 0, "att">;
1008 // Conversion Instruction Intrinsics - Match intrinsics which expect MM
1009 // and/or XMM operand(s).
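/// sse12_cvt_sint - Intrinsic (XMM-operand) form of the scalar FP-to-integer
/// conversions: the source is a full vector register or a scalar load matched
/// through a ComplexPattern, and the result is written to a GPR.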
1011 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1012 ValueType DstVT, ValueType SrcVT, SDNode OpNode,
1013 Operand memop, ComplexPattern mem_cpat, string asm,
1014 X86FoldableSchedWrite sched> {
1015 def rr_Int : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
1016 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1017 [(set DstRC:$dst, (DstVT (OpNode (SrcVT SrcRC:$src))))]>,
1019 def rm_Int : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
1020 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1021 [(set DstRC:$dst, (DstVT (OpNode (SrcVT mem_cpat:$src))))]>,
1022 Sched<[sched.Folded]>;
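/// sse12_cvt_sint_3addr - Intrinsic form of the scalar integer-to-FP
/// conversions: the first source is the XMM register whose upper elements are
/// preserved, so these have no patterns and are selected from the X86Movss /
/// X86Movsd intrinsic sequences matched later in this file.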
1025 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
1026 RegisterClass DstRC, X86MemOperand x86memop,
1027 string asm, X86FoldableSchedWrite sched,
1029 let hasSideEffects = 0 in {
1030 def rr_Int : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
1032 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
1033 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
1034 []>, Sched<[sched]>;
1036 def rm_Int : SI<opc, MRMSrcMem, (outs DstRC:$dst),
1037 (ins DstRC:$src1, x86memop:$src2),
1039 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
1040 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
1041 []>, Sched<[sched.Folded, sched.ReadAfterFold]>;
1045 let Predicates = [UseAVX] in {
1046 defm VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, i32, v2f64,
1047 X86cvts2si, sdmem, sse_load_f64, "cvtsd2si",
1048 WriteCvtSD2I>, XD, VEX, VEX_LIG;
1049 defm VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, i64, v2f64,
1050 X86cvts2si, sdmem, sse_load_f64, "cvtsd2si",
1051 WriteCvtSD2I>, XD, VEX, VEX_W, VEX_LIG;
1053 defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, i32, v2f64, X86cvts2si,
1054 sdmem, sse_load_f64, "cvtsd2si", WriteCvtSD2I>, XD;
1055 defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, i64, v2f64, X86cvts2si,
1056 sdmem, sse_load_f64, "cvtsd2si", WriteCvtSD2I>, XD, REX_W;
1059 let isCodeGenOnly = 1 in {
1060 let Predicates = [UseAVX] in {
1061 defm VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1062 i32mem, "cvtsi2ss{l}", WriteCvtI2SS, 0>, XS, VEX_4V;
1063 defm VCVTSI642SS : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1064 i64mem, "cvtsi2ss{q}", WriteCvtI2SS, 0>, XS, VEX_4V, VEX_W;
1065 defm VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1066 i32mem, "cvtsi2sd{l}", WriteCvtI2SD, 0>, XD, VEX_4V;
1067 defm VCVTSI642SD : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1068 i64mem, "cvtsi2sd{q}", WriteCvtI2SD, 0>, XD, VEX_4V, VEX_W;
1070 let Constraints = "$src1 = $dst" in {
1071 defm CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1072 i32mem, "cvtsi2ss{l}", WriteCvtI2SS>, XS;
1073 defm CVTSI642SS : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1074 i64mem, "cvtsi2ss{q}", WriteCvtI2SS>, XS, REX_W;
1075 defm CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1076 i32mem, "cvtsi2sd{l}", WriteCvtI2SD>, XD;
1077 defm CVTSI642SD : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1078 i64mem, "cvtsi2sd{q}", WriteCvtI2SD>, XD, REX_W;
1080 } // isCodeGenOnly = 1
1084 // Aliases for intrinsics
1085 let isCodeGenOnly = 1 in {
1086 let Predicates = [UseAVX] in {
1087 defm VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, i32, v4f32, X86cvtts2Int,
1088 ssmem, sse_load_f32, "cvttss2si",
1089 WriteCvtSS2I>, XS, VEX;
1090 defm VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64, i64, v4f32,
1091 X86cvtts2Int, ssmem, sse_load_f32,
1092 "cvttss2si", WriteCvtSS2I>,
1094 defm VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, i32, v2f64, X86cvtts2Int,
1095 sdmem, sse_load_f64, "cvttsd2si",
1096 WriteCvtSS2I>, XD, VEX;
1097 defm VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64, i64, v2f64,
1098 X86cvtts2Int, sdmem, sse_load_f64,
1099 "cvttsd2si", WriteCvtSS2I>,
1102 defm CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, i32, v4f32, X86cvtts2Int,
1103 ssmem, sse_load_f32, "cvttss2si",
1105 defm CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64, i64, v4f32,
1106 X86cvtts2Int, ssmem, sse_load_f32,
1107 "cvttss2si", WriteCvtSS2I>, XS, REX_W;
1108 defm CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, i32, v2f64, X86cvtts2Int,
1109 sdmem, sse_load_f64, "cvttsd2si",
1111 defm CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64, i64, v2f64,
1112 X86cvtts2Int, sdmem, sse_load_f64,
1113 "cvttsd2si", WriteCvtSD2I>, XD, REX_W;
1114 } // isCodeGenOnly = 1
1116 let Predicates = [UseAVX] in {
1117 defm VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, i32, v4f32, X86cvts2si,
1118 ssmem, sse_load_f32, "cvtss2si",
1119 WriteCvtSS2I>, XS, VEX, VEX_LIG;
1120 defm VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, i64, v4f32, X86cvts2si,
1121 ssmem, sse_load_f32, "cvtss2si",
1122 WriteCvtSS2I>, XS, VEX, VEX_W, VEX_LIG;
1124 defm CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, i32, v4f32, X86cvts2si,
1125 ssmem, sse_load_f32, "cvtss2si",
1127 defm CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, i64, v4f32, X86cvts2si,
1128 ssmem, sse_load_f32, "cvtss2si",
1129 WriteCvtSS2I>, XS, REX_W;
1131 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, i128mem, v4f32, v4i32, load,
1132 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1133 SSEPackedSingle, WriteCvtI2PS>,
1134 PS, VEX, Requires<[HasAVX, NoVLX]>, VEX_WIG;
1135 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, i256mem, v8f32, v8i32, load,
1136 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1137 SSEPackedSingle, WriteCvtI2PSY>,
1138 PS, VEX, VEX_L, Requires<[HasAVX, NoVLX]>, VEX_WIG;
1140 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, i128mem, v4f32, v4i32, memop,
1141 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1142 SSEPackedSingle, WriteCvtI2PS>,
1143 PS, Requires<[UseSSE2]>;
1145 let Predicates = [UseAVX] in {
1146 def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
1147 (VCVTSS2SIrr_Int GR32:$dst, VR128:$src), 0, "att">;
1148 def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
1149 (VCVTSS2SIrm_Int GR32:$dst, ssmem:$src), 0, "att">;
1150 def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
1151 (VCVTSD2SIrr_Int GR32:$dst, VR128:$src), 0, "att">;
1152 def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
1153 (VCVTSD2SIrm_Int GR32:$dst, sdmem:$src), 0, "att">;
1154 def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
1155 (VCVTSS2SI64rr_Int GR64:$dst, VR128:$src), 0, "att">;
1156 def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
1157 (VCVTSS2SI64rm_Int GR64:$dst, ssmem:$src), 0, "att">;
1158 def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
1159 (VCVTSD2SI64rr_Int GR64:$dst, VR128:$src), 0, "att">;
1160 def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
1161 (VCVTSD2SI64rm_Int GR64:$dst, sdmem:$src), 0, "att">;
1164 def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
1165 (CVTSS2SIrr_Int GR32:$dst, VR128:$src), 0, "att">;
1166 def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
1167 (CVTSS2SIrm_Int GR32:$dst, ssmem:$src), 0, "att">;
1168 def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
1169 (CVTSD2SIrr_Int GR32:$dst, VR128:$src), 0, "att">;
1170 def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
1171 (CVTSD2SIrm_Int GR32:$dst, sdmem:$src), 0, "att">;
1172 def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
1173 (CVTSS2SI64rr_Int GR64:$dst, VR128:$src), 0, "att">;
1174 def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
1175 (CVTSS2SI64rm_Int GR64:$dst, ssmem:$src), 0, "att">;
1176 def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
1177 (CVTSD2SI64rr_Int GR64:$dst, VR128:$src), 0, "att">;
1178 def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
1179 (CVTSD2SI64rm_Int GR64:$dst, sdmem:$src), 0, "att">;
1183 // Convert scalar double to scalar single
1184 let hasSideEffects = 0, Predicates = [UseAVX] in {
1185 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
1186 (ins FR32:$src1, FR64:$src2),
1187 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
1188 VEX_4V, VEX_LIG, VEX_WIG,
1189 Sched<[WriteCvtSD2SS]>;
1191 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
1192 (ins FR32:$src1, f64mem:$src2),
1193 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
1194 XD, VEX_4V, VEX_LIG, VEX_WIG,
1195 Sched<[WriteCvtSD2SS.Folded, WriteCvtSD2SS.ReadAfterFold]>;
1198 def : Pat<(f32 (fpround FR64:$src)),
1199 (VCVTSD2SSrr (f32 (IMPLICIT_DEF)), FR64:$src)>,
1202 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
1203 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1204 [(set FR32:$dst, (fpround FR64:$src))]>,
1205 Sched<[WriteCvtSD2SS]>;
1206 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
1207 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1208 [(set FR32:$dst, (fpround (loadf64 addr:$src)))]>,
1209 XD, Requires<[UseSSE2, OptForSize]>,
1210 Sched<[WriteCvtSD2SS.Folded]>;
1212 let isCodeGenOnly = 1 in {
1213 def VCVTSD2SSrr_Int: I<0x5A, MRMSrcReg,
1214 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1215 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1217 (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))]>,
1218 XD, VEX_4V, VEX_WIG, Requires<[HasAVX]>,
1219 Sched<[WriteCvtSD2SS]>;
1220 def VCVTSD2SSrm_Int: I<0x5A, MRMSrcMem,
1221 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
1222 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1223 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
1224 VR128:$src1, sse_load_f64:$src2))]>,
1225 XD, VEX_4V, VEX_WIG, Requires<[HasAVX]>,
1226 Sched<[WriteCvtSD2SS.Folded, WriteCvtSD2SS.ReadAfterFold]>;
1227 let Constraints = "$src1 = $dst" in {
1228 def CVTSD2SSrr_Int: I<0x5A, MRMSrcReg,
1229 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1230 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
1232 (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))]>,
1233 XD, Requires<[UseSSE2]>, Sched<[WriteCvtSD2SS]>;
1234 def CVTSD2SSrm_Int: I<0x5A, MRMSrcMem,
1235 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
1236 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
1237 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
1238 VR128:$src1, sse_load_f64:$src2))]>,
1239 XD, Requires<[UseSSE2]>,
1240 Sched<[WriteCvtSD2SS.Folded, WriteCvtSD2SS.ReadAfterFold]>;
1242 } // isCodeGenOnly = 1
1244 // Convert scalar single to scalar double
1245 // SSE2 instructions with XS prefix
1246 let hasSideEffects = 0 in {
1247 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
1248 (ins FR64:$src1, FR32:$src2),
1249 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
1250 XS, VEX_4V, VEX_LIG, VEX_WIG,
1251 Sched<[WriteCvtSS2SD]>, Requires<[UseAVX]>;
1253 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
1254 (ins FR64:$src1, f32mem:$src2),
1255 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
1256 XS, VEX_4V, VEX_LIG, VEX_WIG,
1257 Sched<[WriteCvtSS2SD.Folded, WriteCvtSS2SD.ReadAfterFold]>,
1258 Requires<[UseAVX, OptForSize]>;
1261 def : Pat<(f64 (fpextend FR32:$src)),
1262 (VCVTSS2SDrr (f64 (IMPLICIT_DEF)), FR32:$src)>, Requires<[UseAVX]>;
1263 def : Pat<(fpextend (loadf32 addr:$src)),
1264 (VCVTSS2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>, Requires<[UseAVX, OptForSize]>;
1266 def : Pat<(extloadf32 addr:$src),
1267 (VCVTSS2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>,
1268 Requires<[UseAVX, OptForSize]>;
1269 def : Pat<(extloadf32 addr:$src),
1270 (VCVTSS2SDrr (f64 (IMPLICIT_DEF)), (VMOVSSrm addr:$src))>,
1271 Requires<[UseAVX, OptForSpeed]>;
1273 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
1274 "cvtss2sd\t{$src, $dst|$dst, $src}",
1275 [(set FR64:$dst, (fpextend FR32:$src))]>,
1276 XS, Requires<[UseSSE2]>, Sched<[WriteCvtSS2SD]>;
1277 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
1278 "cvtss2sd\t{$src, $dst|$dst, $src}",
1279 [(set FR64:$dst, (extloadf32 addr:$src))]>,
1280 XS, Requires<[UseSSE2, OptForSize]>,
1281 Sched<[WriteCvtSS2SD.Folded]>;
1283 // extload f32 -> f64. This matches load+fpextend because we have a hack in
1284 // the isel (PreprocessForFPConvert) that can introduce loads after dag
1285 // combine.
1286 // Since these loads aren't folded into the fpextend, we have to match it
1287 // explicitly here.
1288 def : Pat<(fpextend (loadf32 addr:$src)),
1289 (CVTSS2SDrm addr:$src)>, Requires<[UseSSE2, OptForSize]>;
1290 def : Pat<(extloadf32 addr:$src),
1291 (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[UseSSE2, OptForSpeed]>;
1293 let isCodeGenOnly = 1, hasSideEffects = 0 in {
1294 def VCVTSS2SDrr_Int: I<0x5A, MRMSrcReg,
1295 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1296 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1297 []>, XS, VEX_4V, VEX_WIG,
1298 Requires<[HasAVX]>, Sched<[WriteCvtSS2SD]>;
1300 def VCVTSS2SDrm_Int: I<0x5A, MRMSrcMem,
1301 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
1302 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1303 []>, XS, VEX_4V, VEX_WIG, Requires<[HasAVX]>,
1304 Sched<[WriteCvtSS2SD.Folded, WriteCvtSS2SD.ReadAfterFold]>;
1305 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
1306 def CVTSS2SDrr_Int: I<0x5A, MRMSrcReg,
1307 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1308 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1309 []>, XS, Requires<[UseSSE2]>,
1310 Sched<[WriteCvtSS2SD]>;
1312 def CVTSS2SDrm_Int: I<0x5A, MRMSrcMem,
1313 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
1314 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1315 []>, XS, Requires<[UseSSE2]>,
1316 Sched<[WriteCvtSS2SD.Folded, WriteCvtSS2SD.ReadAfterFold]>;
1318 } // isCodeGenOnly = 1
1320 // Patterns used for matching (v)cvtsi2ss, (v)cvtsi2sd, (v)cvtsd2ss and
1321 // (v)cvtss2sd intrinsic sequences from clang which produce unnecessary
1322 // vmovs{s,d} instructions.
1323 let Predicates = [UseAVX] in {
1324 def : Pat<(v4f32 (X86Movss
1326 (v4f32 (scalar_to_vector
1327 (f32 (fpround (f64 (extractelt VR128:$src, (iPTR 0))))))))),
1328 (VCVTSD2SSrr_Int VR128:$dst, VR128:$src)>;
1330 def : Pat<(v2f64 (X86Movsd
1332 (v2f64 (scalar_to_vector
1333 (f64 (fpextend (f32 (extractelt VR128:$src, (iPTR 0))))))))),
1334 (VCVTSS2SDrr_Int VR128:$dst, VR128:$src)>;
1336 def : Pat<(v4f32 (X86Movss
1338 (v4f32 (scalar_to_vector (f32 (sint_to_fp GR64:$src)))))),
1339 (VCVTSI642SSrr_Int VR128:$dst, GR64:$src)>;
1341 def : Pat<(v4f32 (X86Movss
1343 (v4f32 (scalar_to_vector (f32 (sint_to_fp (loadi64 addr:$src))))))),
1344 (VCVTSI642SSrm_Int VR128:$dst, addr:$src)>;
1346 def : Pat<(v4f32 (X86Movss
1348 (v4f32 (scalar_to_vector (f32 (sint_to_fp GR32:$src)))))),
1349 (VCVTSI2SSrr_Int VR128:$dst, GR32:$src)>;
1351 def : Pat<(v4f32 (X86Movss
1353 (v4f32 (scalar_to_vector (f32 (sint_to_fp (loadi32 addr:$src))))))),
1354 (VCVTSI2SSrm_Int VR128:$dst, addr:$src)>;
1356 def : Pat<(v2f64 (X86Movsd
1358 (v2f64 (scalar_to_vector (f64 (sint_to_fp GR64:$src)))))),
1359 (VCVTSI642SDrr_Int VR128:$dst, GR64:$src)>;
1361 def : Pat<(v2f64 (X86Movsd
1362 (v2f64 VR128:$dst),
1363 (v2f64 (scalar_to_vector (f64 (sint_to_fp (loadi64 addr:$src))))))),
1364 (VCVTSI642SDrm_Int VR128:$dst, addr:$src)>;
1366 def : Pat<(v2f64 (X86Movsd
1367 (v2f64 VR128:$dst),
1368 (v2f64 (scalar_to_vector (f64 (sint_to_fp GR32:$src)))))),
1369 (VCVTSI2SDrr_Int VR128:$dst, GR32:$src)>;
1371 def : Pat<(v2f64 (X86Movsd
1372 (v2f64 VR128:$dst),
1373 (v2f64 (scalar_to_vector (f64 (sint_to_fp (loadi32 addr:$src))))))),
1374 (VCVTSI2SDrm_Int VR128:$dst, addr:$src)>;
1375 } // Predicates = [UseAVX]
1377 let Predicates = [UseSSE2] in {
1378 def : Pat<(v4f32 (X86Movss
1379 (v4f32 VR128:$dst),
1380 (v4f32 (scalar_to_vector
1381 (f32 (fpround (f64 (extractelt VR128:$src, (iPTR 0))))))))),
1382 (CVTSD2SSrr_Int VR128:$dst, VR128:$src)>;
1384 def : Pat<(v2f64 (X86Movsd
1385 (v2f64 VR128:$dst),
1386 (v2f64 (scalar_to_vector
1387 (f64 (fpextend (f32 (extractelt VR128:$src, (iPTR 0))))))))),
1388 (CVTSS2SDrr_Int VR128:$dst, VR128:$src)>;
1390 def : Pat<(v2f64 (X86Movsd
1391 (v2f64 VR128:$dst),
1392 (v2f64 (scalar_to_vector (f64 (sint_to_fp GR64:$src)))))),
1393 (CVTSI642SDrr_Int VR128:$dst, GR64:$src)>;
1395 def : Pat<(v2f64 (X86Movsd
1396 (v2f64 VR128:$dst),
1397 (v2f64 (scalar_to_vector (f64 (sint_to_fp (loadi64 addr:$src))))))),
1398 (CVTSI642SDrm_Int VR128:$dst, addr:$src)>;
1400 def : Pat<(v2f64 (X86Movsd
1401 (v2f64 VR128:$dst),
1402 (v2f64 (scalar_to_vector (f64 (sint_to_fp GR32:$src)))))),
1403 (CVTSI2SDrr_Int VR128:$dst, GR32:$src)>;
1405 def : Pat<(v2f64 (X86Movsd
1406 (v2f64 VR128:$dst),
1407 (v2f64 (scalar_to_vector (f64 (sint_to_fp (loadi32 addr:$src))))))),
1408 (CVTSI2SDrm_Int VR128:$dst, addr:$src)>;
1409 } // Predicates = [UseSSE2]
1411 let Predicates = [UseSSE1] in {
1412 def : Pat<(v4f32 (X86Movss
1413 (v4f32 VR128:$dst),
1414 (v4f32 (scalar_to_vector (f32 (sint_to_fp GR64:$src)))))),
1415 (CVTSI642SSrr_Int VR128:$dst, GR64:$src)>;
1417 def : Pat<(v4f32 (X86Movss
1418 (v4f32 VR128:$dst),
1419 (v4f32 (scalar_to_vector (f32 (sint_to_fp (loadi64 addr:$src))))))),
1420 (CVTSI642SSrm_Int VR128:$dst, addr:$src)>;
1422 def : Pat<(v4f32 (X86Movss
1423 (v4f32 VR128:$dst),
1424 (v4f32 (scalar_to_vector (f32 (sint_to_fp GR32:$src)))))),
1425 (CVTSI2SSrr_Int VR128:$dst, GR32:$src)>;
1427 def : Pat<(v4f32 (X86Movss
1428 (v4f32 VR128:$dst),
1429 (v4f32 (scalar_to_vector (f32 (sint_to_fp (loadi32 addr:$src))))))),
1430 (CVTSI2SSrm_Int VR128:$dst, addr:$src)>;
1431 } // Predicates = [UseSSE1]
1433 let Predicates = [HasAVX, NoVLX] in {
1434 // Convert packed single/double fp to doubleword
1435 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1436 "cvtps2dq\t{$src, $dst|$dst, $src}",
1437 [(set VR128:$dst, (v4i32 (X86cvtp2Int (v4f32 VR128:$src))))]>,
1438 VEX, Sched<[WriteCvtPS2I]>, VEX_WIG;
1439 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1440 "cvtps2dq\t{$src, $dst|$dst, $src}",
1442 (v4i32 (X86cvtp2Int (loadv4f32 addr:$src))))]>,
1443 VEX, Sched<[WriteCvtPS2ILd]>, VEX_WIG;
1444 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1445 "cvtps2dq\t{$src, $dst|$dst, $src}",
1447 (v8i32 (X86cvtp2Int (v8f32 VR256:$src))))]>,
1448 VEX, VEX_L, Sched<[WriteCvtPS2IY]>, VEX_WIG;
1449 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1450 "cvtps2dq\t{$src, $dst|$dst, $src}",
1452 (v8i32 (X86cvtp2Int (loadv8f32 addr:$src))))]>,
1453 VEX, VEX_L, Sched<[WriteCvtPS2IYLd]>, VEX_WIG;
1455 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1456 "cvtps2dq\t{$src, $dst|$dst, $src}",
1457 [(set VR128:$dst, (v4i32 (X86cvtp2Int (v4f32 VR128:$src))))]>,
1458 Sched<[WriteCvtPS2I]>;
1459 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1460 "cvtps2dq\t{$src, $dst|$dst, $src}",
1462 (v4i32 (X86cvtp2Int (memopv4f32 addr:$src))))]>,
1463 Sched<[WriteCvtPS2ILd]>;
1466 // Convert Packed Double FP to Packed DW Integers
1467 let Predicates = [HasAVX, NoVLX] in {
1468 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1469 // register, but the same isn't true when using memory operands instead.
1470 // Provide other assembly rr and rm forms to address this explicitly.
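// Illustrative example of the ambiguity (a sketch, not from this file):
//   vcvtpd2dq %xmm1, %xmm0   ; source width is visible from the register
//   vcvtpd2dq %ymm1, %xmm0   ; likewise
//   vcvtpd2dq (%rax), %xmm0  ; 128- or 256-bit load? cannot be inferred
// The explicit "vcvtpd2dqx"/"vcvtpd2dqy" spellings below resolve the memory
// forms, and the InstAliases also accept the suffixed spellings for the
// register forms.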
1471 def VCVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1472 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
1474 (v4i32 (X86cvtp2Int (v2f64 VR128:$src))))]>,
1475 VEX, Sched<[WriteCvtPD2I]>, VEX_WIG;
1478 def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
1479 (VCVTPD2DQrr VR128:$dst, VR128:$src), 0>;
1480 def VCVTPD2DQrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1481 "vcvtpd2dq{x}\t{$src, $dst|$dst, $src}",
1483 (v4i32 (X86cvtp2Int (loadv2f64 addr:$src))))]>, VEX,
1484 Sched<[WriteCvtPD2ILd]>, VEX_WIG;
1485 def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
1486 (VCVTPD2DQrm VR128:$dst, f128mem:$src), 0, "intel">;
1489 def VCVTPD2DQYrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1490 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
1492 (v4i32 (X86cvtp2Int (v4f64 VR256:$src))))]>,
1493 VEX, VEX_L, Sched<[WriteCvtPD2IY]>, VEX_WIG;
1494 def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1495 "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
1497 (v4i32 (X86cvtp2Int (loadv4f64 addr:$src))))]>,
1498 VEX, VEX_L, Sched<[WriteCvtPD2IYLd]>, VEX_WIG;
1499 def : InstAlias<"vcvtpd2dqy\t{$src, $dst|$dst, $src}",
1500 (VCVTPD2DQYrr VR128:$dst, VR256:$src), 0>;
1501 def : InstAlias<"vcvtpd2dqy\t{$src, $dst|$dst, $src}",
1502 (VCVTPD2DQYrm VR128:$dst, f256mem:$src), 0, "intel">;
1505 def CVTPD2DQrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1506 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1508 (v4i32 (X86cvtp2Int (memopv2f64 addr:$src))))]>,
1509 Sched<[WriteCvtPD2ILd]>;
1510 def CVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1511 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1513 (v4i32 (X86cvtp2Int (v2f64 VR128:$src))))]>,
1514 Sched<[WriteCvtPD2I]>;
1516 // Convert with truncation packed single/double fp to doubleword
1517 // SSE2 packed instructions with XS prefix
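// ("Truncation" here means round-toward-zero, i.e. the semantics of a C cast
// and of ISD::FP_TO_SINT, which is why the fp_to_sint patterns below map onto
// the cvtt* forms; the non-truncating cvt* forms above use the current MXCSR
// rounding mode instead.)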
1518 let Predicates = [HasAVX, NoVLX] in {
1519 def VCVTTPS2DQrr : VS2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1520 "cvttps2dq\t{$src, $dst|$dst, $src}",
1522 (v4i32 (X86cvttp2si (v4f32 VR128:$src))))]>,
1523 VEX, Sched<[WriteCvtPS2I]>, VEX_WIG;
1524 def VCVTTPS2DQrm : VS2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1525 "cvttps2dq\t{$src, $dst|$dst, $src}",
1527 (v4i32 (X86cvttp2si (loadv4f32 addr:$src))))]>,
1528 VEX, Sched<[WriteCvtPS2ILd]>, VEX_WIG;
1529 def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1530 "cvttps2dq\t{$src, $dst|$dst, $src}",
1532 (v8i32 (X86cvttp2si (v8f32 VR256:$src))))]>,
1533 VEX, VEX_L, Sched<[WriteCvtPS2IY]>, VEX_WIG;
1534 def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1535 "cvttps2dq\t{$src, $dst|$dst, $src}",
1537 (v8i32 (X86cvttp2si (loadv8f32 addr:$src))))]>,
1539 Sched<[WriteCvtPS2IYLd]>, VEX_WIG;
1542 let Predicates = [HasAVX, NoVLX] in {
1543 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
1544 (VCVTTPS2DQrr VR128:$src)>;
1545 def : Pat<(v4i32 (fp_to_sint (loadv4f32 addr:$src))),
1546 (VCVTTPS2DQrm addr:$src)>;
1547 def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
1548 (VCVTTPS2DQYrr VR256:$src)>;
1549 def : Pat<(v8i32 (fp_to_sint (loadv8f32 addr:$src))),
1550 (VCVTTPS2DQYrm addr:$src)>;
1553 def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1554 "cvttps2dq\t{$src, $dst|$dst, $src}",
1556 (v4i32 (X86cvttp2si (v4f32 VR128:$src))))]>,
1557 Sched<[WriteCvtPS2I]>;
1558 def CVTTPS2DQrm : S2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1559 "cvttps2dq\t{$src, $dst|$dst, $src}",
1561 (v4i32 (X86cvttp2si (memopv4f32 addr:$src))))]>,
1562 Sched<[WriteCvtPS2ILd]>;
1564 let Predicates = [UseSSE2] in {
1565 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
1566 (CVTTPS2DQrr VR128:$src)>;
1567 def : Pat<(v4i32 (fp_to_sint (memopv4f32 addr:$src))),
1568 (CVTTPS2DQrm addr:$src)>;
1571 let Predicates = [HasAVX, NoVLX] in
1572 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1573 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1575 (v4i32 (X86cvttp2si (v2f64 VR128:$src))))]>,
1576 VEX, Sched<[WriteCvtPD2I]>, VEX_WIG;
1578 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1579 // register, but the same isn't true when using memory operands instead.
1580 // Provide other assembly rr and rm forms to address this explicitly.
1583 def : InstAlias<"vcvttpd2dqx\t{$src, $dst|$dst, $src}",
1584 (VCVTTPD2DQrr VR128:$dst, VR128:$src), 0>;
1586 let Predicates = [HasAVX, NoVLX] in
1587 def VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1588 "cvttpd2dq{x}\t{$src, $dst|$dst, $src}",
1590 (v4i32 (X86cvttp2si (loadv2f64 addr:$src))))]>,
1591 VEX, Sched<[WriteCvtPD2ILd]>, VEX_WIG;
1592 def : InstAlias<"vcvttpd2dqx\t{$src, $dst|$dst, $src}",
1593 (VCVTTPD2DQrm VR128:$dst, f128mem:$src), 0, "intel">;
1596 let Predicates = [HasAVX, NoVLX] in {
1597 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1598 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1600 (v4i32 (X86cvttp2si (v4f64 VR256:$src))))]>,
1601 VEX, VEX_L, Sched<[WriteCvtPD2IY]>, VEX_WIG;
1602 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1603 "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
1605 (v4i32 (X86cvttp2si (loadv4f64 addr:$src))))]>,
1606 VEX, VEX_L, Sched<[WriteCvtPD2IYLd]>, VEX_WIG;
1608 def : InstAlias<"vcvttpd2dqy\t{$src, $dst|$dst, $src}",
1609 (VCVTTPD2DQYrr VR128:$dst, VR256:$src), 0>;
1610 def : InstAlias<"vcvttpd2dqy\t{$src, $dst|$dst, $src}",
1611 (VCVTTPD2DQYrm VR128:$dst, f256mem:$src), 0, "intel">;
1613 let Predicates = [HasAVX, NoVLX] in {
1614 def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
1615 (VCVTTPD2DQYrr VR256:$src)>;
1616 def : Pat<(v4i32 (fp_to_sint (loadv4f64 addr:$src))),
1617 (VCVTTPD2DQYrm addr:$src)>;
1620 def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1621 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1623 (v4i32 (X86cvttp2si (v2f64 VR128:$src))))]>,
1624 Sched<[WriteCvtPD2I]>;
1625 def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
1626 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1628 (v4i32 (X86cvttp2si (memopv2f64 addr:$src))))]>,
1629 Sched<[WriteCvtPD2ILd]>;
1631 // Convert packed single to packed double
1632 let Predicates = [HasAVX, NoVLX] in {
1633 // SSE2 instructions without OpSize prefix
1634 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1635 "vcvtps2pd\t{$src, $dst|$dst, $src}",
1636 [(set VR128:$dst, (v2f64 (X86vfpext (v4f32 VR128:$src))))]>,
1637 PS, VEX, Sched<[WriteCvtPS2PD]>, VEX_WIG;
1638 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1639 "vcvtps2pd\t{$src, $dst|$dst, $src}",
1640 [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))]>,
1641 PS, VEX, Sched<[WriteCvtPS2PD.Folded]>, VEX_WIG;
1642 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
1643 "vcvtps2pd\t{$src, $dst|$dst, $src}",
1644 [(set VR256:$dst, (v4f64 (fpextend (v4f32 VR128:$src))))]>,
1645 PS, VEX, VEX_L, Sched<[WriteCvtPS2PDY]>, VEX_WIG;
1646 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
1647 "vcvtps2pd\t{$src, $dst|$dst, $src}",
1648 [(set VR256:$dst, (v4f64 (extloadv4f32 addr:$src)))]>,
1649 PS, VEX, VEX_L, Sched<[WriteCvtPS2PDY.Folded]>, VEX_WIG;
1652 let Predicates = [UseSSE2] in {
1653 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1654 "cvtps2pd\t{$src, $dst|$dst, $src}",
1655 [(set VR128:$dst, (v2f64 (X86vfpext (v4f32 VR128:$src))))]>,
1656 PS, Sched<[WriteCvtPS2PD]>;
1657 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1658 "cvtps2pd\t{$src, $dst|$dst, $src}",
1659 [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))]>,
1660 PS, Sched<[WriteCvtPS2PD.Folded]>;
1663 // Convert Packed DW Integers to Packed Double FP
1664 let Predicates = [HasAVX, NoVLX] in {
1665 let hasSideEffects = 0, mayLoad = 1 in
1666 def VCVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
1667 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
1669 (v2f64 (X86VSintToFP (loadv4i32 addr:$src))))]>,
1670 VEX, Sched<[WriteCvtI2PDLd]>, VEX_WIG;
1671 def VCVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1672 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
1674 (v2f64 (X86VSintToFP (v4i32 VR128:$src))))]>,
1675 VEX, Sched<[WriteCvtI2PD]>, VEX_WIG;
1676 def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
1677 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
1679 (v4f64 (sint_to_fp (loadv4i32 addr:$src))))]>,
1680 VEX, VEX_L, Sched<[WriteCvtI2PDYLd]>,
1682 def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
1683 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
1685 (v4f64 (sint_to_fp (v4i32 VR128:$src))))]>,
1686 VEX, VEX_L, Sched<[WriteCvtI2PDY]>, VEX_WIG;
1689 let hasSideEffects = 0, mayLoad = 1 in
1690 def CVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
1691 "cvtdq2pd\t{$src, $dst|$dst, $src}",
1693 (v2f64 (X86VSintToFP (loadv4i32 addr:$src))))]>,
1694 Sched<[WriteCvtI2PDLd]>;
1695 def CVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1696 "cvtdq2pd\t{$src, $dst|$dst, $src}",
1698 (v2f64 (X86VSintToFP (v4i32 VR128:$src))))]>,
1699 Sched<[WriteCvtI2PD]>;
1701 // AVX register conversion intrinsics
1702 let Predicates = [HasAVX, NoVLX] in {
1703 def : Pat<(v2f64 (X86VSintToFP (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
1704 (VCVTDQ2PDrm addr:$src)>;
1705 def : Pat<(v2f64 (X86VSintToFP (bc_v4i32 (v2i64 (X86vzload addr:$src))))),
1706 (VCVTDQ2PDrm addr:$src)>;
1707 } // Predicates = [HasAVX, NoVLX]
1709 // SSE2 register conversion intrinsics
1710 let Predicates = [UseSSE2] in {
1711 def : Pat<(v2f64 (X86VSintToFP (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
1712 (CVTDQ2PDrm addr:$src)>;
1713 def : Pat<(v2f64 (X86VSintToFP (bc_v4i32 (v2i64 (X86vzload addr:$src))))),
1714 (CVTDQ2PDrm addr:$src)>;
1715 } // Predicates = [UseSSE2]
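// (The 128-bit cvtdq2pd only reads the two low doubleword elements of its
// source, so a 64-bit load is sufficient; the patterns above let either a
// scalar i64 load or a zero-extending vector load be folded straight into the
// conversion rather than going through a separate full vector load.)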
1717 // Convert packed double to packed single
1718 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1719 // register, but the same isn't true when using memory operands instead.
1720 // Provide other assembly rr and rm forms to address this explicitly.
1721 let Predicates = [HasAVX, NoVLX] in
1722 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1723 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1724 [(set VR128:$dst, (X86vfpround (v2f64 VR128:$src)))]>,
1725 VEX, Sched<[WriteCvtPD2PS]>, VEX_WIG;
1728 def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
1729 (VCVTPD2PSrr VR128:$dst, VR128:$src), 0>;
1730 let Predicates = [HasAVX, NoVLX] in
1731 def VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1732 "cvtpd2ps{x}\t{$src, $dst|$dst, $src}",
1733 [(set VR128:$dst, (X86vfpround (loadv2f64 addr:$src)))]>,
1734 VEX, Sched<[WriteCvtPD2PS.Folded]>, VEX_WIG;
1735 def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
1736 (VCVTPD2PSrm VR128:$dst, f128mem:$src), 0, "intel">;
1739 let Predicates = [HasAVX, NoVLX] in {
1740 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1741 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1742 [(set VR128:$dst, (X86vfpround VR256:$src))]>,
1743 VEX, VEX_L, Sched<[WriteCvtPD2PSY]>, VEX_WIG;
1744 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1745 "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
1746 [(set VR128:$dst, (X86vfpround (loadv4f64 addr:$src)))]>,
1747 VEX, VEX_L, Sched<[WriteCvtPD2PSY.Folded]>, VEX_WIG;
1749 def : InstAlias<"vcvtpd2psy\t{$src, $dst|$dst, $src}",
1750 (VCVTPD2PSYrr VR128:$dst, VR256:$src), 0>;
1751 def : InstAlias<"vcvtpd2psy\t{$src, $dst|$dst, $src}",
1752 (VCVTPD2PSYrm VR128:$dst, f256mem:$src), 0, "intel">;
1754 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1755 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1756 [(set VR128:$dst, (X86vfpround (v2f64 VR128:$src)))]>,
1757 Sched<[WriteCvtPD2PS]>;
1758 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1759 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1760 [(set VR128:$dst, (X86vfpround (memopv2f64 addr:$src)))]>,
1761 Sched<[WriteCvtPD2PS.Folded]>;
1763 let Predicates = [HasAVX, NoVLX] in {
1764 def : Pat<(v4f32 (fpround (v4f64 VR256:$src))),
1765 (VCVTPD2PSYrr VR256:$src)>;
1766 def : Pat<(v4f32 (fpround (loadv4f64 addr:$src))),
1767 (VCVTPD2PSYrm addr:$src)>;
1770 //===----------------------------------------------------------------------===//
1771 // SSE 1 & 2 - Compare Instructions
1772 //===----------------------------------------------------------------------===//
1774 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
1775 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
1776 Operand CC, SDNode OpNode, ValueType VT,
1777 PatFrag ld_frag, string asm, string asm_alt,
1778 X86FoldableSchedWrite sched> {
1779 let isCommutable = 1 in
1780 def rr : SIi8<0xC2, MRMSrcReg,
1781 (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
1782 [(set RC:$dst, (OpNode (VT RC:$src1), RC:$src2, imm:$cc))]>,
1783 Sched<[sched]>;
1784 def rm : SIi8<0xC2, MRMSrcMem,
1785 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
1786 [(set RC:$dst, (OpNode (VT RC:$src1),
1787 (ld_frag addr:$src2), imm:$cc))]>,
1788 Sched<[sched.Folded, sched.ReadAfterFold]>;
1790 // Accept explicit immediate argument form instead of comparison code.
1791 let isAsmParserOnly = 1, hasSideEffects = 0 in {
1792 def rr_alt : SIi8<0xC2, MRMSrcReg, (outs RC:$dst),
1793 (ins RC:$src1, RC:$src2, u8imm:$cc), asm_alt, []>,
1794 Sched<[sched]>, NotMemoryFoldable;
1796 def rm_alt : SIi8<0xC2, MRMSrcMem, (outs RC:$dst),
1797 (ins RC:$src1, x86memop:$src2, u8imm:$cc), asm_alt, []>,
1798 Sched<[sched.Folded, sched.ReadAfterFold]>, NotMemoryFoldable;
1799 }
1800 }
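// A note on the asm strings used with this multiclass: the ${cc} operand
// prints the comparison predicate as part of the mnemonic ("cmpeqss",
// "cmpltss", ...), while the *_alt forms above take a raw 8-bit immediate
// ("cmpss $1, ...") so an explicit immediate can also be assembled and
// disassembled.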
1802 let ExeDomain = SSEPackedSingle in
1803 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, AVXCC, X86cmps, f32, loadf32,
1804 "cmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1805 "cmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
1806 SchedWriteFCmpSizes.PS.Scl>, XS, VEX_4V, VEX_LIG, VEX_WIG;
1807 let ExeDomain = SSEPackedDouble in
1808 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, AVXCC, X86cmps, f64, loadf64,
1809 "cmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1810 "cmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
1811 SchedWriteFCmpSizes.PD.Scl>,
1812 XD, VEX_4V, VEX_LIG, VEX_WIG;
1814 let Constraints = "$src1 = $dst" in {
1815 let ExeDomain = SSEPackedSingle in
1816 defm CMPSS : sse12_cmp_scalar<FR32, f32mem, SSECC, X86cmps, f32, loadf32,
1817 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
1818 "cmpss\t{$cc, $src2, $dst|$dst, $src2, $cc}",
1819 SchedWriteFCmpSizes.PS.Scl>, XS;
1820 let ExeDomain = SSEPackedDouble in
1821 defm CMPSD : sse12_cmp_scalar<FR64, f64mem, SSECC, X86cmps, f64, loadf64,
1822 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
1823 "cmpsd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
1824 SchedWriteFCmpSizes.PD.Scl>, XD;
1825 } // Constraints = "$src1 = $dst"
1827 multiclass sse12_cmp_scalar_int<Operand memop, Operand CC,
1828 Intrinsic Int, string asm, X86FoldableSchedWrite sched,
1829 ComplexPattern mem_cpat> {
1830 def rr_Int : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
1831 (ins VR128:$src1, VR128:$src, CC:$cc), asm,
1832 [(set VR128:$dst, (Int VR128:$src1,
1833 VR128:$src, imm:$cc))]>,
1834 Sched<[sched]>;
1836 def rm_Int : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
1837 (ins VR128:$src1, memop:$src, CC:$cc), asm,
1838 [(set VR128:$dst, (Int VR128:$src1,
1839 mem_cpat:$src, imm:$cc))]>,
1840 Sched<[sched.Folded, sched.ReadAfterFold]>;
1841 }
1843 let isCodeGenOnly = 1 in {
1844 // Aliases to match intrinsics which expect XMM operand(s).
1845 let ExeDomain = SSEPackedSingle in
1846 defm VCMPSS : sse12_cmp_scalar_int<ssmem, AVXCC, int_x86_sse_cmp_ss,
1847 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1848 SchedWriteFCmpSizes.PS.Scl, sse_load_f32>, XS, VEX_4V;
1849 let ExeDomain = SSEPackedDouble in
1850 defm VCMPSD : sse12_cmp_scalar_int<sdmem, AVXCC, int_x86_sse2_cmp_sd,
1851 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1852 SchedWriteFCmpSizes.PD.Scl, sse_load_f64>,
1853 XD, VEX_4V;
1854 let Constraints = "$src1 = $dst" in {
1855 let ExeDomain = SSEPackedSingle in
1856 defm CMPSS : sse12_cmp_scalar_int<ssmem, SSECC, int_x86_sse_cmp_ss,
1857 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
1858 SchedWriteFCmpSizes.PS.Scl, sse_load_f32>, XS;
1859 let ExeDomain = SSEPackedDouble in
1860 defm CMPSD : sse12_cmp_scalar_int<sdmem, SSECC, int_x86_sse2_cmp_sd,
1861 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
1862 SchedWriteFCmpSizes.PD.Scl, sse_load_f64>, XD;
1863 } // Constraints = "$src1 = $dst"
1864 } // isCodeGenOnly = 1
1867 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
1868 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
1869 ValueType vt, X86MemOperand x86memop,
1870 PatFrag ld_frag, string OpcodeStr,
1871 X86FoldableSchedWrite sched> {
1872 let hasSideEffects = 0 in {
1873 def rr: SI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
1874 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1875 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))]>,
1876 Sched<[sched]>;
1878 def rm: SI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
1879 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1880 [(set EFLAGS, (OpNode (vt RC:$src1),
1881 (ld_frag addr:$src2)))]>,
1882 Sched<[sched.Folded, sched.ReadAfterFold]>;
1883 } // hasSideEffects = 0
1884 }
1886 // sse12_ord_cmp_int - Intrinsic version of sse12_ord_cmp
1887 multiclass sse12_ord_cmp_int<bits<8> opc, RegisterClass RC, SDNode OpNode,
1888 ValueType vt, Operand memop,
1889 ComplexPattern mem_cpat, string OpcodeStr,
1890 X86FoldableSchedWrite sched> {
1891 def rr_Int: SI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
1892 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1893 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))]>,
1894 Sched<[sched]>;
1896 def rm_Int: SI<opc, MRMSrcMem, (outs), (ins RC:$src1, memop:$src2),
1897 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1898 [(set EFLAGS, (OpNode (vt RC:$src1),
1899 (mem_cpat addr:$src2)))]>,
1900 Sched<[sched.Folded, sched.ReadAfterFold]>;
1901 }
1903 let Defs = [EFLAGS] in {
1904 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1905 "ucomiss", WriteFCom>, PS, VEX, VEX_LIG, VEX_WIG;
1906 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1907 "ucomisd", WriteFCom>, PD, VEX, VEX_LIG, VEX_WIG;
1908 let Pattern = []<dag> in {
1909 defm VCOMISS : sse12_ord_cmp<0x2F, FR32, undef, f32, f32mem, loadf32,
1910 "comiss", WriteFCom>, PS, VEX, VEX_LIG, VEX_WIG;
1911 defm VCOMISD : sse12_ord_cmp<0x2F, FR64, undef, f64, f64mem, loadf64,
1912 "comisd", WriteFCom>, PD, VEX, VEX_LIG, VEX_WIG;
1915 let isCodeGenOnly = 1 in {
1916 defm VUCOMISS : sse12_ord_cmp_int<0x2E, VR128, X86ucomi, v4f32, ssmem,
1917 sse_load_f32, "ucomiss", WriteFCom>, PS, VEX, VEX_WIG;
1918 defm VUCOMISD : sse12_ord_cmp_int<0x2E, VR128, X86ucomi, v2f64, sdmem,
1919 sse_load_f64, "ucomisd", WriteFCom>, PD, VEX, VEX_WIG;
1921 defm VCOMISS : sse12_ord_cmp_int<0x2F, VR128, X86comi, v4f32, ssmem,
1922 sse_load_f32, "comiss", WriteFCom>, PS, VEX, VEX_WIG;
1923 defm VCOMISD : sse12_ord_cmp_int<0x2F, VR128, X86comi, v2f64, sdmem,
1924 sse_load_f64, "comisd", WriteFCom>, PD, VEX, VEX_WIG;
1926 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1927 "ucomiss", WriteFCom>, PS;
1928 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1929 "ucomisd", WriteFCom>, PD;
1931 let Pattern = []<dag> in {
1932 defm COMISS : sse12_ord_cmp<0x2F, FR32, undef, f32, f32mem, loadf32,
1933 "comiss", WriteFCom>, PS;
1934 defm COMISD : sse12_ord_cmp<0x2F, FR64, undef, f64, f64mem, loadf64,
1935 "comisd", WriteFCom>, PD;
1938 let isCodeGenOnly = 1 in {
1939 defm UCOMISS : sse12_ord_cmp_int<0x2E, VR128, X86ucomi, v4f32, ssmem,
1940 sse_load_f32, "ucomiss", WriteFCom>, PS;
1941 defm UCOMISD : sse12_ord_cmp_int<0x2E, VR128, X86ucomi, v2f64, sdmem,
1942 sse_load_f64, "ucomisd", WriteFCom>, PD;
1944 defm COMISS : sse12_ord_cmp_int<0x2F, VR128, X86comi, v4f32, ssmem,
1945 sse_load_f32, "comiss", WriteFCom>, PS;
1946 defm COMISD : sse12_ord_cmp_int<0x2F, VR128, X86comi, v2f64, sdmem,
1947 sse_load_f64, "comisd", WriteFCom>, PD;
1949 } // Defs = [EFLAGS]
1951 // sse12_cmp_packed - sse 1 & 2 compare packed instructions
1952 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
1953 Operand CC, ValueType VT, string asm,
1954 string asm_alt, X86FoldableSchedWrite sched,
1955 Domain d, PatFrag ld_frag> {
1956 let isCommutable = 1 in
1957 def rri : PIi8<0xC2, MRMSrcReg,
1958 (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
1959 [(set RC:$dst, (VT (X86cmpp RC:$src1, RC:$src2, imm:$cc)))], d>,
1960 Sched<[sched]>;
1961 def rmi : PIi8<0xC2, MRMSrcMem,
1962 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
1963 [(set RC:$dst,
1964 (VT (X86cmpp RC:$src1, (ld_frag addr:$src2), imm:$cc)))], d>,
1965 Sched<[sched.Folded, sched.ReadAfterFold]>;
1967 // Accept explicit immediate argument form instead of comparison code.
1968 let isAsmParserOnly = 1, hasSideEffects = 0 in {
1969 def rri_alt : PIi8<0xC2, MRMSrcReg,
1970 (outs RC:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc),
1971 asm_alt, [], d>, Sched<[sched]>, NotMemoryFoldable;
1973 def rmi_alt : PIi8<0xC2, MRMSrcMem,
1974 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, u8imm:$cc),
1975 asm_alt, [], d>, Sched<[sched.Folded, sched.ReadAfterFold]>,
1976 NotMemoryFoldable;
1977 }
1978 }
1980 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, AVXCC, v4f32,
1981 "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1982 "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
1983 SchedWriteFCmpSizes.PS.XMM, SSEPackedSingle, loadv4f32>, PS, VEX_4V, VEX_WIG;
1984 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, AVXCC, v2f64,
1985 "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1986 "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
1987 SchedWriteFCmpSizes.PD.XMM, SSEPackedDouble, loadv2f64>, PD, VEX_4V, VEX_WIG;
1988 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, AVXCC, v8f32,
1989 "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1990 "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
1991 SchedWriteFCmpSizes.PS.YMM, SSEPackedSingle, loadv8f32>, PS, VEX_4V, VEX_L, VEX_WIG;
1992 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, AVXCC, v4f64,
1993 "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1994 "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
1995 SchedWriteFCmpSizes.PD.YMM, SSEPackedDouble, loadv4f64>, PD, VEX_4V, VEX_L, VEX_WIG;
1996 let Constraints = "$src1 = $dst" in {
1997 defm CMPPS : sse12_cmp_packed<VR128, f128mem, SSECC, v4f32,
1998 "cmp${cc}ps\t{$src2, $dst|$dst, $src2}",
1999 "cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2000 SchedWriteFCmpSizes.PS.XMM, SSEPackedSingle, memopv4f32>, PS;
2001 defm CMPPD : sse12_cmp_packed<VR128, f128mem, SSECC, v2f64,
2002 "cmp${cc}pd\t{$src2, $dst|$dst, $src2}",
2003 "cmppd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2004 SchedWriteFCmpSizes.PD.XMM, SSEPackedDouble, memopv2f64>, PD;
2005 } // Constraints = "$src1 = $dst"
2007 def CommutableCMPCC : PatLeaf<(imm), [{
2008 uint64_t Imm = N->getZExtValue() & 0x7;
2009 return (Imm == 0x00 || Imm == 0x03 || Imm == 0x04 || Imm == 0x07);
2010 }]>;
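// (In the SSE/AVX compare-immediate encoding the low three bits 0x0, 0x3, 0x4
// and 0x7 select EQ, UNORD, NEQ and ORD; these four relations are symmetric
// in their operands, so the patterns below may swap the operands to move the
// load into the foldable position without changing the result.)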
2012 // Patterns to select compares with loads in first operand.
2013 let Predicates = [HasAVX] in {
2014 def : Pat<(v4f64 (X86cmpp (loadv4f64 addr:$src2), VR256:$src1,
2015 CommutableCMPCC:$cc)),
2016 (VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
2018 def : Pat<(v8f32 (X86cmpp (loadv8f32 addr:$src2), VR256:$src1,
2019 CommutableCMPCC:$cc)),
2020 (VCMPPSYrmi VR256:$src1, addr:$src2, imm:$cc)>;
2022 def : Pat<(v2f64 (X86cmpp (loadv2f64 addr:$src2), VR128:$src1,
2023 CommutableCMPCC:$cc)),
2024 (VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
2026 def : Pat<(v4f32 (X86cmpp (loadv4f32 addr:$src2), VR128:$src1,
2027 CommutableCMPCC:$cc)),
2028 (VCMPPSrmi VR128:$src1, addr:$src2, imm:$cc)>;
2030 def : Pat<(f64 (X86cmps (loadf64 addr:$src2), FR64:$src1,
2031 CommutableCMPCC:$cc)),
2032 (VCMPSDrm FR64:$src1, addr:$src2, imm:$cc)>;
2034 def : Pat<(f32 (X86cmps (loadf32 addr:$src2), FR32:$src1,
2035 CommutableCMPCC:$cc)),
2036 (VCMPSSrm FR32:$src1, addr:$src2, imm:$cc)>;
2039 let Predicates = [UseSSE2] in {
2040 def : Pat<(v2f64 (X86cmpp (memopv2f64 addr:$src2), VR128:$src1,
2041 CommutableCMPCC:$cc)),
2042 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
2044 def : Pat<(f64 (X86cmps (loadf64 addr:$src2), FR64:$src1,
2045 CommutableCMPCC:$cc)),
2046 (CMPSDrm FR64:$src1, addr:$src2, imm:$cc)>;
2049 let Predicates = [UseSSE1] in {
2050 def : Pat<(v4f32 (X86cmpp (memopv4f32 addr:$src2), VR128:$src1,
2051 CommutableCMPCC:$cc)),
2052 (CMPPSrmi VR128:$src1, addr:$src2, imm:$cc)>;
2054 def : Pat<(f32 (X86cmps (loadf32 addr:$src2), FR32:$src1,
2055 CommutableCMPCC:$cc)),
2056 (CMPSSrm FR32:$src1, addr:$src2, imm:$cc)>;
2059 //===----------------------------------------------------------------------===//
2060 // SSE 1 & 2 - Shuffle Instructions
2061 //===----------------------------------------------------------------------===//
2063 /// sse12_shuffle - sse 1 & 2 fp shuffle instructions
2064 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
2065 ValueType vt, string asm, PatFrag mem_frag,
2066 X86FoldableSchedWrite sched, Domain d> {
2067 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
2068 (ins RC:$src1, x86memop:$src2, u8imm:$src3), asm,
2069 [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
2070 (i8 imm:$src3))))], d>,
2071 Sched<[sched.Folded, sched.ReadAfterFold]>;
2072 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
2073 (ins RC:$src1, RC:$src2, u8imm:$src3), asm,
2074 [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
2075 (i8 imm:$src3))))], d>,
2076 Sched<[sched]>;
2077 }
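// Illustrative note on the shuffle immediate (a sketch, not from this file):
// for the v4f32 forms each 2-bit field of $src3 selects one element; the two
// low fields pick result elements 0-1 from $src1 and the two high fields pick
// elements 2-3 from $src2.  For example "shufps $0x4e, %xmm0, %xmm0" with both
// operands equal swaps the two 64-bit halves of %xmm0.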
2079 let Predicates = [HasAVX, NoVLX] in {
2080 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
2081 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2082 loadv4f32, SchedWriteFShuffle.XMM, SSEPackedSingle>,
2083 PS, VEX_4V, VEX_WIG;
2084 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
2085 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2086 loadv8f32, SchedWriteFShuffle.YMM, SSEPackedSingle>,
2087 PS, VEX_4V, VEX_L, VEX_WIG;
2088 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
2089 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2090 loadv2f64, SchedWriteFShuffle.XMM, SSEPackedDouble>,
2091 PD, VEX_4V, VEX_WIG;
2092 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
2093 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2094 loadv4f64, SchedWriteFShuffle.YMM, SSEPackedDouble>,
2095 PD, VEX_4V, VEX_L, VEX_WIG;
2097 let Constraints = "$src1 = $dst" in {
2098 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
2099 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2100 memopv4f32, SchedWriteFShuffle.XMM, SSEPackedSingle>, PS;
2101 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
2102 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2103 memopv2f64, SchedWriteFShuffle.XMM, SSEPackedDouble>, PD;
2106 //===----------------------------------------------------------------------===//
2107 // SSE 1 & 2 - Unpack FP Instructions
2108 //===----------------------------------------------------------------------===//
2110 /// sse12_unpack_interleave - sse 1 & 2 fp unpack and interleave
2111 multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,
2112 PatFrag mem_frag, RegisterClass RC,
2113 X86MemOperand x86memop, string asm,
2114 X86FoldableSchedWrite sched, Domain d,
2115 bit IsCommutable = 0> {
2116 let isCommutable = IsCommutable in
2117 def rr : PI<opc, MRMSrcReg,
2118 (outs RC:$dst), (ins RC:$src1, RC:$src2),
2119 asm, [(set RC:$dst,
2120 (vt (OpNode RC:$src1, RC:$src2)))], d>,
2121 Sched<[sched]>;
2122 def rm : PI<opc, MRMSrcMem,
2123 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2124 asm, [(set RC:$dst,
2125 (vt (OpNode RC:$src1,
2126 (mem_frag addr:$src2))))], d>,
2127 Sched<[sched.Folded, sched.ReadAfterFold]>;
2128 }
2130 let Predicates = [HasAVX, NoVLX] in {
2131 defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, load,
2132 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2133 SchedWriteFShuffle.XMM, SSEPackedSingle>, PS, VEX_4V, VEX_WIG;
2134 defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, load,
2135 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2136 SchedWriteFShuffle.XMM, SSEPackedDouble, 1>, PD, VEX_4V, VEX_WIG;
2137 defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, load,
2138 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2139 SchedWriteFShuffle.XMM, SSEPackedSingle>, PS, VEX_4V, VEX_WIG;
2140 defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, load,
2141 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2142 SchedWriteFShuffle.XMM, SSEPackedDouble>, PD, VEX_4V, VEX_WIG;
2144 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, load,
2145 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2146 SchedWriteFShuffle.YMM, SSEPackedSingle>, PS, VEX_4V, VEX_L, VEX_WIG;
2147 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, load,
2148 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2149 SchedWriteFShuffle.YMM, SSEPackedDouble>, PD, VEX_4V, VEX_L, VEX_WIG;
2150 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, load,
2151 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2152 SchedWriteFShuffle.YMM, SSEPackedSingle>, PS, VEX_4V, VEX_L, VEX_WIG;
2153 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, load,
2154 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2155 SchedWriteFShuffle.YMM, SSEPackedDouble>, PD, VEX_4V, VEX_L, VEX_WIG;
2156 }// Predicates = [HasAVX, NoVLX]
2158 let Constraints = "$src1 = $dst" in {
2159 defm UNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memop,
2160 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
2161 SchedWriteFShuffle.XMM, SSEPackedSingle>, PS;
2162 defm UNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memop,
2163 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
2164 SchedWriteFShuffle.XMM, SSEPackedDouble, 1>, PD;
2165 defm UNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memop,
2166 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
2167 SchedWriteFShuffle.XMM, SSEPackedSingle>, PS;
2168 defm UNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memop,
2169 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
2170 SchedWriteFShuffle.XMM, SSEPackedDouble>, PD;
2171 } // Constraints = "$src1 = $dst"
2173 let Predicates = [HasAVX1Only] in {
2174 def : Pat<(v8i32 (X86Unpckl VR256:$src1, (loadv8i32 addr:$src2))),
2175 (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
2176 def : Pat<(v8i32 (X86Unpckl VR256:$src1, VR256:$src2)),
2177 (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
2178 def : Pat<(v8i32 (X86Unpckh VR256:$src1, (loadv8i32 addr:$src2))),
2179 (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
2180 def : Pat<(v8i32 (X86Unpckh VR256:$src1, VR256:$src2)),
2181 (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
2183 def : Pat<(v4i64 (X86Unpckl VR256:$src1, (loadv4i64 addr:$src2))),
2184 (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
2185 def : Pat<(v4i64 (X86Unpckl VR256:$src1, VR256:$src2)),
2186 (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
2187 def : Pat<(v4i64 (X86Unpckh VR256:$src1, (loadv4i64 addr:$src2))),
2188 (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
2189 def : Pat<(v4i64 (X86Unpckh VR256:$src1, VR256:$src2)),
2190 (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
2193 //===----------------------------------------------------------------------===//
2194 // SSE 1 & 2 - Extract Floating-Point Sign mask
2195 //===----------------------------------------------------------------------===//
2197 /// sse12_extr_sign_mask - sse 1 & 2 extract floating-point sign mask
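// (For reference: these instructions copy the sign bit of every packed
// element into the low bits of a GPR, e.g. "movmskps %xmm0, %eax" leaves a
// 4-bit mask in %eax; this is what the _mm_movemask_ps/_mm_movemask_pd
// intrinsics compile down to.)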
2198 multiclass sse12_extr_sign_mask<RegisterClass RC, ValueType vt,
2199 string asm, Domain d> {
2200 def rr : PI<0x50, MRMSrcReg, (outs GR32orGR64:$dst), (ins RC:$src),
2201 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
2202 [(set GR32orGR64:$dst, (X86movmsk (vt RC:$src)))], d>,
2203 Sched<[WriteFMOVMSK]>;
2206 let Predicates = [HasAVX] in {
2207 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, v4f32, "movmskps",
2208 SSEPackedSingle>, PS, VEX, VEX_WIG;
2209 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, v2f64, "movmskpd",
2210 SSEPackedDouble>, PD, VEX, VEX_WIG;
2211 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, v8f32, "movmskps",
2212 SSEPackedSingle>, PS, VEX, VEX_L, VEX_WIG;
2213 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, v4f64, "movmskpd",
2214 SSEPackedDouble>, PD, VEX, VEX_L, VEX_WIG;
2216 // Also support integer VTs to avoid a int->fp bitcast in the DAG.
2217 def : Pat<(X86movmsk (v4i32 VR128:$src)),
2218 (VMOVMSKPSrr VR128:$src)>;
2219 def : Pat<(X86movmsk (v2i64 VR128:$src)),
2220 (VMOVMSKPDrr VR128:$src)>;
2221 def : Pat<(X86movmsk (v8i32 VR256:$src)),
2222 (VMOVMSKPSYrr VR256:$src)>;
2223 def : Pat<(X86movmsk (v4i64 VR256:$src)),
2224 (VMOVMSKPDYrr VR256:$src)>;
2227 defm MOVMSKPS : sse12_extr_sign_mask<VR128, v4f32, "movmskps",
2228 SSEPackedSingle>, PS;
2229 defm MOVMSKPD : sse12_extr_sign_mask<VR128, v2f64, "movmskpd",
2230 SSEPackedDouble>, PD;
2232 let Predicates = [UseSSE2] in {
2233 // Also support integer VTs to avoid a int->fp bitcast in the DAG.
2234 def : Pat<(X86movmsk (v4i32 VR128:$src)),
2235 (MOVMSKPSrr VR128:$src)>;
2236 def : Pat<(X86movmsk (v2i64 VR128:$src)),
2237 (MOVMSKPDrr VR128:$src)>;
2240 //===---------------------------------------------------------------------===//
2241 // SSE2 - Packed Integer Logical Instructions
2242 //===---------------------------------------------------------------------===//
2244 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2246 /// PDI_binop_rm - Simple SSE2 binary operator.
2247 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2248 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
2249 X86MemOperand x86memop, X86FoldableSchedWrite sched,
2250 bit IsCommutable, bit Is2Addr> {
2251 let isCommutable = IsCommutable in
2252 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
2253 (ins RC:$src1, RC:$src2),
2255 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2256 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2257 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>,
2258 Sched<[sched]>;
2259 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
2260 (ins RC:$src1, x86memop:$src2),
2262 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2263 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2264 [(set RC:$dst, (OpVT (OpNode RC:$src1, (memop_frag addr:$src2))))]>,
2265 Sched<[sched.Folded, sched.ReadAfterFold]>;
2266 }
2267 } // ExeDomain = SSEPackedInt
2269 multiclass PDI_binop_all<bits<8> opc, string OpcodeStr, SDNode Opcode,
2270 ValueType OpVT128, ValueType OpVT256,
2271 X86SchedWriteWidths sched, bit IsCommutable,
2272 Predicate prd> {
2273 let Predicates = [HasAVX, prd] in
2274 defm V#NAME : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode, OpVT128,
2275 VR128, load, i128mem, sched.XMM,
2276 IsCommutable, 0>, VEX_4V, VEX_WIG;
2278 let Constraints = "$src1 = $dst" in
2279 defm NAME : PDI_binop_rm<opc, OpcodeStr, Opcode, OpVT128, VR128,
2280 memop, i128mem, sched.XMM, IsCommutable, 1>;
2282 let Predicates = [HasAVX2, prd] in
2283 defm V#NAME#Y : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode,
2284 OpVT256, VR256, load, i256mem, sched.YMM,
2285 IsCommutable, 0>, VEX_4V, VEX_L, VEX_WIG;
2286 }
2288 // These are ordered here for pattern ordering requirements with the fp versions
2290 defm PAND : PDI_binop_all<0xDB, "pand", and, v2i64, v4i64,
2291 SchedWriteVecLogic, 1, NoVLX>;
2292 defm POR : PDI_binop_all<0xEB, "por", or, v2i64, v4i64,
2293 SchedWriteVecLogic, 1, NoVLX>;
2294 defm PXOR : PDI_binop_all<0xEF, "pxor", xor, v2i64, v4i64,
2295 SchedWriteVecLogic, 1, NoVLX>;
2296 defm PANDN : PDI_binop_all<0xDF, "pandn", X86andnp, v2i64, v4i64,
2297 SchedWriteVecLogic, 0, NoVLX>;
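// (The defms above only give these nodes v2i64/v4i64 types; the explicit
// patterns later in this section map the v16i8/v8i16/v4i32 forms of
// and/or/xor/andnp onto the same instructions, since the underlying
// operation is bitwise and element-width-agnostic.)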
2299 //===----------------------------------------------------------------------===//
2300 // SSE 1 & 2 - Logical Instructions
2301 //===----------------------------------------------------------------------===//
2303 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
2305 /// There are no patterns here because isel prefers integer versions for SSE2
2306 /// and later. There are SSE1 v4f32 patterns later.
2307 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
2308 SDNode OpNode, X86SchedWriteWidths sched> {
2309 let Predicates = [HasAVX, NoVLX] in {
2310 defm V#NAME#PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
2311 !strconcat(OpcodeStr, "ps"), f256mem, sched.YMM,
2312 [], [], 0>, PS, VEX_4V, VEX_L, VEX_WIG;
2314 defm V#NAME#PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
2315 !strconcat(OpcodeStr, "pd"), f256mem, sched.YMM,
2316 [], [], 0>, PD, VEX_4V, VEX_L, VEX_WIG;
2318 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2319 !strconcat(OpcodeStr, "ps"), f128mem, sched.XMM,
2320 [], [], 0>, PS, VEX_4V, VEX_WIG;
2322 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2323 !strconcat(OpcodeStr, "pd"), f128mem, sched.XMM,
2324 [], [], 0>, PD, VEX_4V, VEX_WIG;
2325 }
2327 let Constraints = "$src1 = $dst" in {
2328 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2329 !strconcat(OpcodeStr, "ps"), f128mem, sched.XMM,
2330 [], []>, PS;
2332 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2333 !strconcat(OpcodeStr, "pd"), f128mem, sched.XMM,
2334 [], []>, PD;
2335 } // Constraints = "$src1 = $dst"
2336 }
2338 defm AND : sse12_fp_packed_logical<0x54, "and", and, SchedWriteFLogic>;
2339 defm OR : sse12_fp_packed_logical<0x56, "or", or, SchedWriteFLogic>;
2340 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor, SchedWriteFLogic>;
2341 let isCommutable = 0 in
2342 defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp, SchedWriteFLogic>;
2344 let Predicates = [HasAVX2, NoVLX] in {
2345 def : Pat<(v32i8 (and VR256:$src1, VR256:$src2)),
2346 (VPANDYrr VR256:$src1, VR256:$src2)>;
2347 def : Pat<(v16i16 (and VR256:$src1, VR256:$src2)),
2348 (VPANDYrr VR256:$src1, VR256:$src2)>;
2349 def : Pat<(v8i32 (and VR256:$src1, VR256:$src2)),
2350 (VPANDYrr VR256:$src1, VR256:$src2)>;
2352 def : Pat<(v32i8 (or VR256:$src1, VR256:$src2)),
2353 (VPORYrr VR256:$src1, VR256:$src2)>;
2354 def : Pat<(v16i16 (or VR256:$src1, VR256:$src2)),
2355 (VPORYrr VR256:$src1, VR256:$src2)>;
2356 def : Pat<(v8i32 (or VR256:$src1, VR256:$src2)),
2357 (VPORYrr VR256:$src1, VR256:$src2)>;
2359 def : Pat<(v32i8 (xor VR256:$src1, VR256:$src2)),
2360 (VPXORYrr VR256:$src1, VR256:$src2)>;
2361 def : Pat<(v16i16 (xor VR256:$src1, VR256:$src2)),
2362 (VPXORYrr VR256:$src1, VR256:$src2)>;
2363 def : Pat<(v8i32 (xor VR256:$src1, VR256:$src2)),
2364 (VPXORYrr VR256:$src1, VR256:$src2)>;
2366 def : Pat<(v32i8 (X86andnp VR256:$src1, VR256:$src2)),
2367 (VPANDNYrr VR256:$src1, VR256:$src2)>;
2368 def : Pat<(v16i16 (X86andnp VR256:$src1, VR256:$src2)),
2369 (VPANDNYrr VR256:$src1, VR256:$src2)>;
2370 def : Pat<(v8i32 (X86andnp VR256:$src1, VR256:$src2)),
2371 (VPANDNYrr VR256:$src1, VR256:$src2)>;
2373 def : Pat<(and VR256:$src1, (loadv32i8 addr:$src2)),
2374 (VPANDYrm VR256:$src1, addr:$src2)>;
2375 def : Pat<(and VR256:$src1, (loadv16i16 addr:$src2)),
2376 (VPANDYrm VR256:$src1, addr:$src2)>;
2377 def : Pat<(and VR256:$src1, (loadv8i32 addr:$src2)),
2378 (VPANDYrm VR256:$src1, addr:$src2)>;
2380 def : Pat<(or VR256:$src1, (loadv32i8 addr:$src2)),
2381 (VPORYrm VR256:$src1, addr:$src2)>;
2382 def : Pat<(or VR256:$src1, (loadv16i16 addr:$src2)),
2383 (VPORYrm VR256:$src1, addr:$src2)>;
2384 def : Pat<(or VR256:$src1, (loadv8i32 addr:$src2)),
2385 (VPORYrm VR256:$src1, addr:$src2)>;
2387 def : Pat<(xor VR256:$src1, (loadv32i8 addr:$src2)),
2388 (VPXORYrm VR256:$src1, addr:$src2)>;
2389 def : Pat<(xor VR256:$src1, (loadv16i16 addr:$src2)),
2390 (VPXORYrm VR256:$src1, addr:$src2)>;
2391 def : Pat<(xor VR256:$src1, (loadv8i32 addr:$src2)),
2392 (VPXORYrm VR256:$src1, addr:$src2)>;
2394 def : Pat<(X86andnp VR256:$src1, (loadv32i8 addr:$src2)),
2395 (VPANDNYrm VR256:$src1, addr:$src2)>;
2396 def : Pat<(X86andnp VR256:$src1, (loadv16i16 addr:$src2)),
2397 (VPANDNYrm VR256:$src1, addr:$src2)>;
2398 def : Pat<(X86andnp VR256:$src1, (loadv8i32 addr:$src2)),
2399 (VPANDNYrm VR256:$src1, addr:$src2)>;
2402 // If only AVX1 is supported, we need to handle integer operations with
2403 // floating point instructions since the integer versions aren't available.
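// For example (illustrative): on an AVX1-only target a 256-bit integer AND is
// emitted as
//   vandps %ymm1, %ymm0, %ymm0
// because the 256-bit register form of "vpand" requires AVX2.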
2404 let Predicates = [HasAVX1Only] in {
2405 def : Pat<(v32i8 (and VR256:$src1, VR256:$src2)),
2406 (VANDPSYrr VR256:$src1, VR256:$src2)>;
2407 def : Pat<(v16i16 (and VR256:$src1, VR256:$src2)),
2408 (VANDPSYrr VR256:$src1, VR256:$src2)>;
2409 def : Pat<(v8i32 (and VR256:$src1, VR256:$src2)),
2410 (VANDPSYrr VR256:$src1, VR256:$src2)>;
2411 def : Pat<(v4i64 (and VR256:$src1, VR256:$src2)),
2412 (VANDPSYrr VR256:$src1, VR256:$src2)>;
2414 def : Pat<(v32i8 (or VR256:$src1, VR256:$src2)),
2415 (VORPSYrr VR256:$src1, VR256:$src2)>;
2416 def : Pat<(v16i16 (or VR256:$src1, VR256:$src2)),
2417 (VORPSYrr VR256:$src1, VR256:$src2)>;
2418 def : Pat<(v8i32 (or VR256:$src1, VR256:$src2)),
2419 (VORPSYrr VR256:$src1, VR256:$src2)>;
2420 def : Pat<(v4i64 (or VR256:$src1, VR256:$src2)),
2421 (VORPSYrr VR256:$src1, VR256:$src2)>;
2423 def : Pat<(v32i8 (xor VR256:$src1, VR256:$src2)),
2424 (VXORPSYrr VR256:$src1, VR256:$src2)>;
2425 def : Pat<(v16i16 (xor VR256:$src1, VR256:$src2)),
2426 (VXORPSYrr VR256:$src1, VR256:$src2)>;
2427 def : Pat<(v8i32 (xor VR256:$src1, VR256:$src2)),
2428 (VXORPSYrr VR256:$src1, VR256:$src2)>;
2429 def : Pat<(v4i64 (xor VR256:$src1, VR256:$src2)),
2430 (VXORPSYrr VR256:$src1, VR256:$src2)>;
2432 def : Pat<(v32i8 (X86andnp VR256:$src1, VR256:$src2)),
2433 (VANDNPSYrr VR256:$src1, VR256:$src2)>;
2434 def : Pat<(v16i16 (X86andnp VR256:$src1, VR256:$src2)),
2435 (VANDNPSYrr VR256:$src1, VR256:$src2)>;
2436 def : Pat<(v8i32 (X86andnp VR256:$src1, VR256:$src2)),
2437 (VANDNPSYrr VR256:$src1, VR256:$src2)>;
2438 def : Pat<(v4i64 (X86andnp VR256:$src1, VR256:$src2)),
2439 (VANDNPSYrr VR256:$src1, VR256:$src2)>;
2441 def : Pat<(and VR256:$src1, (loadv32i8 addr:$src2)),
2442 (VANDPSYrm VR256:$src1, addr:$src2)>;
2443 def : Pat<(and VR256:$src1, (loadv16i16 addr:$src2)),
2444 (VANDPSYrm VR256:$src1, addr:$src2)>;
2445 def : Pat<(and VR256:$src1, (loadv8i32 addr:$src2)),
2446 (VANDPSYrm VR256:$src1, addr:$src2)>;
2447 def : Pat<(and VR256:$src1, (loadv4i64 addr:$src2)),
2448 (VANDPSYrm VR256:$src1, addr:$src2)>;
2450 def : Pat<(or VR256:$src1, (loadv32i8 addr:$src2)),
2451 (VORPSYrm VR256:$src1, addr:$src2)>;
2452 def : Pat<(or VR256:$src1, (loadv16i16 addr:$src2)),
2453 (VORPSYrm VR256:$src1, addr:$src2)>;
2454 def : Pat<(or VR256:$src1, (loadv8i32 addr:$src2)),
2455 (VORPSYrm VR256:$src1, addr:$src2)>;
2456 def : Pat<(or VR256:$src1, (loadv4i64 addr:$src2)),
2457 (VORPSYrm VR256:$src1, addr:$src2)>;
2459 def : Pat<(xor VR256:$src1, (loadv32i8 addr:$src2)),
2460 (VXORPSYrm VR256:$src1, addr:$src2)>;
2461 def : Pat<(xor VR256:$src1, (loadv16i16 addr:$src2)),
2462 (VXORPSYrm VR256:$src1, addr:$src2)>;
2463 def : Pat<(xor VR256:$src1, (loadv8i32 addr:$src2)),
2464 (VXORPSYrm VR256:$src1, addr:$src2)>;
2465 def : Pat<(xor VR256:$src1, (loadv4i64 addr:$src2)),
2466 (VXORPSYrm VR256:$src1, addr:$src2)>;
2468 def : Pat<(X86andnp VR256:$src1, (loadv32i8 addr:$src2)),
2469 (VANDNPSYrm VR256:$src1, addr:$src2)>;
2470 def : Pat<(X86andnp VR256:$src1, (loadv16i16 addr:$src2)),
2471 (VANDNPSYrm VR256:$src1, addr:$src2)>;
2472 def : Pat<(X86andnp VR256:$src1, (loadv8i32 addr:$src2)),
2473 (VANDNPSYrm VR256:$src1, addr:$src2)>;
2474 def : Pat<(X86andnp VR256:$src1, (loadv4i64 addr:$src2)),
2475 (VANDNPSYrm VR256:$src1, addr:$src2)>;
2478 let Predicates = [HasAVX, NoVLX_Or_NoDQI] in {
2479 // Use packed logical operations for scalar ops.
2480 def : Pat<(f64 (X86fand FR64:$src1, FR64:$src2)),
2482 (v2f64 (VANDPDrr (v2f64 (COPY_TO_REGCLASS FR64:$src1, VR128)),
2483 (v2f64 (COPY_TO_REGCLASS FR64:$src2, VR128)))),
2485 def : Pat<(f64 (X86for FR64:$src1, FR64:$src2)),
2487 (v2f64 (VORPDrr (v2f64 (COPY_TO_REGCLASS FR64:$src1, VR128)),
2488 (v2f64 (COPY_TO_REGCLASS FR64:$src2, VR128)))),
2490 def : Pat<(f64 (X86fxor FR64:$src1, FR64:$src2)),
2492 (v2f64 (VXORPDrr (v2f64 (COPY_TO_REGCLASS FR64:$src1, VR128)),
2493 (v2f64 (COPY_TO_REGCLASS FR64:$src2, VR128)))),
2495 def : Pat<(f64 (X86fandn FR64:$src1, FR64:$src2)),
2497 (v2f64 (VANDNPDrr (v2f64 (COPY_TO_REGCLASS FR64:$src1, VR128)),
2498 (v2f64 (COPY_TO_REGCLASS FR64:$src2, VR128)))),
2501 def : Pat<(f32 (X86fand FR32:$src1, FR32:$src2)),
2503 (v4f32 (VANDPSrr (v4f32 (COPY_TO_REGCLASS FR32:$src1, VR128)),
2504 (v4f32 (COPY_TO_REGCLASS FR32:$src2, VR128)))),
2506 def : Pat<(f32 (X86for FR32:$src1, FR32:$src2)),
2508 (v4f32 (VORPSrr (v4f32 (COPY_TO_REGCLASS FR32:$src1, VR128)),
2509 (v4f32 (COPY_TO_REGCLASS FR32:$src2, VR128)))),
2511 def : Pat<(f32 (X86fxor FR32:$src1, FR32:$src2)),
2513 (v4f32 (VXORPSrr (v4f32 (COPY_TO_REGCLASS FR32:$src1, VR128)),
2514 (v4f32 (COPY_TO_REGCLASS FR32:$src2, VR128)))),
2516 def : Pat<(f32 (X86fandn FR32:$src1, FR32:$src2)),
2518 (v4f32 (VANDNPSrr (v4f32 (COPY_TO_REGCLASS FR32:$src1, VR128)),
2519 (v4f32 (COPY_TO_REGCLASS FR32:$src2, VR128)))),
2523 let Predicates = [UseSSE1] in {
2524 // Use packed logical operations for scalar ops.
2525 def : Pat<(f32 (X86fand FR32:$src1, FR32:$src2)),
2527 (v4f32 (ANDPSrr (v4f32 (COPY_TO_REGCLASS FR32:$src1, VR128)),
2528 (v4f32 (COPY_TO_REGCLASS FR32:$src2, VR128)))),
2530 def : Pat<(f32 (X86for FR32:$src1, FR32:$src2)),
2532 (v4f32 (ORPSrr (v4f32 (COPY_TO_REGCLASS FR32:$src1, VR128)),
2533 (v4f32 (COPY_TO_REGCLASS FR32:$src2, VR128)))),
2535 def : Pat<(f32 (X86fxor FR32:$src1, FR32:$src2)),
2537 (v4f32 (XORPSrr (v4f32 (COPY_TO_REGCLASS FR32:$src1, VR128)),
2538 (v4f32 (COPY_TO_REGCLASS FR32:$src2, VR128)))),
2540 def : Pat<(f32 (X86fandn FR32:$src1, FR32:$src2)),
2542 (v4f32 (ANDNPSrr (v4f32 (COPY_TO_REGCLASS FR32:$src1, VR128)),
2543 (v4f32 (COPY_TO_REGCLASS FR32:$src2, VR128)))),
2547 let Predicates = [UseSSE2] in {
2548 // Use packed logical operations for scalar ops.
2549 def : Pat<(f64 (X86fand FR64:$src1, FR64:$src2)),
2551 (v2f64 (ANDPDrr (v2f64 (COPY_TO_REGCLASS FR64:$src1, VR128)),
2552 (v2f64 (COPY_TO_REGCLASS FR64:$src2, VR128)))),
2554 def : Pat<(f64 (X86for FR64:$src1, FR64:$src2)),
2556 (v2f64 (ORPDrr (v2f64 (COPY_TO_REGCLASS FR64:$src1, VR128)),
2557 (v2f64 (COPY_TO_REGCLASS FR64:$src2, VR128)))),
2559 def : Pat<(f64 (X86fxor FR64:$src1, FR64:$src2)),
2561 (v2f64 (XORPDrr (v2f64 (COPY_TO_REGCLASS FR64:$src1, VR128)),
2562 (v2f64 (COPY_TO_REGCLASS FR64:$src2, VR128)))),
2564 def : Pat<(f64 (X86fandn FR64:$src1, FR64:$src2)),
2566 (v2f64 (ANDNPDrr (v2f64 (COPY_TO_REGCLASS FR64:$src1, VR128)),
2567 (v2f64 (COPY_TO_REGCLASS FR64:$src2, VR128)))),
2571 let Predicates = [HasAVX, NoVLX] in {
2572 def : Pat<(v16i8 (and VR128:$src1, VR128:$src2)),
2573 (VPANDrr VR128:$src1, VR128:$src2)>;
2574 def : Pat<(v8i16 (and VR128:$src1, VR128:$src2)),
2575 (VPANDrr VR128:$src1, VR128:$src2)>;
2576 def : Pat<(v4i32 (and VR128:$src1, VR128:$src2)),
2577 (VPANDrr VR128:$src1, VR128:$src2)>;
2579 def : Pat<(v16i8 (or VR128:$src1, VR128:$src2)),
2580 (VPORrr VR128:$src1, VR128:$src2)>;
2581 def : Pat<(v8i16 (or VR128:$src1, VR128:$src2)),
2582 (VPORrr VR128:$src1, VR128:$src2)>;
2583 def : Pat<(v4i32 (or VR128:$src1, VR128:$src2)),
2584 (VPORrr VR128:$src1, VR128:$src2)>;
2586 def : Pat<(v16i8 (xor VR128:$src1, VR128:$src2)),
2587 (VPXORrr VR128:$src1, VR128:$src2)>;
2588 def : Pat<(v8i16 (xor VR128:$src1, VR128:$src2)),
2589 (VPXORrr VR128:$src1, VR128:$src2)>;
2590 def : Pat<(v4i32 (xor VR128:$src1, VR128:$src2)),
2591 (VPXORrr VR128:$src1, VR128:$src2)>;
2593 def : Pat<(v16i8 (X86andnp VR128:$src1, VR128:$src2)),
2594 (VPANDNrr VR128:$src1, VR128:$src2)>;
2595 def : Pat<(v8i16 (X86andnp VR128:$src1, VR128:$src2)),
2596 (VPANDNrr VR128:$src1, VR128:$src2)>;
2597 def : Pat<(v4i32 (X86andnp VR128:$src1, VR128:$src2)),
2598 (VPANDNrr VR128:$src1, VR128:$src2)>;
2600 def : Pat<(and VR128:$src1, (loadv16i8 addr:$src2)),
2601 (VPANDrm VR128:$src1, addr:$src2)>;
2602 def : Pat<(and VR128:$src1, (loadv8i16 addr:$src2)),
2603 (VPANDrm VR128:$src1, addr:$src2)>;
2604 def : Pat<(and VR128:$src1, (loadv4i32 addr:$src2)),
2605 (VPANDrm VR128:$src1, addr:$src2)>;
2607 def : Pat<(or VR128:$src1, (loadv16i8 addr:$src2)),
2608 (VPORrm VR128:$src1, addr:$src2)>;
2609 def : Pat<(or VR128:$src1, (loadv8i16 addr:$src2)),
2610 (VPORrm VR128:$src1, addr:$src2)>;
2611 def : Pat<(or VR128:$src1, (loadv4i32 addr:$src2)),
2612 (VPORrm VR128:$src1, addr:$src2)>;
2614 def : Pat<(xor VR128:$src1, (loadv16i8 addr:$src2)),
2615 (VPXORrm VR128:$src1, addr:$src2)>;
2616 def : Pat<(xor VR128:$src1, (loadv8i16 addr:$src2)),
2617 (VPXORrm VR128:$src1, addr:$src2)>;
2618 def : Pat<(xor VR128:$src1, (loadv4i32 addr:$src2)),
2619 (VPXORrm VR128:$src1, addr:$src2)>;
2621 def : Pat<(X86andnp VR128:$src1, (loadv16i8 addr:$src2)),
2622 (VPANDNrm VR128:$src1, addr:$src2)>;
2623 def : Pat<(X86andnp VR128:$src1, (loadv8i16 addr:$src2)),
2624 (VPANDNrm VR128:$src1, addr:$src2)>;
2625 def : Pat<(X86andnp VR128:$src1, (loadv4i32 addr:$src2)),
2626 (VPANDNrm VR128:$src1, addr:$src2)>;
2629 let Predicates = [UseSSE2] in {
2630 def : Pat<(v16i8 (and VR128:$src1, VR128:$src2)),
2631 (PANDrr VR128:$src1, VR128:$src2)>;
2632 def : Pat<(v8i16 (and VR128:$src1, VR128:$src2)),
2633 (PANDrr VR128:$src1, VR128:$src2)>;
2634 def : Pat<(v4i32 (and VR128:$src1, VR128:$src2)),
2635 (PANDrr VR128:$src1, VR128:$src2)>;
2637 def : Pat<(v16i8 (or VR128:$src1, VR128:$src2)),
2638 (PORrr VR128:$src1, VR128:$src2)>;
2639 def : Pat<(v8i16 (or VR128:$src1, VR128:$src2)),
2640 (PORrr VR128:$src1, VR128:$src2)>;
2641 def : Pat<(v4i32 (or VR128:$src1, VR128:$src2)),
2642 (PORrr VR128:$src1, VR128:$src2)>;
2644 def : Pat<(v16i8 (xor VR128:$src1, VR128:$src2)),
2645 (PXORrr VR128:$src1, VR128:$src2)>;
2646 def : Pat<(v8i16 (xor VR128:$src1, VR128:$src2)),
2647 (PXORrr VR128:$src1, VR128:$src2)>;
2648 def : Pat<(v4i32 (xor VR128:$src1, VR128:$src2)),
2649 (PXORrr VR128:$src1, VR128:$src2)>;
2651 def : Pat<(v16i8 (X86andnp VR128:$src1, VR128:$src2)),
2652 (PANDNrr VR128:$src1, VR128:$src2)>;
2653 def : Pat<(v8i16 (X86andnp VR128:$src1, VR128:$src2)),
2654 (PANDNrr VR128:$src1, VR128:$src2)>;
2655 def : Pat<(v4i32 (X86andnp VR128:$src1, VR128:$src2)),
2656 (PANDNrr VR128:$src1, VR128:$src2)>;
2658 def : Pat<(and VR128:$src1, (memopv16i8 addr:$src2)),
2659 (PANDrm VR128:$src1, addr:$src2)>;
2660 def : Pat<(and VR128:$src1, (memopv8i16 addr:$src2)),
2661 (PANDrm VR128:$src1, addr:$src2)>;
2662 def : Pat<(and VR128:$src1, (memopv4i32 addr:$src2)),
2663 (PANDrm VR128:$src1, addr:$src2)>;
2665 def : Pat<(or VR128:$src1, (memopv16i8 addr:$src2)),
2666 (PORrm VR128:$src1, addr:$src2)>;
2667 def : Pat<(or VR128:$src1, (memopv8i16 addr:$src2)),
2668 (PORrm VR128:$src1, addr:$src2)>;
2669 def : Pat<(or VR128:$src1, (memopv4i32 addr:$src2)),
2670 (PORrm VR128:$src1, addr:$src2)>;
2672 def : Pat<(xor VR128:$src1, (memopv16i8 addr:$src2)),
2673 (PXORrm VR128:$src1, addr:$src2)>;
2674 def : Pat<(xor VR128:$src1, (memopv8i16 addr:$src2)),
2675 (PXORrm VR128:$src1, addr:$src2)>;
2676 def : Pat<(xor VR128:$src1, (memopv4i32 addr:$src2)),
2677 (PXORrm VR128:$src1, addr:$src2)>;
2679 def : Pat<(X86andnp VR128:$src1, (memopv16i8 addr:$src2)),
2680 (PANDNrm VR128:$src1, addr:$src2)>;
2681 def : Pat<(X86andnp VR128:$src1, (memopv8i16 addr:$src2)),
2682 (PANDNrm VR128:$src1, addr:$src2)>;
2683 def : Pat<(X86andnp VR128:$src1, (memopv4i32 addr:$src2)),
2684 (PANDNrm VR128:$src1, addr:$src2)>;
2685 } // Predicates = [UseSSE2]
2687 // Patterns for packed operations when we don't have an integer type available.
2688 def : Pat<(v4f32 (X86fand VR128:$src1, VR128:$src2)),
2689 (ANDPSrr VR128:$src1, VR128:$src2)>;
2690 def : Pat<(v4f32 (X86for VR128:$src1, VR128:$src2)),
2691 (ORPSrr VR128:$src1, VR128:$src2)>;
2692 def : Pat<(v4f32 (X86fxor VR128:$src1, VR128:$src2)),
2693 (XORPSrr VR128:$src1, VR128:$src2)>;
2694 def : Pat<(v4f32 (X86fandn VR128:$src1, VR128:$src2)),
2695 (ANDNPSrr VR128:$src1, VR128:$src2)>;
2697 def : Pat<(X86fand VR128:$src1, (memopv4f32 addr:$src2)),
2698 (ANDPSrm VR128:$src1, addr:$src2)>;
2699 def : Pat<(X86for VR128:$src1, (memopv4f32 addr:$src2)),
2700 (ORPSrm VR128:$src1, addr:$src2)>;
2701 def : Pat<(X86fxor VR128:$src1, (memopv4f32 addr:$src2)),
2702 (XORPSrm VR128:$src1, addr:$src2)>;
2703 def : Pat<(X86fandn VR128:$src1, (memopv4f32 addr:$src2)),
2704 (ANDNPSrm VR128:$src1, addr:$src2)>;
2706 //===----------------------------------------------------------------------===//
2707 // SSE 1 & 2 - Arithmetic Instructions
2708 //===----------------------------------------------------------------------===//
2710 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
2711 /// vector forms.
2712 ///
2713 /// In addition, we also have a special variant of the scalar form here to
2714 /// represent the associated intrinsic operation. This form is unlike the
2715 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
2716 /// and leaves the top elements unmodified (therefore these cannot be commuted).
2718 /// These three forms can each be reg+reg or reg+mem.
2721 /// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
2722 /// classes below.
2723 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr,
2724 SDNode OpNode, X86SchedWriteSizes sched> {
2725 let Predicates = [HasAVX, NoVLX] in {
2726 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
2727 VR128, v4f32, f128mem, loadv4f32,
2728 SSEPackedSingle, sched.PS.XMM, 0>, PS, VEX_4V, VEX_WIG;
2729 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
2730 VR128, v2f64, f128mem, loadv2f64,
2731 SSEPackedDouble, sched.PD.XMM, 0>, PD, VEX_4V, VEX_WIG;
2733 defm V#NAME#PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"),
2734 OpNode, VR256, v8f32, f256mem, loadv8f32,
2735 SSEPackedSingle, sched.PS.YMM, 0>, PS, VEX_4V, VEX_L, VEX_WIG;
2736 defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"),
2737 OpNode, VR256, v4f64, f256mem, loadv4f64,
2738 SSEPackedDouble, sched.PD.YMM, 0>, PD, VEX_4V, VEX_L, VEX_WIG;
2739 }
2741 let Constraints = "$src1 = $dst" in {
2742 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
2743 v4f32, f128mem, memopv4f32, SSEPackedSingle,
2744 sched.PS.XMM>, PS;
2745 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
2746 v2f64, f128mem, memopv2f64, SSEPackedDouble,
2747 sched.PD.XMM>, PD;
2748 }
2749 }
2751 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
2752 X86SchedWriteSizes sched> {
2753 defm V#NAME#SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
2754 OpNode, FR32, f32mem, SSEPackedSingle, sched.PS.Scl, 0>,
2755 XS, VEX_4V, VEX_LIG, VEX_WIG;
2756 defm V#NAME#SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
2757 OpNode, FR64, f64mem, SSEPackedDouble, sched.PD.Scl, 0>,
2758 XD, VEX_4V, VEX_LIG, VEX_WIG;
2760 let Constraints = "$src1 = $dst" in {
2761 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
2762 OpNode, FR32, f32mem, SSEPackedSingle,
2763 sched.PS.Scl>, XS;
2764 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
2765 OpNode, FR64, f64mem, SSEPackedDouble,
2766 sched.PD.Scl>, XD;
2767 }
2768 }
2770 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
2771 SDPatternOperator OpNode,
2772 X86SchedWriteSizes sched> {
2773 defm V#NAME#SS : sse12_fp_scalar_int<opc, OpcodeStr, OpNode, VR128, v4f32,
2774 !strconcat(OpcodeStr, "ss"), ssmem, sse_load_f32,
2775 SSEPackedSingle, sched.PS.Scl, 0>, XS, VEX_4V, VEX_LIG, VEX_WIG;
2776 defm V#NAME#SD : sse12_fp_scalar_int<opc, OpcodeStr, OpNode, VR128, v2f64,
2777 !strconcat(OpcodeStr, "sd"), sdmem, sse_load_f64,
2778 SSEPackedDouble, sched.PD.Scl, 0>, XD, VEX_4V, VEX_LIG, VEX_WIG;
2780 let Constraints = "$src1 = $dst" in {
2781 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, OpNode, VR128, v4f32,
2782 !strconcat(OpcodeStr, "ss"), ssmem, sse_load_f32,
2783 SSEPackedSingle, sched.PS.Scl>, XS;
2784 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, OpNode, VR128, v2f64,
2785 !strconcat(OpcodeStr, "sd"), sdmem, sse_load_f64,
2786 SSEPackedDouble, sched.PD.Scl>, XD;
2787 }
2788 }
2790 // Binary Arithmetic instructions
2791 defm ADD : basic_sse12_fp_binop_p<0x58, "add", fadd, SchedWriteFAddSizes>,
2792 basic_sse12_fp_binop_s<0x58, "add", fadd, SchedWriteFAddSizes>,
2793 basic_sse12_fp_binop_s_int<0x58, "add", null_frag, SchedWriteFAddSizes>;
2794 defm MUL : basic_sse12_fp_binop_p<0x59, "mul", fmul, SchedWriteFMulSizes>,
2795 basic_sse12_fp_binop_s<0x59, "mul", fmul, SchedWriteFMulSizes>,
2796 basic_sse12_fp_binop_s_int<0x59, "mul", null_frag, SchedWriteFMulSizes>;
2797 let isCommutable = 0 in {
2798 defm SUB : basic_sse12_fp_binop_p<0x5C, "sub", fsub, SchedWriteFAddSizes>,
2799 basic_sse12_fp_binop_s<0x5C, "sub", fsub, SchedWriteFAddSizes>,
2800 basic_sse12_fp_binop_s_int<0x5C, "sub", null_frag, SchedWriteFAddSizes>;
2801 defm DIV : basic_sse12_fp_binop_p<0x5E, "div", fdiv, SchedWriteFDivSizes>,
2802 basic_sse12_fp_binop_s<0x5E, "div", fdiv, SchedWriteFDivSizes>,
2803 basic_sse12_fp_binop_s_int<0x5E, "div", null_frag, SchedWriteFDivSizes>;
2804 defm MAX : basic_sse12_fp_binop_p<0x5F, "max", X86fmax, SchedWriteFCmpSizes>,
2805 basic_sse12_fp_binop_s<0x5F, "max", X86fmax, SchedWriteFCmpSizes>,
2806 basic_sse12_fp_binop_s_int<0x5F, "max", X86fmaxs, SchedWriteFCmpSizes>;
2807 defm MIN : basic_sse12_fp_binop_p<0x5D, "min", X86fmin, SchedWriteFCmpSizes>,
2808 basic_sse12_fp_binop_s<0x5D, "min", X86fmin, SchedWriteFCmpSizes>,
2809 basic_sse12_fp_binop_s_int<0x5D, "min", X86fmins, SchedWriteFCmpSizes>;
2810 }
2812 let isCodeGenOnly = 1 in {
2813 defm MAXC: basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SchedWriteFCmpSizes>,
2814 basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SchedWriteFCmpSizes>;
2815 defm MINC: basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SchedWriteFCmpSizes>,
2816 basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SchedWriteFCmpSizes>;
2817 }
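// Note (editorial): X86fmaxc and X86fminc are the commutable forms of
// X86fmax/X86fmin; they are emitted when operand order does not matter
// (e.g. no-NaNs / no-signed-zeros fast-math), so the MAXC*/MINC* variants
// above may be freely commuted by the register allocator.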
2819 // Patterns used to select SSE scalar fp arithmetic instructions from
2820 // either:
2822 // (1) a scalar fp operation followed by a blend
2824 // The effect is that the backend no longer emits unnecessary vector
2825 // insert instructions immediately after SSE scalar fp instructions
2826 // like addss or mulss.
2828 // For example, given the following code:
2829 // __m128 foo(__m128 A, __m128 B) {
2830 //   A[0] += B[0];
2831 //   return A;
2832 // }
2834 // Previously we generated:
2835 // addss %xmm0, %xmm1
2836 // movss %xmm1, %xmm0
2837 //
2838 // We now generate:
2839 // addss %xmm1, %xmm0
2841 // (2) a vector packed single/double fp operation followed by a vector insert
2843 // The effect is that the backend converts the packed fp instruction
2844 // followed by a vector insert into a single SSE scalar fp instruction.
2846 // For example, given the following code:
2847 // __m128 foo(__m128 A, __m128 B) {
2848 // __m128 C = A + B;
2849 // return (__m128) {c[0], a[1], a[2], a[3]};
2850 // }
2852 // Previously we generated:
2853 // addps %xmm0, %xmm1
2854 // movss %xmm1, %xmm0
2855 //
2856 // We now generate:
2857 // addss %xmm1, %xmm0
2859 // TODO: Some canonicalization in lowering would simplify the number of
2860 // patterns we have to try to match.
2861 multiclass scalar_math_patterns<SDNode Op, string OpcPrefix, SDNode Move,
2862 ValueType VT, ValueType EltTy,
2863 RegisterClass RC, Predicate BasePredicate> {
2864 let Predicates = [BasePredicate] in {
2865 // extracted scalar math op with insert via movss/movsd
2866 def : Pat<(VT (Move (VT VR128:$dst),
2867 (VT (scalar_to_vector
2868 (Op (EltTy (extractelt (VT VR128:$dst), (iPTR 0))),
2869 RC:$src))))),
2870 (!cast<Instruction>(OpcPrefix#rr_Int) VT:$dst,
2871 (VT (COPY_TO_REGCLASS RC:$src, VR128)))>;
2872 }
2874 // Repeat for AVX versions of the instructions.
2875 let Predicates = [UseAVX] in {
2876 // extracted scalar math op with insert via movss/movsd
2877 def : Pat<(VT (Move (VT VR128:$dst),
2878 (VT (scalar_to_vector
2879 (Op (EltTy (extractelt (VT VR128:$dst), (iPTR 0))),
2880 RC:$src))))),
2881 (!cast<Instruction>("V"#OpcPrefix#rr_Int) VT:$dst,
2882 (VT (COPY_TO_REGCLASS RC:$src, VR128)))>;
2883 }
2884 }
2886 defm : scalar_math_patterns<fadd, "ADDSS", X86Movss, v4f32, f32, FR32, UseSSE1>;
2887 defm : scalar_math_patterns<fsub, "SUBSS", X86Movss, v4f32, f32, FR32, UseSSE1>;
2888 defm : scalar_math_patterns<fmul, "MULSS", X86Movss, v4f32, f32, FR32, UseSSE1>;
2889 defm : scalar_math_patterns<fdiv, "DIVSS", X86Movss, v4f32, f32, FR32, UseSSE1>;
2891 defm : scalar_math_patterns<fadd, "ADDSD", X86Movsd, v2f64, f64, FR64, UseSSE2>;
2892 defm : scalar_math_patterns<fsub, "SUBSD", X86Movsd, v2f64, f64, FR64, UseSSE2>;
2893 defm : scalar_math_patterns<fmul, "MULSD", X86Movsd, v2f64, f64, FR64, UseSSE2>;
2894 defm : scalar_math_patterns<fdiv, "DIVSD", X86Movsd, v2f64, f64, FR64, UseSSE2>;
2897 /// In addition, we also have a special variant of the scalar form here to
2898 /// represent the associated intrinsic operation. This form is unlike the
2899 /// plain scalar form, in that it takes an entire vector (instead of a
2900 /// scalar) and leaves the top elements undefined.
2902 /// And, we have a special variant form for a full-vector intrinsic form.
2904 /// sse_fp_unop_s - SSE1 unops in scalar form
2905 /// For the non-AVX defs, we need $src1 to be tied to $dst because
2906 /// the HW instructions are 2 operand / destructive.
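// Illustrative example of the destructive (2-operand) encoding mentioned
// above: "sqrtss %xmm1, %xmm0" writes sqrt(xmm1[31:0]) into xmm0[31:0] but
// leaves xmm0[127:32] unchanged, so %xmm0 is both a source and the result.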
2907 multiclass sse_fp_unop_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
2908 ValueType ScalarVT, X86MemOperand x86memop,
2909 Operand intmemop, SDNode OpNode, Domain d,
2910 X86FoldableSchedWrite sched, Predicate target> {
2911 let hasSideEffects = 0 in {
2912 def r : I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1),
2913 !strconcat(OpcodeStr, "\t{$src1, $dst|$dst, $src1}"),
2914 [(set RC:$dst, (OpNode RC:$src1))], d>, Sched<[sched]>,
2915 Requires<[target]>;
2917 def m : I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src1),
2918 !strconcat(OpcodeStr, "\t{$src1, $dst|$dst, $src1}"),
2919 [(set RC:$dst, (OpNode (load addr:$src1)))], d>,
2920 Sched<[sched.Folded]>,
2921 Requires<[target, OptForSize]>;
2923 let isCodeGenOnly = 1, Constraints = "$src1 = $dst", ExeDomain = d in {
2924 def r_Int : I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2925 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), []>,
2926 Sched<[sched]>;
2927 let mayLoad = 1 in
2928 def m_Int : I<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, intmemop:$src2),
2929 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), []>,
2930 Sched<[sched.Folded, sched.ReadAfterFold]>;
2931 }
2932 }
2933 }
2936 multiclass sse_fp_unop_s_intr<RegisterClass RC, ValueType vt,
2937 ComplexPattern int_cpat, Intrinsic Intr,
2938 Predicate target, string Suffix> {
2939 let Predicates = [target] in {
2940 // These are unary operations, but they are modeled as having 2 source operands
2941 // because the high elements of the destination are unchanged in SSE.
2942 def : Pat<(Intr VR128:$src),
2943 (!cast<Instruction>(NAME#r_Int) VR128:$src, VR128:$src)>;
2944 }
2945 // We don't want to fold scalar loads into these instructions unless
2946 // optimizing for size. This is because the folded instruction will have a
2947 // partial register update, while the unfolded sequence will not, e.g.
2948 // movss mem, %xmm0
2949 // rcpss %xmm0, %xmm0
2950 // which has a clobber before the rcp, vs.
2951 // rcpss mem, %xmm0
2952 let Predicates = [target, OptForSize] in {
2953 def : Pat<(Intr int_cpat:$src2),
2954 (!cast<Instruction>(NAME#m_Int)
2955 (vt (IMPLICIT_DEF)), addr:$src2)>;
2956 }
2957 }
2959 multiclass avx_fp_unop_s_intr<RegisterClass RC, ValueType vt, ComplexPattern int_cpat,
2960 Intrinsic Intr, Predicate target> {
2961 let Predicates = [target] in {
2962 def : Pat<(Intr VR128:$src),
2963 (!cast<Instruction>(NAME#r_Int) VR128:$src,
2964 VR128:$src)>;
2965 }
2966 let Predicates = [target, OptForSize] in {
2967 def : Pat<(Intr int_cpat:$src2),
2968 (!cast<Instruction>(NAME#m_Int)
2969 (vt (IMPLICIT_DEF)), addr:$src2)>;
2970 }
2971 }
2973 multiclass avx_fp_unop_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
2974 ValueType ScalarVT, X86MemOperand x86memop,
2975 Operand intmemop, SDNode OpNode, Domain d,
2976 X86FoldableSchedWrite sched, Predicate target> {
2977 let hasSideEffects = 0 in {
2978 def r : I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2979 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2980 [], d>, Sched<[sched]>;
2981 let mayLoad = 1 in
2982 def m : I<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2983 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2984 [], d>, Sched<[sched.Folded, sched.ReadAfterFold]>;
2985 let isCodeGenOnly = 1, ExeDomain = d in {
2986 def r_Int : I<opc, MRMSrcReg, (outs VR128:$dst),
2987 (ins VR128:$src1, VR128:$src2),
2988 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2989 []>, Sched<[sched]>;
2990 let mayLoad = 1 in
2991 def m_Int : I<opc, MRMSrcMem, (outs VR128:$dst),
2992 (ins VR128:$src1, intmemop:$src2),
2993 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2994 []>, Sched<[sched.Folded, sched.ReadAfterFold]>;
2995 }
2996 }
2998 // We don't want to fold scalar loads into these instructions unless
2999 // optimizing for size. This is because the folded instruction will have a
3000 // partial register update, while the unfolded sequence will not, e.g.
3001 // vmovss mem, %xmm0
3002 // vrcpss %xmm0, %xmm0, %xmm0
3003 // which has a clobber before the rcp, vs.
3004 // vrcpss mem, %xmm0, %xmm0
3005 // TODO: In theory, we could fold the load, and avoid the stall caused by
3006 // the partial register store, either in BreakFalseDeps or with smarter RA.
3007 let Predicates = [target] in {
3008 def : Pat<(OpNode RC:$src), (!cast<Instruction>(NAME#r)
3009 (ScalarVT (IMPLICIT_DEF)), RC:$src)>;
3010 }
3011 let Predicates = [target, OptForSize] in {
3012 def : Pat<(ScalarVT (OpNode (load addr:$src))),
3013 (!cast<Instruction>(NAME#m) (ScalarVT (IMPLICIT_DEF)),
3014 addr:$src)>;
3015 }
3016 }
3018 /// sse1_fp_unop_p - SSE1 unops in packed form.
3019 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
3020 X86SchedWriteWidths sched, list<Predicate> prds> {
3021 let Predicates = prds in {
3022 def V#NAME#PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3023 !strconcat("v", OpcodeStr,
3024 "ps\t{$src, $dst|$dst, $src}"),
3025 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>,
3026 VEX, Sched<[sched.XMM]>, VEX_WIG;
3027 def V#NAME#PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3028 !strconcat("v", OpcodeStr,
3029 "ps\t{$src, $dst|$dst, $src}"),
3030 [(set VR128:$dst, (OpNode (loadv4f32 addr:$src)))]>,
3031 VEX, Sched<[sched.XMM.Folded]>, VEX_WIG;
3032 def V#NAME#PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3033 !strconcat("v", OpcodeStr,
3034 "ps\t{$src, $dst|$dst, $src}"),
3035 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>,
3036 VEX, VEX_L, Sched<[sched.YMM]>, VEX_WIG;
3037 def V#NAME#PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3038 !strconcat("v", OpcodeStr,
3039 "ps\t{$src, $dst|$dst, $src}"),
3040 [(set VR256:$dst, (OpNode (loadv8f32 addr:$src)))]>,
3041 VEX, VEX_L, Sched<[sched.YMM.Folded]>, VEX_WIG;
3042 }
3044 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3045 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3046 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>,
3047 Sched<[sched.XMM]>;
3048 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3049 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3050 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>,
3051 Sched<[sched.XMM.Folded]>;
3052 }
3054 /// sse2_fp_unop_p - SSE2 unops in vector forms.
3055 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
3056 SDNode OpNode, X86SchedWriteWidths sched> {
3057 let Predicates = [HasAVX, NoVLX] in {
3058 def V#NAME#PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3059 !strconcat("v", OpcodeStr,
3060 "pd\t{$src, $dst|$dst, $src}"),
3061 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>,
3062 VEX, Sched<[sched.XMM]>, VEX_WIG;
3063 def V#NAME#PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3064 !strconcat("v", OpcodeStr,
3065 "pd\t{$src, $dst|$dst, $src}"),
3066 [(set VR128:$dst, (OpNode (loadv2f64 addr:$src)))]>,
3067 VEX, Sched<[sched.XMM.Folded]>, VEX_WIG;
3068 def V#NAME#PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3069 !strconcat("v", OpcodeStr,
3070 "pd\t{$src, $dst|$dst, $src}"),
3071 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>,
3072 VEX, VEX_L, Sched<[sched.YMM]>, VEX_WIG;
3073 def V#NAME#PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3074 !strconcat("v", OpcodeStr,
3075 "pd\t{$src, $dst|$dst, $src}"),
3076 [(set VR256:$dst, (OpNode (loadv4f64 addr:$src)))]>,
3077 VEX, VEX_L, Sched<[sched.YMM.Folded]>, VEX_WIG;
3078 }
3080 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3081 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3082 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>,
3083 Sched<[sched.XMM]>;
3084 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3085 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3086 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>,
3087 Sched<[sched.XMM.Folded]>;
3088 }
3090 multiclass sse1_fp_unop_s_intr<bits<8> opc, string OpcodeStr, SDNode OpNode,
3091 X86SchedWriteWidths sched, Predicate AVXTarget> {
3092 defm SS : sse_fp_unop_s_intr<FR32, v4f32, sse_load_f32,
3093 !cast<Intrinsic>("int_x86_sse_"##OpcodeStr##_ss),
3094 UseSSE1, "SS">, XS;
3095 defm V#NAME#SS : avx_fp_unop_s_intr<FR32, v4f32, sse_load_f32,
3096 !cast<Intrinsic>("int_x86_sse_"##OpcodeStr##_ss),
3097 AVXTarget>,
3098 XS, VEX_4V, VEX_LIG, VEX_WIG, NotMemoryFoldable;
3099 }
3101 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
3102 X86SchedWriteWidths sched, Predicate AVXTarget> {
3103 defm SS : sse_fp_unop_s<opc, OpcodeStr##ss, FR32, f32, f32mem,
3104 ssmem, OpNode, SSEPackedSingle, sched.Scl, UseSSE1>, XS;
3105 defm V#NAME#SS : avx_fp_unop_s<opc, "v"#OpcodeStr##ss, FR32, f32,
3106 f32mem, ssmem, OpNode, SSEPackedSingle, sched.Scl, AVXTarget>,
3107 XS, VEX_4V, VEX_LIG, VEX_WIG;
3108 }
3110 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
3111 X86SchedWriteWidths sched, Predicate AVXTarget> {
3112 defm SD : sse_fp_unop_s<opc, OpcodeStr##sd, FR64, f64, f64mem,
3113 sdmem, OpNode, SSEPackedDouble, sched.Scl, UseSSE2>, XD;
3114 defm V#NAME#SD : avx_fp_unop_s<opc, "v"#OpcodeStr##sd, FR64, f64,
3115 f64mem, sdmem, OpNode, SSEPackedDouble, sched.Scl, AVXTarget>,
3116 XD, VEX_4V, VEX_LIG, VEX_WIG;
3117 }
3120 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, SchedWriteFSqrt, UseAVX>,
3121 sse1_fp_unop_p<0x51, "sqrt", fsqrt, SchedWriteFSqrt, [HasAVX, NoVLX]>,
3122 sse2_fp_unop_s<0x51, "sqrt", fsqrt, SchedWriteFSqrt64, UseAVX>,
3123 sse2_fp_unop_p<0x51, "sqrt", fsqrt, SchedWriteFSqrt64>;
3125 // Reciprocal approximations. Note that these typically require refinement
3126 // in order to obtain suitable precision.
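// A sketch of the usual refinement (performed during DAG combining, shown
// here only for reference): one Newton-Raphson step for RCPPS is
//   x1 = x0 * (2.0 - a * x0)             // x ~= 1/a
// and for RSQRTPS
//   x1 = x0 * (1.5 - 0.5 * a * x0 * x0)  // x ~= 1/sqrt(a)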
3127 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, SchedWriteFRsqrt, HasAVX>,
3128 sse1_fp_unop_s_intr<0x52, "rsqrt", X86frsqrt, SchedWriteFRsqrt, HasAVX>,
3129 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, SchedWriteFRsqrt, [HasAVX]>;
3130 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, SchedWriteFRcp, HasAVX>,
3131 sse1_fp_unop_s_intr<0x53, "rcp", X86frcp, SchedWriteFRcp, HasAVX>,
3132 sse1_fp_unop_p<0x53, "rcp", X86frcp, SchedWriteFRcp, [HasAVX]>;
3134 // There is no f64 version of the reciprocal approximation instructions.
3136 multiclass scalar_unary_math_patterns<SDNode OpNode, string OpcPrefix, SDNode Move,
3137 ValueType VT, Predicate BasePredicate> {
3138 let Predicates = [BasePredicate] in {
3139 def : Pat<(VT (Move VT:$dst, (scalar_to_vector
3140 (OpNode (extractelt VT:$src, 0))))),
3141 (!cast<Instruction>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
3142 }
3144 // Repeat for AVX versions of the instructions.
3145 let Predicates = [UseAVX] in {
3146 def : Pat<(VT (Move VT:$dst, (scalar_to_vector
3147 (OpNode (extractelt VT:$src, 0))))),
3148 (!cast<Instruction>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;
3149 }
3150 }
3152 multiclass scalar_unary_math_imm_patterns<SDNode OpNode, string OpcPrefix, SDNode Move,
3153 ValueType VT, bits<8> ImmV,
3154 Predicate BasePredicate> {
3155 let Predicates = [BasePredicate] in {
3156 def : Pat<(VT (Move VT:$dst, (scalar_to_vector
3157 (OpNode (extractelt VT:$src, 0))))),
3158 (!cast<Instruction>(OpcPrefix#r_Int) VT:$dst, VT:$src, (i32 ImmV))>;
3159 }
3161 // Repeat for AVX versions of the instructions.
3162 let Predicates = [UseAVX] in {
3163 def : Pat<(VT (Move VT:$dst, (scalar_to_vector
3164 (OpNode (extractelt VT:$src, 0))))),
3165 (!cast<Instruction>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src, (i32 ImmV))>;
3166 }
3167 }
3169 defm : scalar_unary_math_patterns<fsqrt, "SQRTSS", X86Movss, v4f32, UseSSE1>;
3170 defm : scalar_unary_math_patterns<fsqrt, "SQRTSD", X86Movsd, v2f64, UseSSE2>;
3172 multiclass scalar_unary_math_intr_patterns<Intrinsic Intr, string OpcPrefix,
3173 SDNode Move, ValueType VT,
3174 Predicate BasePredicate> {
3175 let Predicates = [BasePredicate] in {
3176 def : Pat<(VT (Move VT:$dst, (Intr VT:$src))),
3177 (!cast<Instruction>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
3178 }
3180 // Repeat for AVX versions of the instructions.
3181 let Predicates = [HasAVX] in {
3182 def : Pat<(VT (Move VT:$dst, (Intr VT:$src))),
3183 (!cast<Instruction>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;
3184 }
3185 }
3187 defm : scalar_unary_math_intr_patterns<int_x86_sse_rcp_ss, "RCPSS", X86Movss,
3188 v4f32, UseSSE1>;
3189 defm : scalar_unary_math_intr_patterns<int_x86_sse_rsqrt_ss, "RSQRTSS", X86Movss,
3190 v4f32, UseSSE1>;
3193 //===----------------------------------------------------------------------===//
3194 // SSE 1 & 2 - Non-temporal stores
3195 //===----------------------------------------------------------------------===//
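// Illustrative mapping (not itself a pattern): the intrinsic
//   _mm_stream_ps(float *p, __m128 v)   // p must be 16-byte aligned
// becomes an alignednontemporalstore and selects MOVNTPS/VMOVNTPSmr below.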
3197 let AddedComplexity = 400 in { // Prefer non-temporal versions
3198 let Predicates = [HasAVX, NoVLX] in {
3199 let SchedRW = [SchedWriteFMoveLSNT.XMM.MR] in {
3200 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
3201 (ins f128mem:$dst, VR128:$src),
3202 "movntps\t{$src, $dst|$dst, $src}",
3203 [(alignednontemporalstore (v4f32 VR128:$src),
3204 addr:$dst)]>, VEX, VEX_WIG;
3205 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
3206 (ins f128mem:$dst, VR128:$src),
3207 "movntpd\t{$src, $dst|$dst, $src}",
3208 [(alignednontemporalstore (v2f64 VR128:$src),
3209 addr:$dst)]>, VEX, VEX_WIG;
3210 }
3212 let SchedRW = [SchedWriteFMoveLSNT.YMM.MR] in {
3213 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
3214 (ins f256mem:$dst, VR256:$src),
3215 "movntps\t{$src, $dst|$dst, $src}",
3216 [(alignednontemporalstore (v8f32 VR256:$src),
3217 addr:$dst)]>, VEX, VEX_L, VEX_WIG;
3218 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
3219 (ins f256mem:$dst, VR256:$src),
3220 "movntpd\t{$src, $dst|$dst, $src}",
3221 [(alignednontemporalstore (v4f64 VR256:$src),
3222 addr:$dst)]>, VEX, VEX_L, VEX_WIG;
3223 }
3225 let ExeDomain = SSEPackedInt in {
3226 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
3227 (ins i128mem:$dst, VR128:$src),
3228 "movntdq\t{$src, $dst|$dst, $src}",
3229 [(alignednontemporalstore (v2i64 VR128:$src),
3230 addr:$dst)]>, VEX, VEX_WIG,
3231 Sched<[SchedWriteVecMoveLSNT.XMM.MR]>;
3232 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
3233 (ins i256mem:$dst, VR256:$src),
3234 "movntdq\t{$src, $dst|$dst, $src}",
3235 [(alignednontemporalstore (v4i64 VR256:$src),
3236 addr:$dst)]>, VEX, VEX_L, VEX_WIG,
3237 Sched<[SchedWriteVecMoveLSNT.YMM.MR]>;
3238 }
3239 }
3241 let SchedRW = [SchedWriteFMoveLSNT.XMM.MR] in {
3242 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3243 "movntps\t{$src, $dst|$dst, $src}",
3244 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
3245 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3246 "movntpd\t{$src, $dst|$dst, $src}",
3247 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
3248 }
3250 let ExeDomain = SSEPackedInt, SchedRW = [SchedWriteVecMoveLSNT.XMM.MR] in
3251 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3252 "movntdq\t{$src, $dst|$dst, $src}",
3253 [(alignednontemporalstore (v2i64 VR128:$src), addr:$dst)]>;
3255 let SchedRW = [WriteStoreNT] in {
3256 // There is no AVX form for instructions below this point
3257 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
3258 "movnti{l}\t{$src, $dst|$dst, $src}",
3259 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
3260 PS, Requires<[HasSSE2]>;
3261 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
3262 "movnti{q}\t{$src, $dst|$dst, $src}",
3263 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
3264 PS, Requires<[HasSSE2]>;
3265 } // SchedRW = [WriteStoreNT]
3267 let Predicates = [HasAVX, NoVLX] in {
3268 def : Pat<(alignednontemporalstore (v8i32 VR256:$src), addr:$dst),
3269 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
3270 def : Pat<(alignednontemporalstore (v16i16 VR256:$src), addr:$dst),
3271 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
3272 def : Pat<(alignednontemporalstore (v32i8 VR256:$src), addr:$dst),
3273 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
3275 def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
3276 (VMOVNTDQmr addr:$dst, VR128:$src)>;
3277 def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
3278 (VMOVNTDQmr addr:$dst, VR128:$src)>;
3279 def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
3280 (VMOVNTDQmr addr:$dst, VR128:$src)>;
3281 }
3283 let Predicates = [UseSSE2] in {
3284 def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
3285 (MOVNTDQmr addr:$dst, VR128:$src)>;
3286 def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
3287 (MOVNTDQmr addr:$dst, VR128:$src)>;
3288 def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
3289 (MOVNTDQmr addr:$dst, VR128:$src)>;
3290 }
3292 } // AddedComplexity
3294 //===----------------------------------------------------------------------===//
3295 // SSE 1 & 2 - Prefetch and memory fence
3296 //===----------------------------------------------------------------------===//
3298 // Prefetch intrinsic.
3299 let Predicates = [HasSSEPrefetch], SchedRW = [WriteLoad] in {
3300 def PREFETCHT0 : I<0x18, MRM1m, (outs), (ins i8mem:$src),
3301 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))]>, TB;
3302 def PREFETCHT1 : I<0x18, MRM2m, (outs), (ins i8mem:$src),
3303 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))]>, TB;
3304 def PREFETCHT2 : I<0x18, MRM3m, (outs), (ins i8mem:$src),
3305 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))]>, TB;
3306 def PREFETCHNTA : I<0x18, MRM0m, (outs), (ins i8mem:$src),
3307 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))]>, TB;
3308 }
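// Illustrative mapping, assuming the usual frontend lowering of
// __builtin_prefetch(p, /*rw*/0, /*locality*/N):
//   N=3 -> prefetcht0, N=2 -> prefetcht1, N=1 -> prefetcht2, N=0 -> prefetchnta
// which matches the (i32 3)..(i32 0) locality operands in the patterns above.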
3310 // FIXME: How should flush instruction be modeled?
3311 let SchedRW = [WriteLoad] in {
3313 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3314 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3315 PS, Requires<[HasSSE2]>;
3316 }
3318 let SchedRW = [WriteNop] in {
3319 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3320 // was introduced with SSE2, it's backward compatible.
3321 def PAUSE : I<0x90, RawFrm, (outs), (ins),
3322 "pause", [(int_x86_sse2_pause)]>, OBXS;
3323 }
3325 let SchedRW = [WriteFence] in {
3326 // Load, store, and memory fence
3327 // TODO: As with mfence, we may want to ease the availability of sfence/lfence
3328 // to include any 64-bit target.
3329 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
3330 PS, Requires<[HasSSE1]>;
3331 def LFENCE : I<0xAE, MRM_E8, (outs), (ins), "lfence", [(int_x86_sse2_lfence)]>,
3332 PS, Requires<[HasSSE2]>;
3333 def MFENCE : I<0xAE, MRM_F0, (outs), (ins), "mfence", [(int_x86_sse2_mfence)]>,
3334 PS, Requires<[HasMFence]>;
3335 }
3337 def : Pat<(X86MFence), (MFENCE)>;
3339 //===----------------------------------------------------------------------===//
3340 // SSE 1 & 2 - Load/Store MXCSR register
3341 //===----------------------------------------------------------------------===//
3343 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
3344 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>,
3345 VEX, Sched<[WriteLDMXCSR]>, VEX_WIG;
3346 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
3347 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>,
3348 VEX, Sched<[WriteSTMXCSR]>, VEX_WIG;
3350 def LDMXCSR : I<0xAE, MRM2m, (outs), (ins i32mem:$src),
3351 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>,
3352 TB, Sched<[WriteLDMXCSR]>;
3353 def STMXCSR : I<0xAE, MRM3m, (outs), (ins i32mem:$dst),
3354 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>,
3355 TB, Sched<[WriteSTMXCSR]>;
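// Illustrative usage (sketch only): _mm_getcsr()/_mm_setcsr() are lowered to
// STMXCSR/LDMXCSR through a 32-bit stack slot, e.g.
//   unsigned csr = _mm_getcsr();
//   _mm_setcsr(csr | 0x8040);  // set FTZ (bit 15) and DAZ (bit 6)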
3357 //===---------------------------------------------------------------------===//
3358 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
3359 //===---------------------------------------------------------------------===//
3361 let ExeDomain = SSEPackedInt in { // SSE integer instructions
3363 let hasSideEffects = 0 in {
3364 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3365 "movdqa\t{$src, $dst|$dst, $src}", []>,
3366 Sched<[SchedWriteVecMoveLS.XMM.RR]>, VEX, VEX_WIG;
3367 def VMOVDQUrr : VSSI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3368 "movdqu\t{$src, $dst|$dst, $src}", []>,
3369 Sched<[SchedWriteVecMoveLS.XMM.RR]>, VEX, VEX_WIG;
3370 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3371 "movdqa\t{$src, $dst|$dst, $src}", []>,
3372 Sched<[SchedWriteVecMoveLS.YMM.RR]>, VEX, VEX_L, VEX_WIG;
3373 def VMOVDQUYrr : VSSI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3374 "movdqu\t{$src, $dst|$dst, $src}", []>,
3375 Sched<[SchedWriteVecMoveLS.YMM.RR]>, VEX, VEX_L, VEX_WIG;
3376 }
3379 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
3380 def VMOVDQArr_REV : VPDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3381 "movdqa\t{$src, $dst|$dst, $src}", []>,
3382 Sched<[SchedWriteVecMoveLS.XMM.RR]>,
3383 VEX, VEX_WIG, FoldGenData<"VMOVDQArr">;
3384 def VMOVDQAYrr_REV : VPDI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
3385 "movdqa\t{$src, $dst|$dst, $src}", []>,
3386 Sched<[SchedWriteVecMoveLS.YMM.RR]>,
3387 VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVDQAYrr">;
3388 def VMOVDQUrr_REV : VSSI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3389 "movdqu\t{$src, $dst|$dst, $src}", []>,
3390 Sched<[SchedWriteVecMoveLS.XMM.RR]>,
3391 VEX, VEX_WIG, FoldGenData<"VMOVDQUrr">;
3392 def VMOVDQUYrr_REV : VSSI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
3393 "movdqu\t{$src, $dst|$dst, $src}", []>,
3394 Sched<[SchedWriteVecMoveLS.YMM.RR]>,
3395 VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVDQUYrr">;
3396 }
3398 let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
3399 hasSideEffects = 0, Predicates = [HasAVX,NoVLX] in {
3400 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3401 "movdqa\t{$src, $dst|$dst, $src}",
3402 [(set VR128:$dst, (alignedloadv2i64 addr:$src))]>,
3403 Sched<[SchedWriteVecMoveLS.XMM.RM]>, VEX, VEX_WIG;
3404 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3405 "movdqa\t{$src, $dst|$dst, $src}", []>,
3406 Sched<[SchedWriteVecMoveLS.YMM.RM]>,
3407 VEX, VEX_L, VEX_WIG;
3408 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3409 "vmovdqu\t{$src, $dst|$dst, $src}",
3410 [(set VR128:$dst, (loadv2i64 addr:$src))]>,
3411 Sched<[SchedWriteVecMoveLS.XMM.RM]>,
3412 XS, VEX, VEX_WIG;
3413 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3414 "vmovdqu\t{$src, $dst|$dst, $src}", []>,
3415 Sched<[SchedWriteVecMoveLS.YMM.RM]>,
3416 XS, VEX, VEX_L, VEX_WIG;
3417 }
3419 let mayStore = 1, hasSideEffects = 0, Predicates = [HasAVX,NoVLX] in {
3420 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
3421 (ins i128mem:$dst, VR128:$src),
3422 "movdqa\t{$src, $dst|$dst, $src}",
3423 [(alignedstore (v2i64 VR128:$src), addr:$dst)]>,
3424 Sched<[SchedWriteVecMoveLS.XMM.MR]>, VEX, VEX_WIG;
3425 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
3426 (ins i256mem:$dst, VR256:$src),
3427 "movdqa\t{$src, $dst|$dst, $src}", []>,
3428 Sched<[SchedWriteVecMoveLS.YMM.MR]>, VEX, VEX_L, VEX_WIG;
3429 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3430 "vmovdqu\t{$src, $dst|$dst, $src}",
3431 [(store (v2i64 VR128:$src), addr:$dst)]>,
3432 Sched<[SchedWriteVecMoveLS.XMM.MR]>, XS, VEX, VEX_WIG;
3433 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
3434 "vmovdqu\t{$src, $dst|$dst, $src}",[]>,
3435 Sched<[SchedWriteVecMoveLS.YMM.MR]>, XS, VEX, VEX_L, VEX_WIG;
3436 }
3438 let SchedRW = [SchedWriteVecMoveLS.XMM.RR] in {
3439 let hasSideEffects = 0 in {
3440 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3441 "movdqa\t{$src, $dst|$dst, $src}", []>;
3443 def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3444 "movdqu\t{$src, $dst|$dst, $src}", []>,
3445 XS, Requires<[UseSSE2]>;
3446 }
3449 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
3450 def MOVDQArr_REV : PDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3451 "movdqa\t{$src, $dst|$dst, $src}", []>,
3452 FoldGenData<"MOVDQArr">;
3454 def MOVDQUrr_REV : I<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3455 "movdqu\t{$src, $dst|$dst, $src}", []>,
3456 XS, Requires<[UseSSE2]>, FoldGenData<"MOVDQUrr">;
3457 }
3458 } // SchedRW
3460 let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
3461 hasSideEffects = 0, SchedRW = [SchedWriteVecMoveLS.XMM.RM] in {
3462 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3463 "movdqa\t{$src, $dst|$dst, $src}",
3464 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
3465 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3466 "movdqu\t{$src, $dst|$dst, $src}",
3467 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
3468 XS, Requires<[UseSSE2]>;
3469 }
3471 let mayStore = 1, hasSideEffects = 0,
3472 SchedRW = [SchedWriteVecMoveLS.XMM.MR] in {
3473 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3474 "movdqa\t{$src, $dst|$dst, $src}",
3475 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
3476 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3477 "movdqu\t{$src, $dst|$dst, $src}",
3478 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
3479 XS, Requires<[UseSSE2]>;
3480 }
3482 } // ExeDomain = SSEPackedInt
3484 // Aliases to help the assembler pick two byte VEX encodings by swapping the
3485 // operands relative to the normal instructions to use VEX.R instead of VEX.B.
3486 def : InstAlias<"vmovdqa\t{$src, $dst|$dst, $src}",
3487 (VMOVDQArr_REV VR128L:$dst, VR128H:$src), 0>;
3488 def : InstAlias<"vmovdqa\t{$src, $dst|$dst, $src}",
3489 (VMOVDQAYrr_REV VR256L:$dst, VR256H:$src), 0>;
3490 def : InstAlias<"vmovdqu\t{$src, $dst|$dst, $src}",
3491 (VMOVDQUrr_REV VR128L:$dst, VR128H:$src), 0>;
3492 def : InstAlias<"vmovdqu\t{$src, $dst|$dst, $src}",
3493 (VMOVDQUYrr_REV VR256L:$dst, VR256H:$src), 0>;
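// Illustrative example: "vmovdqa %xmm8, %xmm0" encoded via the 0x6F (load)
// form needs VEX.B to extend the r/m field, forcing a 3-byte VEX prefix; the
// swapped 0x7F (store) form above places %xmm8 in the reg field, where VEX.R
// is enough and the 2-byte VEX prefix can be used.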
3495 // Reversed version with ".s" suffix for GAS compatibility.
3496 def : InstAlias<"vmovdqa.s\t{$src, $dst|$dst, $src}",
3497 (VMOVDQArr_REV VR128:$dst, VR128:$src), 0>;
3498 def : InstAlias<"vmovdqa.s\t{$src, $dst|$dst, $src}",
3499 (VMOVDQAYrr_REV VR256:$dst, VR256:$src), 0>;
3500 def : InstAlias<"vmovdqu.s\t{$src, $dst|$dst, $src}",
3501 (VMOVDQUrr_REV VR128:$dst, VR128:$src), 0>;
3502 def : InstAlias<"vmovdqu.s\t{$src, $dst|$dst, $src}",
3503 (VMOVDQUYrr_REV VR256:$dst, VR256:$src), 0>;
3505 // Reversed version with ".s" suffix for GAS compatibility.
3506 def : InstAlias<"movdqa.s\t{$src, $dst|$dst, $src}",
3507 (MOVDQArr_REV VR128:$dst, VR128:$src), 0>;
3508 def : InstAlias<"movdqu.s\t{$src, $dst|$dst, $src}",
3509 (MOVDQUrr_REV VR128:$dst, VR128:$src), 0>;
3511 let Predicates = [HasAVX, NoVLX] in {
3512 // Additional patterns for other integer sizes.
3513 def : Pat<(alignedloadv4i32 addr:$src),
3514 (VMOVDQArm addr:$src)>;
3515 def : Pat<(alignedloadv8i16 addr:$src),
3516 (VMOVDQArm addr:$src)>;
3517 def : Pat<(alignedloadv16i8 addr:$src),
3518 (VMOVDQArm addr:$src)>;
3519 def : Pat<(loadv4i32 addr:$src),
3520 (VMOVDQUrm addr:$src)>;
3521 def : Pat<(loadv8i16 addr:$src),
3522 (VMOVDQUrm addr:$src)>;
3523 def : Pat<(loadv16i8 addr:$src),
3524 (VMOVDQUrm addr:$src)>;
3526 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3527 (VMOVDQAmr addr:$dst, VR128:$src)>;
3528 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3529 (VMOVDQAmr addr:$dst, VR128:$src)>;
3530 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3531 (VMOVDQAmr addr:$dst, VR128:$src)>;
3532 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3533 (VMOVDQUmr addr:$dst, VR128:$src)>;
3534 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3535 (VMOVDQUmr addr:$dst, VR128:$src)>;
3536 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3537 (VMOVDQUmr addr:$dst, VR128:$src)>;
3538 }
3540 //===---------------------------------------------------------------------===//
3541 // SSE2 - Packed Integer Arithmetic Instructions
3542 //===---------------------------------------------------------------------===//
3544 let ExeDomain = SSEPackedInt in { // SSE integer instructions
3546 /// PDI_binop_rm2 - Simple SSE2 binary operator with different src and dst types
3547 multiclass PDI_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
3548 ValueType DstVT, ValueType SrcVT, RegisterClass RC,
3549 PatFrag memop_frag, X86MemOperand x86memop,
3550 X86FoldableSchedWrite sched, bit Is2Addr = 1> {
3551 let isCommutable = 1 in
3552 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
3553 (ins RC:$src1, RC:$src2),
3554 !if(Is2Addr,
3555 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3556 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3557 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1), RC:$src2)))]>,
3558 Sched<[sched]>;
3559 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
3560 (ins RC:$src1, x86memop:$src2),
3561 !if(Is2Addr,
3562 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3563 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3564 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1),
3565 (memop_frag addr:$src2))))]>,
3566 Sched<[sched.Folded, sched.ReadAfterFold]>;
3567 }
3568 } // ExeDomain = SSEPackedInt
3570 defm PADDB : PDI_binop_all<0xFC, "paddb", add, v16i8, v32i8,
3571 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3572 defm PADDW : PDI_binop_all<0xFD, "paddw", add, v8i16, v16i16,
3573 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3574 defm PADDD : PDI_binop_all<0xFE, "paddd", add, v4i32, v8i32,
3575 SchedWriteVecALU, 1, NoVLX>;
3576 defm PADDQ : PDI_binop_all<0xD4, "paddq", add, v2i64, v4i64,
3577 SchedWriteVecALU, 1, NoVLX>;
3578 defm PADDSB : PDI_binop_all<0xEC, "paddsb", saddsat, v16i8, v32i8,
3579 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3580 defm PADDSW : PDI_binop_all<0xED, "paddsw", saddsat, v8i16, v16i16,
3581 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3582 defm PADDUSB : PDI_binop_all<0xDC, "paddusb", uaddsat, v16i8, v32i8,
3583 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3584 defm PADDUSW : PDI_binop_all<0xDD, "paddusw", uaddsat, v8i16, v16i16,
3585 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3586 defm PMULLW : PDI_binop_all<0xD5, "pmullw", mul, v8i16, v16i16,
3587 SchedWriteVecIMul, 1, NoVLX_Or_NoBWI>;
3588 defm PMULHUW : PDI_binop_all<0xE4, "pmulhuw", mulhu, v8i16, v16i16,
3589 SchedWriteVecIMul, 1, NoVLX_Or_NoBWI>;
3590 defm PMULHW : PDI_binop_all<0xE5, "pmulhw", mulhs, v8i16, v16i16,
3591 SchedWriteVecIMul, 1, NoVLX_Or_NoBWI>;
3592 defm PSUBB : PDI_binop_all<0xF8, "psubb", sub, v16i8, v32i8,
3593 SchedWriteVecALU, 0, NoVLX_Or_NoBWI>;
3594 defm PSUBW : PDI_binop_all<0xF9, "psubw", sub, v8i16, v16i16,
3595 SchedWriteVecALU, 0, NoVLX_Or_NoBWI>;
3596 defm PSUBD : PDI_binop_all<0xFA, "psubd", sub, v4i32, v8i32,
3597 SchedWriteVecALU, 0, NoVLX>;
3598 defm PSUBQ : PDI_binop_all<0xFB, "psubq", sub, v2i64, v4i64,
3599 SchedWriteVecALU, 0, NoVLX>;
3600 defm PSUBSB : PDI_binop_all<0xE8, "psubsb", ssubsat, v16i8, v32i8,
3601 SchedWriteVecALU, 0, NoVLX_Or_NoBWI>;
3602 defm PSUBSW : PDI_binop_all<0xE9, "psubsw", ssubsat, v8i16, v16i16,
3603 SchedWriteVecALU, 0, NoVLX_Or_NoBWI>;
3604 defm PSUBUSB : PDI_binop_all<0xD8, "psubusb", usubsat, v16i8, v32i8,
3605 SchedWriteVecALU, 0, NoVLX_Or_NoBWI>;
3606 defm PSUBUSW : PDI_binop_all<0xD9, "psubusw", usubsat, v8i16, v16i16,
3607 SchedWriteVecALU, 0, NoVLX_Or_NoBWI>;
3608 defm PMINUB : PDI_binop_all<0xDA, "pminub", umin, v16i8, v32i8,
3609 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3610 defm PMINSW : PDI_binop_all<0xEA, "pminsw", smin, v8i16, v16i16,
3611 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3612 defm PMAXUB : PDI_binop_all<0xDE, "pmaxub", umax, v16i8, v32i8,
3613 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3614 defm PMAXSW : PDI_binop_all<0xEE, "pmaxsw", smax, v8i16, v16i16,
3615 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3616 defm PAVGB : PDI_binop_all<0xE0, "pavgb", X86avg, v16i8, v32i8,
3617 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3618 defm PAVGW : PDI_binop_all<0xE3, "pavgw", X86avg, v8i16, v16i16,
3619 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3620 defm PMULUDQ : PDI_binop_all<0xF4, "pmuludq", X86pmuludq, v2i64, v4i64,
3621 SchedWriteVecIMul, 1, NoVLX>;
3623 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in
3624 defm VPMADDWD : PDI_binop_rm2<0xF5, "vpmaddwd", X86vpmaddwd, v4i32, v8i16, VR128,
3625 load, i128mem, SchedWriteVecIMul.XMM, 0>,
3626 VEX_4V, VEX_WIG;
3628 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in
3629 defm VPMADDWDY : PDI_binop_rm2<0xF5, "vpmaddwd", X86vpmaddwd, v8i32, v16i16,
3630 VR256, load, i256mem, SchedWriteVecIMul.YMM,
3631 0>, VEX_4V, VEX_L, VEX_WIG;
3632 let Constraints = "$src1 = $dst" in
3633 defm PMADDWD : PDI_binop_rm2<0xF5, "pmaddwd", X86vpmaddwd, v4i32, v8i16, VR128,
3634 memop, i128mem, SchedWriteVecIMul.XMM>;
3636 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in
3637 defm VPSADBW : PDI_binop_rm2<0xF6, "vpsadbw", X86psadbw, v2i64, v16i8, VR128,
3638 load, i128mem, SchedWritePSADBW.XMM, 0>,
3639 VEX_4V, VEX_WIG;
3640 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in
3641 defm VPSADBWY : PDI_binop_rm2<0xF6, "vpsadbw", X86psadbw, v4i64, v32i8, VR256,
3642 load, i256mem, SchedWritePSADBW.YMM, 0>,
3643 VEX_4V, VEX_L, VEX_WIG;
3644 let Constraints = "$src1 = $dst" in
3645 defm PSADBW : PDI_binop_rm2<0xF6, "psadbw", X86psadbw, v2i64, v16i8, VR128,
3646 memop, i128mem, SchedWritePSADBW.XMM>;
3648 //===---------------------------------------------------------------------===//
3649 // SSE2 - Packed Integer Logical Instructions
3650 //===---------------------------------------------------------------------===//
3652 multiclass PDI_binop_rmi<bits<8> opc, bits<8> opc2, Format ImmForm,
3653 string OpcodeStr, SDNode OpNode,
3654 SDNode OpNode2, RegisterClass RC,
3655 X86FoldableSchedWrite sched,
3656 X86FoldableSchedWrite schedImm,
3657 ValueType DstVT, ValueType SrcVT,
3658 PatFrag ld_frag, bit Is2Addr = 1> {
3659 // src2 is always 128-bit
3660 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
3661 (ins RC:$src1, VR128:$src2),
3662 !if(Is2Addr,
3663 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3664 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3665 [(set RC:$dst, (DstVT (OpNode RC:$src1, (SrcVT VR128:$src2))))]>,
3666 Sched<[sched]>;
3667 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
3668 (ins RC:$src1, i128mem:$src2),
3669 !if(Is2Addr,
3670 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3671 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3672 [(set RC:$dst, (DstVT (OpNode RC:$src1,
3673 (SrcVT (ld_frag addr:$src2)))))]>,
3674 Sched<[sched.Folded, sched.ReadAfterFold]>;
3675 def ri : PDIi8<opc2, ImmForm, (outs RC:$dst),
3676 (ins RC:$src1, u8imm:$src2),
3677 !if(Is2Addr,
3678 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3679 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3680 [(set RC:$dst, (DstVT (OpNode2 RC:$src1, (i8 imm:$src2))))]>,
3681 Sched<[schedImm]>;
3682 }
3684 multiclass PDI_binop_rmi_all<bits<8> opc, bits<8> opc2, Format ImmForm,
3685 string OpcodeStr, SDNode OpNode,
3686 SDNode OpNode2, ValueType DstVT128,
3687 ValueType DstVT256, ValueType SrcVT,
3688 X86SchedWriteWidths sched,
3689 X86SchedWriteWidths schedImm, Predicate prd> {
3690 let Predicates = [HasAVX, prd] in
3691 defm V#NAME : PDI_binop_rmi<opc, opc2, ImmForm, !strconcat("v", OpcodeStr),
3692 OpNode, OpNode2, VR128, sched.XMM, schedImm.XMM,
3693 DstVT128, SrcVT, load, 0>, VEX_4V, VEX_WIG;
3694 let Predicates = [HasAVX2, prd] in
3695 defm V#NAME#Y : PDI_binop_rmi<opc, opc2, ImmForm, !strconcat("v", OpcodeStr),
3696 OpNode, OpNode2, VR256, sched.YMM, schedImm.YMM,
3697 DstVT256, SrcVT, load, 0>, VEX_4V, VEX_L,
3698 VEX_WIG;
3699 let Constraints = "$src1 = $dst" in
3700 defm NAME : PDI_binop_rmi<opc, opc2, ImmForm, OpcodeStr, OpNode, OpNode2,
3701 VR128, sched.XMM, schedImm.XMM, DstVT128, SrcVT,
3702 memop>;
3703 }
3705 multiclass PDI_binop_ri<bits<8> opc, Format ImmForm, string OpcodeStr,
3706 SDNode OpNode, RegisterClass RC, ValueType VT,
3707 X86FoldableSchedWrite sched, bit Is2Addr = 1> {
3708 def ri : PDIi8<opc, ImmForm, (outs RC:$dst), (ins RC:$src1, u8imm:$src2),
3709 !if(Is2Addr,
3710 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3711 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3712 [(set RC:$dst, (VT (OpNode RC:$src1, (i8 imm:$src2))))]>,
3713 Sched<[sched]>;
3714 }
3716 multiclass PDI_binop_ri_all<bits<8> opc, Format ImmForm, string OpcodeStr,
3717 SDNode OpNode, X86SchedWriteWidths sched> {
3718 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in
3719 defm V#NAME : PDI_binop_ri<opc, ImmForm, !strconcat("v", OpcodeStr), OpNode,
3720 VR128, v16i8, sched.XMM, 0>, VEX_4V, VEX_WIG;
3721 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in
3722 defm V#NAME#Y : PDI_binop_ri<opc, ImmForm, !strconcat("v", OpcodeStr), OpNode,
3723 VR256, v32i8, sched.YMM, 0>,
3724 VEX_4V, VEX_L, VEX_WIG;
3725 let Constraints = "$src1 = $dst" in
3726 defm NAME : PDI_binop_ri<opc, ImmForm, OpcodeStr, OpNode, VR128, v16i8,
3727 sched.XMM>;
3728 }
3730 let ExeDomain = SSEPackedInt in {
3731 defm PSLLW : PDI_binop_rmi_all<0xF1, 0x71, MRM6r, "psllw", X86vshl, X86vshli,
3732 v8i16, v16i16, v8i16, SchedWriteVecShift,
3733 SchedWriteVecShiftImm, NoVLX_Or_NoBWI>;
3734 defm PSLLD : PDI_binop_rmi_all<0xF2, 0x72, MRM6r, "pslld", X86vshl, X86vshli,
3735 v4i32, v8i32, v4i32, SchedWriteVecShift,
3736 SchedWriteVecShiftImm, NoVLX>;
3737 defm PSLLQ : PDI_binop_rmi_all<0xF3, 0x73, MRM6r, "psllq", X86vshl, X86vshli,
3738 v2i64, v4i64, v2i64, SchedWriteVecShift,
3739 SchedWriteVecShiftImm, NoVLX>;
3741 defm PSRLW : PDI_binop_rmi_all<0xD1, 0x71, MRM2r, "psrlw", X86vsrl, X86vsrli,
3742 v8i16, v16i16, v8i16, SchedWriteVecShift,
3743 SchedWriteVecShiftImm, NoVLX_Or_NoBWI>;
3744 defm PSRLD : PDI_binop_rmi_all<0xD2, 0x72, MRM2r, "psrld", X86vsrl, X86vsrli,
3745 v4i32, v8i32, v4i32, SchedWriteVecShift,
3746 SchedWriteVecShiftImm, NoVLX>;
3747 defm PSRLQ : PDI_binop_rmi_all<0xD3, 0x73, MRM2r, "psrlq", X86vsrl, X86vsrli,
3748 v2i64, v4i64, v2i64, SchedWriteVecShift,
3749 SchedWriteVecShiftImm, NoVLX>;
3751 defm PSRAW : PDI_binop_rmi_all<0xE1, 0x71, MRM4r, "psraw", X86vsra, X86vsrai,
3752 v8i16, v16i16, v8i16, SchedWriteVecShift,
3753 SchedWriteVecShiftImm, NoVLX_Or_NoBWI>;
3754 defm PSRAD : PDI_binop_rmi_all<0xE2, 0x72, MRM4r, "psrad", X86vsra, X86vsrai,
3755 v4i32, v8i32, v4i32, SchedWriteVecShift,
3756 SchedWriteVecShiftImm, NoVLX>;
3758 defm PSLLDQ : PDI_binop_ri_all<0x73, MRM7r, "pslldq", X86vshldq,
3759 SchedWriteShuffle>;
3760 defm PSRLDQ : PDI_binop_ri_all<0x73, MRM3r, "psrldq", X86vshrdq,
3761 SchedWriteShuffle>;
3762 } // ExeDomain = SSEPackedInt
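// Reminder on the semantics modeled above: the register/memory shift forms
// take the count from the low 64 bits of an XMM operand (even for the YMM
// variants), while the *ri forms take an immediate; counts wider than the
// element size produce zero (or all sign bits for psraw/psrad).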
3764 //===---------------------------------------------------------------------===//
3765 // SSE2 - Packed Integer Comparison Instructions
3766 //===---------------------------------------------------------------------===//
3768 defm PCMPEQB : PDI_binop_all<0x74, "pcmpeqb", X86pcmpeq, v16i8, v32i8,
3769 SchedWriteVecALU, 1, TruePredicate>;
3770 defm PCMPEQW : PDI_binop_all<0x75, "pcmpeqw", X86pcmpeq, v8i16, v16i16,
3771 SchedWriteVecALU, 1, TruePredicate>;
3772 defm PCMPEQD : PDI_binop_all<0x76, "pcmpeqd", X86pcmpeq, v4i32, v8i32,
3773 SchedWriteVecALU, 1, TruePredicate>;
3774 defm PCMPGTB : PDI_binop_all<0x64, "pcmpgtb", X86pcmpgt, v16i8, v32i8,
3775 SchedWriteVecALU, 0, TruePredicate>;
3776 defm PCMPGTW : PDI_binop_all<0x65, "pcmpgtw", X86pcmpgt, v8i16, v16i16,
3777 SchedWriteVecALU, 0, TruePredicate>;
3778 defm PCMPGTD : PDI_binop_all<0x66, "pcmpgtd", X86pcmpgt, v4i32, v8i32,
3779 SchedWriteVecALU, 0, TruePredicate>;
3781 //===---------------------------------------------------------------------===//
3782 // SSE2 - Packed Integer Shuffle Instructions
3783 //===---------------------------------------------------------------------===//
3785 let ExeDomain = SSEPackedInt in {
3786 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt128, ValueType vt256,
3787 SDNode OpNode, X86SchedWriteWidths sched,
3788 Predicate prd> {
3789 let Predicates = [HasAVX, prd] in {
3790 def V#NAME#ri : Ii8<0x70, MRMSrcReg, (outs VR128:$dst),
3791 (ins VR128:$src1, u8imm:$src2),
3792 !strconcat("v", OpcodeStr,
3793 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3794 [(set VR128:$dst,
3795 (vt128 (OpNode VR128:$src1, (i8 imm:$src2))))]>,
3796 VEX, Sched<[sched.XMM]>, VEX_WIG;
3797 def V#NAME#mi : Ii8<0x70, MRMSrcMem, (outs VR128:$dst),
3798 (ins i128mem:$src1, u8imm:$src2),
3799 !strconcat("v", OpcodeStr,
3800 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3801 [(set VR128:$dst,
3802 (vt128 (OpNode (load addr:$src1),
3803 (i8 imm:$src2))))]>, VEX,
3804 Sched<[sched.XMM.Folded]>, VEX_WIG;
3805 }
3807 let Predicates = [HasAVX2, prd] in {
3808 def V#NAME#Yri : Ii8<0x70, MRMSrcReg, (outs VR256:$dst),
3809 (ins VR256:$src1, u8imm:$src2),
3810 !strconcat("v", OpcodeStr,
3811 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3812 [(set VR256:$dst,
3813 (vt256 (OpNode VR256:$src1, (i8 imm:$src2))))]>,
3814 VEX, VEX_L, Sched<[sched.YMM]>, VEX_WIG;
3815 def V#NAME#Ymi : Ii8<0x70, MRMSrcMem, (outs VR256:$dst),
3816 (ins i256mem:$src1, u8imm:$src2),
3817 !strconcat("v", OpcodeStr,
3818 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3819 [(set VR256:$dst,
3820 (vt256 (OpNode (load addr:$src1),
3821 (i8 imm:$src2))))]>, VEX, VEX_L,
3822 Sched<[sched.YMM.Folded]>, VEX_WIG;
3823 }
3825 let Predicates = [UseSSE2] in {
3826 def ri : Ii8<0x70, MRMSrcReg,
3827 (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
3828 !strconcat(OpcodeStr,
3829 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3830 [(set VR128:$dst,
3831 (vt128 (OpNode VR128:$src1, (i8 imm:$src2))))]>,
3832 Sched<[sched.XMM]>;
3833 def mi : Ii8<0x70, MRMSrcMem,
3834 (outs VR128:$dst), (ins i128mem:$src1, u8imm:$src2),
3835 !strconcat(OpcodeStr,
3836 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3837 [(set VR128:$dst,
3838 (vt128 (OpNode (memop addr:$src1),
3839 (i8 imm:$src2))))]>,
3840 Sched<[sched.XMM.Folded]>;
3841 }
3842 }
3843 } // ExeDomain = SSEPackedInt
3845 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, v8i32, X86PShufd,
3846 SchedWriteShuffle, NoVLX>, PD;
3847 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, v16i16, X86PShufhw,
3848 SchedWriteShuffle, NoVLX_Or_NoBWI>, XS;
3849 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, v16i16, X86PShuflw,
3850 SchedWriteShuffle, NoVLX_Or_NoBWI>, XD;
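// Illustrative example of the immediate encoding used by these shuffles: each
// destination element selects a source element with a 2-bit field, so
//   pshufd $0x1B, %xmm0, %xmm0    (0x1B = 0b00011011)
// reverses the four 32-bit elements of %xmm0.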
3852 //===---------------------------------------------------------------------===//
3853 // Packed Integer Pack Instructions (SSE & AVX)
3854 //===---------------------------------------------------------------------===//
3856 let ExeDomain = SSEPackedInt in {
3857 multiclass sse2_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
3858 ValueType ArgVT, SDNode OpNode, RegisterClass RC,
3859 X86MemOperand x86memop, X86FoldableSchedWrite sched,
3860 PatFrag ld_frag, bit Is2Addr = 1> {
3861 def rr : PDI<opc, MRMSrcReg,
3862 (outs RC:$dst), (ins RC:$src1, RC:$src2),
3863 !if(Is2Addr,
3864 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3865 !strconcat(OpcodeStr,
3866 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3867 [(set RC:$dst,
3868 (OutVT (OpNode (ArgVT RC:$src1), RC:$src2)))]>,
3869 Sched<[sched]>;
3870 def rm : PDI<opc, MRMSrcMem,
3871 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3872 !if(Is2Addr,
3873 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3874 !strconcat(OpcodeStr,
3875 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3876 [(set RC:$dst,
3877 (OutVT (OpNode (ArgVT RC:$src1),
3878 (ld_frag addr:$src2))))]>,
3879 Sched<[sched.Folded, sched.ReadAfterFold]>;
3880 }
3882 multiclass sse4_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
3883 ValueType ArgVT, SDNode OpNode, RegisterClass RC,
3884 X86MemOperand x86memop, X86FoldableSchedWrite sched,
3885 PatFrag ld_frag, bit Is2Addr = 1> {
3886 def rr : SS48I<opc, MRMSrcReg,
3887 (outs RC:$dst), (ins RC:$src1, RC:$src2),
3888 !if(Is2Addr,
3889 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3890 !strconcat(OpcodeStr,
3891 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3892 [(set RC:$dst,
3893 (OutVT (OpNode (ArgVT RC:$src1), RC:$src2)))]>,
3894 Sched<[sched]>;
3895 def rm : SS48I<opc, MRMSrcMem,
3896 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3897 !if(Is2Addr,
3898 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3899 !strconcat(OpcodeStr,
3900 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3901 [(set RC:$dst,
3902 (OutVT (OpNode (ArgVT RC:$src1),
3903 (ld_frag addr:$src2))))]>,
3904 Sched<[sched.Folded, sched.ReadAfterFold]>;
3905 }
3907 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
3908 defm VPACKSSWB : sse2_pack<0x63, "vpacksswb", v16i8, v8i16, X86Packss, VR128,
3909 i128mem, SchedWriteShuffle.XMM, load, 0>,
3910 VEX_4V, VEX_WIG;
3911 defm VPACKSSDW : sse2_pack<0x6B, "vpackssdw", v8i16, v4i32, X86Packss, VR128,
3912 i128mem, SchedWriteShuffle.XMM, load, 0>,
3913 VEX_4V, VEX_WIG;
3915 defm VPACKUSWB : sse2_pack<0x67, "vpackuswb", v16i8, v8i16, X86Packus, VR128,
3916 i128mem, SchedWriteShuffle.XMM, load, 0>,
3917 VEX_4V, VEX_WIG;
3918 defm VPACKUSDW : sse4_pack<0x2B, "vpackusdw", v8i16, v4i32, X86Packus, VR128,
3919 i128mem, SchedWriteShuffle.XMM, load, 0>,
3920 VEX_4V, VEX_WIG;
3921 }
3923 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
3924 defm VPACKSSWBY : sse2_pack<0x63, "vpacksswb", v32i8, v16i16, X86Packss, VR256,
3925 i256mem, SchedWriteShuffle.YMM, load, 0>,
3926 VEX_4V, VEX_L, VEX_WIG;
3927 defm VPACKSSDWY : sse2_pack<0x6B, "vpackssdw", v16i16, v8i32, X86Packss, VR256,
3928 i256mem, SchedWriteShuffle.YMM, load, 0>,
3929 VEX_4V, VEX_L, VEX_WIG;
3931 defm VPACKUSWBY : sse2_pack<0x67, "vpackuswb", v32i8, v16i16, X86Packus, VR256,
3932 i256mem, SchedWriteShuffle.YMM, load, 0>,
3933 VEX_4V, VEX_L, VEX_WIG;
3934 defm VPACKUSDWY : sse4_pack<0x2B, "vpackusdw", v16i16, v8i32, X86Packus, VR256,
3935 i256mem, SchedWriteShuffle.YMM, load, 0>,
3936 VEX_4V, VEX_L, VEX_WIG;
3937 }
3939 let Constraints = "$src1 = $dst" in {
3940 defm PACKSSWB : sse2_pack<0x63, "packsswb", v16i8, v8i16, X86Packss, VR128,
3941 i128mem, SchedWriteShuffle.XMM, memop>;
3942 defm PACKSSDW : sse2_pack<0x6B, "packssdw", v8i16, v4i32, X86Packss, VR128,
3943 i128mem, SchedWriteShuffle.XMM, memop>;
3945 defm PACKUSWB : sse2_pack<0x67, "packuswb", v16i8, v8i16, X86Packus, VR128,
3946 i128mem, SchedWriteShuffle.XMM, memop>;
3948 defm PACKUSDW : sse4_pack<0x2B, "packusdw", v8i16, v4i32, X86Packus, VR128,
3949 i128mem, SchedWriteShuffle.XMM, memop>;
3950 }
3951 } // ExeDomain = SSEPackedInt
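// Semantics reminder (illustrative): PACKSSWB/PACKSSDW narrow each signed
// element with signed saturation (300 -> 127, -200 -> -128 for packsswb),
// while PACKUSWB/PACKUSDW clamp the signed inputs into the unsigned range
// (-5 -> 0, 300 -> 255 for packuswb).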
3953 //===---------------------------------------------------------------------===//
3954 // SSE2 - Packed Integer Unpack Instructions
3955 //===---------------------------------------------------------------------===//
3957 let ExeDomain = SSEPackedInt in {
3958 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
3959 SDNode OpNode, RegisterClass RC, X86MemOperand x86memop,
3960 X86FoldableSchedWrite sched, PatFrag ld_frag,
3961 bit Is2Addr = 1> {
3962 def rr : PDI<opc, MRMSrcReg,
3963 (outs RC:$dst), (ins RC:$src1, RC:$src2),
3964 !if(Is2Addr,
3965 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
3966 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3967 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))]>,
3968 Sched<[sched]>;
3969 def rm : PDI<opc, MRMSrcMem,
3970 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3971 !if(Is2Addr,
3972 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
3973 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3974 [(set RC:$dst, (vt (OpNode RC:$src1, (ld_frag addr:$src2))))]>,
3975 Sched<[sched.Folded, sched.ReadAfterFold]>;
3976 }
3978 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
3979 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Unpckl, VR128,
3980 i128mem, SchedWriteShuffle.XMM, load, 0>,
3981 VEX_4V, VEX_WIG;
3982 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Unpckl, VR128,
3983 i128mem, SchedWriteShuffle.XMM, load, 0>,
3984 VEX_4V, VEX_WIG;
3985 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Unpckh, VR128,
3986 i128mem, SchedWriteShuffle.XMM, load, 0>,
3987 VEX_4V, VEX_WIG;
3988 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Unpckh, VR128,
3989 i128mem, SchedWriteShuffle.XMM, load, 0>,
3990 VEX_4V, VEX_WIG;
3991 }
3993 let Predicates = [HasAVX, NoVLX] in {
3994 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Unpckl, VR128,
3995 i128mem, SchedWriteShuffle.XMM, load, 0>,
3996 VEX_4V, VEX_WIG;
3997 defm VPUNPCKLQDQ : sse2_unpack<0x6C, "vpunpcklqdq", v2i64, X86Unpckl, VR128,
3998 i128mem, SchedWriteShuffle.XMM, load, 0>,
3999 VEX_4V, VEX_WIG;
4000 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Unpckh, VR128,
4001 i128mem, SchedWriteShuffle.XMM, load, 0>,
4002 VEX_4V, VEX_WIG;
4003 defm VPUNPCKHQDQ : sse2_unpack<0x6D, "vpunpckhqdq", v2i64, X86Unpckh, VR128,
4004 i128mem, SchedWriteShuffle.XMM, load, 0>,
4005 VEX_4V, VEX_WIG;
4006 }
4008 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
4009 defm VPUNPCKLBWY : sse2_unpack<0x60, "vpunpcklbw", v32i8, X86Unpckl, VR256,
4010 i256mem, SchedWriteShuffle.YMM, load, 0>,
4011 VEX_4V, VEX_L, VEX_WIG;
4012 defm VPUNPCKLWDY : sse2_unpack<0x61, "vpunpcklwd", v16i16, X86Unpckl, VR256,
4013 i256mem, SchedWriteShuffle.YMM, load, 0>,
4014 VEX_4V, VEX_L, VEX_WIG;
4015 defm VPUNPCKHBWY : sse2_unpack<0x68, "vpunpckhbw", v32i8, X86Unpckh, VR256,
4016 i256mem, SchedWriteShuffle.YMM, load, 0>,
4017 VEX_4V, VEX_L, VEX_WIG;
4018 defm VPUNPCKHWDY : sse2_unpack<0x69, "vpunpckhwd", v16i16, X86Unpckh, VR256,
4019 i256mem, SchedWriteShuffle.YMM, load, 0>,
4020 VEX_4V, VEX_L, VEX_WIG;
4021 }
4023 let Predicates = [HasAVX2, NoVLX] in {
4024 defm VPUNPCKLDQY : sse2_unpack<0x62, "vpunpckldq", v8i32, X86Unpckl, VR256,
4025 i256mem, SchedWriteShuffle.YMM, load, 0>,
4026 VEX_4V, VEX_L, VEX_WIG;
4027 defm VPUNPCKLQDQY : sse2_unpack<0x6C, "vpunpcklqdq", v4i64, X86Unpckl, VR256,
4028 i256mem, SchedWriteShuffle.YMM, load, 0>,
4029 VEX_4V, VEX_L, VEX_WIG;
4030 defm VPUNPCKHDQY : sse2_unpack<0x6A, "vpunpckhdq", v8i32, X86Unpckh, VR256,
4031 i256mem, SchedWriteShuffle.YMM, load, 0>,
4032 VEX_4V, VEX_L, VEX_WIG;
4033 defm VPUNPCKHQDQY : sse2_unpack<0x6D, "vpunpckhqdq", v4i64, X86Unpckh, VR256,
4034 i256mem, SchedWriteShuffle.YMM, load, 0>,
4035 VEX_4V, VEX_L, VEX_WIG;
4036 }
4038 let Constraints = "$src1 = $dst" in {
4039 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Unpckl, VR128,
4040 i128mem, SchedWriteShuffle.XMM, memop>;
4041 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Unpckl, VR128,
4042 i128mem, SchedWriteShuffle.XMM, memop>;
4043 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Unpckl, VR128,
4044 i128mem, SchedWriteShuffle.XMM, memop>;
4045 defm PUNPCKLQDQ : sse2_unpack<0x6C, "punpcklqdq", v2i64, X86Unpckl, VR128,
4046 i128mem, SchedWriteShuffle.XMM, memop>;
4048 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Unpckh, VR128,
4049 i128mem, SchedWriteShuffle.XMM, memop>;
4050 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Unpckh, VR128,
4051 i128mem, SchedWriteShuffle.XMM, memop>;
4052 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Unpckh, VR128,
4053 i128mem, SchedWriteShuffle.XMM, memop>;
4054 defm PUNPCKHQDQ : sse2_unpack<0x6D, "punpckhqdq", v2i64, X86Unpckh, VR128,
4055 i128mem, SchedWriteShuffle.XMM, memop>;
4057 } // ExeDomain = SSEPackedInt
4059 //===---------------------------------------------------------------------===//
4060 // SSE2 - Packed Integer Extract and Insert
4061 //===---------------------------------------------------------------------===//
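// PINSRW replaces the word element selected by imm8[2:0] with the low 16 bits of
// the GR32/memory source; PEXTRW zero-extends the selected word element into the
// destination GPR.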
4063 let ExeDomain = SSEPackedInt in {
4064 multiclass sse2_pinsrw<bit Is2Addr = 1> {
4065 def rr : Ii8<0xC4, MRMSrcReg,
4066 (outs VR128:$dst), (ins VR128:$src1,
4067 GR32orGR64:$src2, u8imm:$src3),
4069 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
4070 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4072 (X86pinsrw VR128:$src1, GR32orGR64:$src2, imm:$src3))]>,
4073 Sched<[WriteVecInsert, ReadDefault, ReadInt2Fpu]>;
4074 def rm : Ii8<0xC4, MRMSrcMem,
4075 (outs VR128:$dst), (ins VR128:$src1,
4076 i16mem:$src2, u8imm:$src3),
4078 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
4079 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4081 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
4083 Sched<[WriteVecInsert.Folded, WriteVecInsert.ReadAfterFold]>;
4087 let Predicates = [HasAVX, NoBWI] in
4088 def VPEXTRWrr : Ii8<0xC5, MRMSrcReg,
4089 (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
4090 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4091 [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
4093 PD, VEX, Sched<[WriteVecExtract]>;
4094 def PEXTRWrr : PDIi8<0xC5, MRMSrcReg,
4095 (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
4096 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4097 [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
4099 Sched<[WriteVecExtract]>;
4102 let Predicates = [HasAVX, NoBWI] in
4103 defm VPINSRW : sse2_pinsrw<0>, PD, VEX_4V;
4105 let Predicates = [UseSSE2], Constraints = "$src1 = $dst" in
4106 defm PINSRW : sse2_pinsrw, PD;
4108 } // ExeDomain = SSEPackedInt
4110 //===---------------------------------------------------------------------===//
4111 // SSE2 - Packed Mask Creation
4112 //===---------------------------------------------------------------------===//
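// PMOVMSKB gathers the most significant bit of each byte element into the low
// bits of the destination GPR (16 bits for an XMM source, 32 bits for a YMM
// source) and zeroes the remaining bits.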
4114 let ExeDomain = SSEPackedInt in {
4116 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
4118 "pmovmskb\t{$src, $dst|$dst, $src}",
4119 [(set GR32orGR64:$dst, (X86movmsk (v16i8 VR128:$src)))]>,
4120 Sched<[WriteVecMOVMSK]>, VEX, VEX_WIG;
4122 let Predicates = [HasAVX2] in {
4123 def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
4125 "pmovmskb\t{$src, $dst|$dst, $src}",
4126 [(set GR32orGR64:$dst, (X86movmsk (v32i8 VR256:$src)))]>,
4127 Sched<[WriteVecMOVMSKY]>, VEX, VEX_L, VEX_WIG;
4130 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst), (ins VR128:$src),
4131 "pmovmskb\t{$src, $dst|$dst, $src}",
4132 [(set GR32orGR64:$dst, (X86movmsk (v16i8 VR128:$src)))]>,
4133 Sched<[WriteVecMOVMSK]>;
4135 } // ExeDomain = SSEPackedInt
4137 //===---------------------------------------------------------------------===//
4138 // SSE2 - Conditional Store
4139 //===---------------------------------------------------------------------===//
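// MASKMOVDQU stores only the byte elements of $src whose corresponding byte in
// $mask has its most significant bit set; the store address is implicitly
// EDI/RDI, hence the Uses lists below.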
4141 let ExeDomain = SSEPackedInt, SchedRW = [SchedWriteVecMoveLS.XMM.MR] in {
4142 let Uses = [EDI], Predicates = [HasAVX,Not64BitMode] in
4143 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
4144 (ins VR128:$src, VR128:$mask),
4145 "maskmovdqu\t{$mask, $src|$src, $mask}",
4146 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>,
4148 let Uses = [RDI], Predicates = [HasAVX,In64BitMode] in
4149 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
4150 (ins VR128:$src, VR128:$mask),
4151 "maskmovdqu\t{$mask, $src|$src, $mask}",
4152 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>,
4155 let Uses = [EDI], Predicates = [UseSSE2,Not64BitMode] in
4156 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
4157 "maskmovdqu\t{$mask, $src|$src, $mask}",
4158 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
4159 let Uses = [RDI], Predicates = [UseSSE2,In64BitMode] in
4160 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
4161 "maskmovdqu\t{$mask, $src|$src, $mask}",
4162 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
4164 } // ExeDomain = SSEPackedInt
4166 //===---------------------------------------------------------------------===//
4167 // SSE2 - Move Doubleword/Quadword
4168 //===---------------------------------------------------------------------===//
4170 //===---------------------------------------------------------------------===//
4171 // Move Int Doubleword to Packed Double Int
4173 let ExeDomain = SSEPackedInt in {
4174 def VMOVDI2PDIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
4175 "movd\t{$src, $dst|$dst, $src}",
4177 (v4i32 (scalar_to_vector GR32:$src)))]>,
4178 VEX, Sched<[WriteVecMoveFromGpr]>;
4179 def VMOVDI2PDIrm : VS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4180 "movd\t{$src, $dst|$dst, $src}",
4182 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
4183 VEX, Sched<[WriteVecLoad]>;
4184 def VMOV64toPQIrr : VRS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4185 "movq\t{$src, $dst|$dst, $src}",
4187 (v2i64 (scalar_to_vector GR64:$src)))]>,
4188 VEX, Sched<[WriteVecMoveFromGpr]>;
4189 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
4190 def VMOV64toPQIrm : VRS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4191 "movq\t{$src, $dst|$dst, $src}", []>,
4192 VEX, Sched<[WriteVecLoad]>;
4193 let isCodeGenOnly = 1 in
4194 def VMOV64toSDrr : VRS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
4195 "movq\t{$src, $dst|$dst, $src}",
4196 [(set FR64:$dst, (bitconvert GR64:$src))]>,
4197 VEX, Sched<[WriteVecMoveFromGpr]>;
4199 def MOVDI2PDIrr : S2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
4200 "movd\t{$src, $dst|$dst, $src}",
4202 (v4i32 (scalar_to_vector GR32:$src)))]>,
4203 Sched<[WriteVecMoveFromGpr]>;
4204 def MOVDI2PDIrm : S2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4205 "movd\t{$src, $dst|$dst, $src}",
4207 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
4208 Sched<[WriteVecLoad]>;
4209 def MOV64toPQIrr : RS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4210 "movq\t{$src, $dst|$dst, $src}",
4212 (v2i64 (scalar_to_vector GR64:$src)))]>,
4213 Sched<[WriteVecMoveFromGpr]>;
4214 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
4215 def MOV64toPQIrm : RS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4216 "movq\t{$src, $dst|$dst, $src}", []>,
4217 Sched<[WriteVecLoad]>;
4218 let isCodeGenOnly = 1 in
4219 def MOV64toSDrr : RS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
4220 "movq\t{$src, $dst|$dst, $src}",
4221 [(set FR64:$dst, (bitconvert GR64:$src))]>,
4222 Sched<[WriteVecMoveFromGpr]>;
4223 } // ExeDomain = SSEPackedInt
4225 //===---------------------------------------------------------------------===//
4226 // Move Int Doubleword to Single Scalar
4228 let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {
4229 def VMOVDI2SSrr : VS2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
4230 "movd\t{$src, $dst|$dst, $src}",
4231 [(set FR32:$dst, (bitconvert GR32:$src))]>,
4232 VEX, Sched<[WriteVecMoveFromGpr]>;
4234 def VMOVDI2SSrm : VS2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
4235 "movd\t{$src, $dst|$dst, $src}",
4236 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
4237 VEX, Sched<[WriteVecLoad]>;
4238 def MOVDI2SSrr : S2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
4239 "movd\t{$src, $dst|$dst, $src}",
4240 [(set FR32:$dst, (bitconvert GR32:$src))]>,
4241 Sched<[WriteVecMoveFromGpr]>;
4243 def MOVDI2SSrm : S2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
4244 "movd\t{$src, $dst|$dst, $src}",
4245 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
4246 Sched<[WriteVecLoad]>;
4247 } // ExeDomain = SSEPackedInt, isCodeGenOnly = 1
4249 //===---------------------------------------------------------------------===//
4250 // Move Packed Doubleword Int to Packed Double Int
4252 let ExeDomain = SSEPackedInt in {
4253 def VMOVPDI2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
4254 "movd\t{$src, $dst|$dst, $src}",
4255 [(set GR32:$dst, (extractelt (v4i32 VR128:$src),
4257 Sched<[WriteVecMoveToGpr]>;
4258 def VMOVPDI2DImr : VS2I<0x7E, MRMDestMem, (outs),
4259 (ins i32mem:$dst, VR128:$src),
4260 "movd\t{$src, $dst|$dst, $src}",
4261 [(store (i32 (extractelt (v4i32 VR128:$src),
4262 (iPTR 0))), addr:$dst)]>,
4263 VEX, Sched<[WriteVecStore]>;
4264 def MOVPDI2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
4265 "movd\t{$src, $dst|$dst, $src}",
4266 [(set GR32:$dst, (extractelt (v4i32 VR128:$src),
4268 Sched<[WriteVecMoveToGpr]>;
4269 def MOVPDI2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
4270 "movd\t{$src, $dst|$dst, $src}",
4271 [(store (i32 (extractelt (v4i32 VR128:$src),
4272 (iPTR 0))), addr:$dst)]>,
4273 Sched<[WriteVecStore]>;
4274 } // ExeDomain = SSEPackedInt
4276 //===---------------------------------------------------------------------===//
4277 // Move Packed Quadword Int first element to Quadword Int
4279 let ExeDomain = SSEPackedInt in {
4280 let SchedRW = [WriteVecMoveToGpr] in {
4281 def VMOVPQIto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
4282 "movq\t{$src, $dst|$dst, $src}",
4283 [(set GR64:$dst, (extractelt (v2i64 VR128:$src),
4287 def MOVPQIto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
4288 "movq\t{$src, $dst|$dst, $src}",
4289 [(set GR64:$dst, (extractelt (v2i64 VR128:$src),
4293 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
4294 def VMOVPQIto64mr : VRS2I<0x7E, MRMDestMem, (outs),
4295 (ins i64mem:$dst, VR128:$src),
4296 "movq\t{$src, $dst|$dst, $src}", []>,
4297 VEX, Sched<[WriteVecStore]>;
4298 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
4299 def MOVPQIto64mr : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
4300 "movq\t{$src, $dst|$dst, $src}", []>,
4301 Sched<[WriteVecStore]>;
4302 } // ExeDomain = SSEPackedInt
4304 //===---------------------------------------------------------------------===//
4305 // Bitcast FR64 <-> GR64
4307 let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {
4308 let Predicates = [UseAVX] in
4309 def VMOV64toSDrm : VS2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
4310 "movq\t{$src, $dst|$dst, $src}",
4311 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
4312 VEX, Sched<[WriteVecLoad]>;
4313 def VMOVSDto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
4314 "movq\t{$src, $dst|$dst, $src}",
4315 [(set GR64:$dst, (bitconvert FR64:$src))]>,
4316 VEX, Sched<[WriteVecMoveToGpr]>;
4317 def VMOVSDto64mr : VRS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
4318 "movq\t{$src, $dst|$dst, $src}",
4319 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>,
4320 VEX, Sched<[WriteVecStore]>;
4322 def MOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
4323 "movq\t{$src, $dst|$dst, $src}",
4324 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
4325 Sched<[WriteVecLoad]>;
4326 def MOVSDto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
4327 "movq\t{$src, $dst|$dst, $src}",
4328 [(set GR64:$dst, (bitconvert FR64:$src))]>,
4329 Sched<[WriteVecMoveToGpr]>;
4330 def MOVSDto64mr : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
4331 "movq\t{$src, $dst|$dst, $src}",
4332 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>,
4333 Sched<[WriteVecStore]>;
4334 } // ExeDomain = SSEPackedInt, isCodeGenOnly = 1
4336 //===---------------------------------------------------------------------===//
4337 // Move Scalar Single to Double Int
4339 let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {
4340 def VMOVSS2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
4341 "movd\t{$src, $dst|$dst, $src}",
4342 [(set GR32:$dst, (bitconvert FR32:$src))]>,
4343 VEX, Sched<[WriteVecMoveToGpr]>;
4344 def VMOVSS2DImr : VS2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
4345 "movd\t{$src, $dst|$dst, $src}",
4346 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>,
4347 VEX, Sched<[WriteVecStore]>;
4348 def MOVSS2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
4349 "movd\t{$src, $dst|$dst, $src}",
4350 [(set GR32:$dst, (bitconvert FR32:$src))]>,
4351 Sched<[WriteVecMoveToGpr]>;
4352 def MOVSS2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
4353 "movd\t{$src, $dst|$dst, $src}",
4354 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>,
4355 Sched<[WriteVecStore]>;
4356 } // ExeDomain = SSEPackedInt, isCodeGenOnly = 1
4358 let Predicates = [UseAVX] in {
4359 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
4360 (VMOVDI2PDIrr GR32:$src)>;
4362 def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
4363 (VMOV64toPQIrr GR64:$src)>;
4365 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
4366 (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
4367 (SUBREG_TO_REG (i64 0), (v2i64 (VMOV64toPQIrr GR64:$src)), sub_xmm)>;
4368 // AVX 128-bit movd/movq instructions zero the upper bits of the destination
4369 // XMM register; when the destination is a YMM register, the upper 128 bits are zeroed as well.
4370 def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector (zextloadi64i32 addr:$src))))),
4371 (VMOVDI2PDIrm addr:$src)>;
4372 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
4373 (VMOVDI2PDIrm addr:$src)>;
4374 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
4375 (VMOVDI2PDIrm addr:$src)>;
4376 def : Pat<(v4i32 (X86vzload addr:$src)),
4377 (VMOVDI2PDIrm addr:$src)>;
4378 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
4379 (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
4380 (SUBREG_TO_REG (i32 0), (v4i32 (VMOVDI2PDIrm addr:$src)), sub_xmm)>;
4381 def : Pat<(v8i32 (X86vzload addr:$src)),
4382 (SUBREG_TO_REG (i64 0), (v4i32 (VMOVDI2PDIrm addr:$src)), sub_xmm)>;
4383 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
4384 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
4385 (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
4386 (SUBREG_TO_REG (i32 0), (v4i32 (VMOVDI2PDIrr GR32:$src)), sub_xmm)>;
4389 let Predicates = [UseSSE2] in {
4390 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
4391 (MOVDI2PDIrr GR32:$src)>;
4393 def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
4394 (MOV64toPQIrr GR64:$src)>;
4395 def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector (zextloadi64i32 addr:$src))))),
4396 (MOVDI2PDIrm addr:$src)>;
4397 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
4398 (MOVDI2PDIrm addr:$src)>;
4399 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
4400 (MOVDI2PDIrm addr:$src)>;
4401 def : Pat<(v4i32 (X86vzload addr:$src)),
4402 (MOVDI2PDIrm addr:$src)>;
4405 // Before the MC layer of LLVM existed, clang emitted "movd" assembly instead of
4406 // "movq" due to a MacOS parsing limitation. To parse such old assembly, the aliases below accept "movd" with 64-bit GPR operands.
4408 def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
4409 (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
4410 def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
4411 (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
4412 // Allow "vmovd" but print "vmovq" since we don't need compatibility for AVX.
4413 def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
4414 (VMOV64toPQIrr VR128:$dst, GR64:$src), 0>;
4415 def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
4416 (VMOVPQIto64rr GR64:$dst, VR128:$src), 0>;
4418 //===---------------------------------------------------------------------===//
4419 // SSE2 - Move Quadword
4420 //===---------------------------------------------------------------------===//
4422 //===---------------------------------------------------------------------===//
4423 // Move Quadword Int to Packed Quadword Int
4426 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecLoad] in {
4427 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4428 "vmovq\t{$src, $dst|$dst, $src}",
4430 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
4431 VEX, Requires<[UseAVX]>, VEX_WIG;
4432 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4433 "movq\t{$src, $dst|$dst, $src}",
4435 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>,
4436 XS, Requires<[UseSSE2]>; // SSE2 instruction with XS Prefix
4437 } // ExeDomain, SchedRW
4439 //===---------------------------------------------------------------------===//
4440 // Move Packed Quadword Int to Quadword Int
4442 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecStore] in {
4443 def VMOVPQI2QImr : VS2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
4444 "movq\t{$src, $dst|$dst, $src}",
4445 [(store (i64 (extractelt (v2i64 VR128:$src),
4446 (iPTR 0))), addr:$dst)]>,
4448 def MOVPQI2QImr : S2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
4449 "movq\t{$src, $dst|$dst, $src}",
4450 [(store (i64 (extractelt (v2i64 VR128:$src),
4451 (iPTR 0))), addr:$dst)]>;
4452 } // ExeDomain, SchedRW
4454 // For disassembler only
4455 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
4456 SchedRW = [SchedWriteVecLogic.XMM] in {
4457 def VMOVPQI2QIrr : VS2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
4458 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_WIG;
4459 def MOVPQI2QIrr : S2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
4460 "movq\t{$src, $dst|$dst, $src}", []>;
4463 // Aliases that help the assembler pick two-byte VEX encodings: the operands are
4464 // swapped relative to the normal instructions so that VEX.R is used instead of VEX.B.
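// For example, "vmovq %xmm8, %xmm0" can match VMOVPQI2QIrr with the extended
// register placed in the ModRM.reg field; the two-byte (C5) VEX prefix provides
// VEX.R, while extending ModRM.r/m would need VEX.B and therefore the
// three-byte (C4) prefix.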
4465 def : InstAlias<"vmovq\t{$src, $dst|$dst, $src}",
4466 (VMOVPQI2QIrr VR128L:$dst, VR128H:$src), 0>;
4468 def : InstAlias<"vmovq.s\t{$src, $dst|$dst, $src}",
4469 (VMOVPQI2QIrr VR128:$dst, VR128:$src), 0>;
4470 def : InstAlias<"movq.s\t{$src, $dst|$dst, $src}",
4471 (MOVPQI2QIrr VR128:$dst, VR128:$src), 0>;
4473 let Predicates = [UseAVX] in {
4474 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
4475 (VMOVQI2PQIrm addr:$src)>;
4476 def : Pat<(v2i64 (X86vzload addr:$src)),
4477 (VMOVQI2PQIrm addr:$src)>;
4478 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
4479 (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
4480 (SUBREG_TO_REG (i64 0), (v2i64 (VMOVQI2PQIrm addr:$src)), sub_xmm)>;
4481 def : Pat<(v4i64 (X86vzload addr:$src)),
4482 (SUBREG_TO_REG (i64 0), (v2i64 (VMOVQI2PQIrm addr:$src)), sub_xmm)>;
4485 let Predicates = [UseSSE2] in {
4486 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
4487 (MOVQI2PQIrm addr:$src)>;
4488 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVQI2PQIrm addr:$src)>;
4491 //===---------------------------------------------------------------------===//
4492 // Move from XMM to XMM and clear the upper 64 bits. Note: there is an error in
4493 // the IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
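// For example, after "movq %xmm1, %xmm0" we have xmm0[63:0] = xmm1[63:0] and
// xmm0[127:64] = 0, which is what the X86vzmovl patterns below rely on.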
4495 let ExeDomain = SSEPackedInt, SchedRW = [SchedWriteVecLogic.XMM] in {
4496 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4497 "vmovq\t{$src, $dst|$dst, $src}",
4498 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
4499 XS, VEX, Requires<[UseAVX]>, VEX_WIG;
4500 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4501 "movq\t{$src, $dst|$dst, $src}",
4502 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
4503 XS, Requires<[UseSSE2]>;
4504 } // ExeDomain, SchedRW
4506 let Predicates = [UseAVX] in {
4507 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
4508 (VMOVZPQILo2PQIrr VR128:$src)>;
4510 let Predicates = [UseSSE2] in {
4511 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
4512 (MOVZPQILo2PQIrr VR128:$src)>;
4515 //===---------------------------------------------------------------------===//
4516 // SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP
4517 //===---------------------------------------------------------------------===//
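// MOVSHDUP duplicates the odd-index single-precision elements (result lanes
// [1,1,3,3]); MOVSLDUP duplicates the even-index elements (lanes [0,0,2,2]).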
4519 multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
4520 ValueType vt, RegisterClass RC, PatFrag mem_frag,
4521 X86MemOperand x86memop, X86FoldableSchedWrite sched> {
4522 def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
4523 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4524 [(set RC:$dst, (vt (OpNode RC:$src)))]>,
4526 def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
4527 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4528 [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>,
4529 Sched<[sched.Folded]>;
4532 let Predicates = [HasAVX, NoVLX] in {
4533 defm VMOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
4534 v4f32, VR128, loadv4f32, f128mem,
4535 SchedWriteFShuffle.XMM>, VEX, VEX_WIG;
4536 defm VMOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
4537 v4f32, VR128, loadv4f32, f128mem,
4538 SchedWriteFShuffle.XMM>, VEX, VEX_WIG;
4539 defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
4540 v8f32, VR256, loadv8f32, f256mem,
4541 SchedWriteFShuffle.YMM>, VEX, VEX_L, VEX_WIG;
4542 defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
4543 v8f32, VR256, loadv8f32, f256mem,
4544 SchedWriteFShuffle.YMM>, VEX, VEX_L, VEX_WIG;
4546 defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
4547 memopv4f32, f128mem, SchedWriteFShuffle.XMM>;
4548 defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
4549 memopv4f32, f128mem, SchedWriteFShuffle.XMM>;
4551 let Predicates = [HasAVX, NoVLX] in {
4552 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
4553 (VMOVSHDUPrr VR128:$src)>;
4554 def : Pat<(v4i32 (X86Movshdup (load addr:$src))),
4555 (VMOVSHDUPrm addr:$src)>;
4556 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
4557 (VMOVSLDUPrr VR128:$src)>;
4558 def : Pat<(v4i32 (X86Movsldup (load addr:$src))),
4559 (VMOVSLDUPrm addr:$src)>;
4560 def : Pat<(v8i32 (X86Movshdup VR256:$src)),
4561 (VMOVSHDUPYrr VR256:$src)>;
4562 def : Pat<(v8i32 (X86Movshdup (load addr:$src))),
4563 (VMOVSHDUPYrm addr:$src)>;
4564 def : Pat<(v8i32 (X86Movsldup VR256:$src)),
4565 (VMOVSLDUPYrr VR256:$src)>;
4566 def : Pat<(v8i32 (X86Movsldup (load addr:$src))),
4567 (VMOVSLDUPYrm addr:$src)>;
4570 let Predicates = [UseSSE3] in {
4571 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
4572 (MOVSHDUPrr VR128:$src)>;
4573 def : Pat<(v4i32 (X86Movshdup (memop addr:$src))),
4574 (MOVSHDUPrm addr:$src)>;
4575 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
4576 (MOVSLDUPrr VR128:$src)>;
4577 def : Pat<(v4i32 (X86Movsldup (memop addr:$src))),
4578 (MOVSLDUPrm addr:$src)>;
4581 //===---------------------------------------------------------------------===//
4582 // SSE3 - Replicate Double FP - MOVDDUP
4583 //===---------------------------------------------------------------------===//
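// MOVDDUP broadcasts the low double-precision element: dst[63:0] = dst[127:64]
// = src[63:0], so the memory form only needs a 64-bit load.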
4585 multiclass sse3_replicate_dfp<string OpcodeStr, X86SchedWriteWidths sched> {
4586 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4587 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4588 [(set VR128:$dst, (v2f64 (X86Movddup VR128:$src)))]>,
4590 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
4591 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4594 (scalar_to_vector (loadf64 addr:$src)))))]>,
4595 Sched<[sched.XMM.Folded]>;
4598 // FIXME: Merge with above classes when there are patterns for the ymm version
4599 multiclass sse3_replicate_dfp_y<string OpcodeStr, X86SchedWriteWidths sched> {
4600 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
4601 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4602 [(set VR256:$dst, (v4f64 (X86Movddup VR256:$src)))]>,
4604 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
4605 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4607 (v4f64 (X86Movddup (loadv4f64 addr:$src))))]>,
4608 Sched<[sched.YMM.Folded]>;
4611 let Predicates = [HasAVX, NoVLX] in {
4612 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup", SchedWriteFShuffle>,
4614 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup", SchedWriteFShuffle>,
4615 VEX, VEX_L, VEX_WIG;
4618 defm MOVDDUP : sse3_replicate_dfp<"movddup", SchedWriteFShuffle>;
4621 let Predicates = [HasAVX, NoVLX] in {
4622 def : Pat<(X86Movddup (loadv2f64 addr:$src)),
4623 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
4624 def : Pat<(X86Movddup (v2f64 (X86vzload addr:$src))),
4625 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
4628 let Predicates = [UseSSE3] in {
4629 // No need for aligned memory, as this only loads 64 bits.
4630 def : Pat<(X86Movddup (loadv2f64 addr:$src)),
4631 (MOVDDUPrm addr:$src)>;
4632 def : Pat<(X86Movddup (v2f64 (X86vzload addr:$src))),
4633 (MOVDDUPrm addr:$src)>;
4636 //===---------------------------------------------------------------------===//
4637 // SSE3 - Move Unaligned Integer
4638 //===---------------------------------------------------------------------===//
4640 let Predicates = [HasAVX] in {
4641 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4642 "vlddqu\t{$src, $dst|$dst, $src}",
4643 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>,
4644 Sched<[SchedWriteVecMoveLS.XMM.RM]>, VEX, VEX_WIG;
4645 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
4646 "vlddqu\t{$src, $dst|$dst, $src}",
4647 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>,
4648 Sched<[SchedWriteVecMoveLS.YMM.RM]>, VEX, VEX_L, VEX_WIG;
4651 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4652 "lddqu\t{$src, $dst|$dst, $src}",
4653 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>,
4654 Sched<[SchedWriteVecMoveLS.XMM.RM]>;
4656 //===---------------------------------------------------------------------===//
4657 // SSE3 - Arithmetic
4658 //===---------------------------------------------------------------------===//
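// ADDSUBPS/ADDSUBPD subtract in the even-index lanes and add in the odd-index
// lanes, e.g. addsubps computes { a0-b0, a1+b1, a2-b2, a3+b3 }.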
4660 multiclass sse3_addsub<string OpcodeStr, ValueType vt, RegisterClass RC,
4661 X86MemOperand x86memop, X86FoldableSchedWrite sched,
4662 PatFrag ld_frag, bit Is2Addr = 1> {
4663 def rr : I<0xD0, MRMSrcReg,
4664 (outs RC:$dst), (ins RC:$src1, RC:$src2),
4666 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4667 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4668 [(set RC:$dst, (vt (X86Addsub RC:$src1, RC:$src2)))]>,
4670 def rm : I<0xD0, MRMSrcMem,
4671 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
4673 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4674 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4675 [(set RC:$dst, (vt (X86Addsub RC:$src1, (ld_frag addr:$src2))))]>,
4676 Sched<[sched.Folded, sched.ReadAfterFold]>;
4679 let Predicates = [HasAVX] in {
4680 let ExeDomain = SSEPackedSingle in {
4681 defm VADDSUBPS : sse3_addsub<"vaddsubps", v4f32, VR128, f128mem,
4682 SchedWriteFAddSizes.PS.XMM, loadv4f32, 0>,
4683 XD, VEX_4V, VEX_WIG;
4684 defm VADDSUBPSY : sse3_addsub<"vaddsubps", v8f32, VR256, f256mem,
4685 SchedWriteFAddSizes.PS.YMM, loadv8f32, 0>,
4686 XD, VEX_4V, VEX_L, VEX_WIG;
4688 let ExeDomain = SSEPackedDouble in {
4689 defm VADDSUBPD : sse3_addsub<"vaddsubpd", v2f64, VR128, f128mem,
4690 SchedWriteFAddSizes.PD.XMM, loadv2f64, 0>,
4691 PD, VEX_4V, VEX_WIG;
4692 defm VADDSUBPDY : sse3_addsub<"vaddsubpd", v4f64, VR256, f256mem,
4693 SchedWriteFAddSizes.PD.YMM, loadv4f64, 0>,
4694 PD, VEX_4V, VEX_L, VEX_WIG;
4697 let Constraints = "$src1 = $dst", Predicates = [UseSSE3] in {
4698 let ExeDomain = SSEPackedSingle in
4699 defm ADDSUBPS : sse3_addsub<"addsubps", v4f32, VR128, f128mem,
4700 SchedWriteFAddSizes.PS.XMM, memopv4f32>, XD;
4701 let ExeDomain = SSEPackedDouble in
4702 defm ADDSUBPD : sse3_addsub<"addsubpd", v2f64, VR128, f128mem,
4703 SchedWriteFAddSizes.PD.XMM, memopv2f64>, PD;
4706 //===---------------------------------------------------------------------===//
4707 // SSE3 - Horizontal Add/Sub
4708 //===---------------------------------------------------------------------===//
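// The horizontal ops reduce adjacent pairs, e.g. haddps computes
// { a0+a1, a2+a3, b0+b1, b2+b3 } for sources a ($src1/dst) and b ($src2).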
4711 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
4712 X86MemOperand x86memop, SDNode OpNode,
4713 X86FoldableSchedWrite sched, PatFrag ld_frag,
4715 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
4717 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4718 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4719 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))]>,
4722 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
4724 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4725 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4726 [(set RC:$dst, (vt (OpNode RC:$src1, (ld_frag addr:$src2))))]>,
4727 Sched<[sched.Folded, sched.ReadAfterFold]>;
4729 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
4730 X86MemOperand x86memop, SDNode OpNode,
4731 X86FoldableSchedWrite sched, PatFrag ld_frag,
4733 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
4735 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4736 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4737 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))]>,
4740 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
4742 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4743 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4744 [(set RC:$dst, (vt (OpNode RC:$src1, (ld_frag addr:$src2))))]>,
4745 Sched<[sched.Folded, sched.ReadAfterFold]>;
4748 let Predicates = [HasAVX] in {
4749 let ExeDomain = SSEPackedSingle in {
4750 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
4751 X86fhadd, WriteFHAdd, loadv4f32, 0>, VEX_4V, VEX_WIG;
4752 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
4753 X86fhsub, WriteFHAdd, loadv4f32, 0>, VEX_4V, VEX_WIG;
4754 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
4755 X86fhadd, WriteFHAddY, loadv8f32, 0>, VEX_4V, VEX_L, VEX_WIG;
4756 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
4757 X86fhsub, WriteFHAddY, loadv8f32, 0>, VEX_4V, VEX_L, VEX_WIG;
4759 let ExeDomain = SSEPackedDouble in {
4760 defm VHADDPD : S3_Int<0x7C, "vhaddpd", v2f64, VR128, f128mem,
4761 X86fhadd, WriteFHAdd, loadv2f64, 0>, VEX_4V, VEX_WIG;
4762 defm VHSUBPD : S3_Int<0x7D, "vhsubpd", v2f64, VR128, f128mem,
4763 X86fhsub, WriteFHAdd, loadv2f64, 0>, VEX_4V, VEX_WIG;
4764 defm VHADDPDY : S3_Int<0x7C, "vhaddpd", v4f64, VR256, f256mem,
4765 X86fhadd, WriteFHAddY, loadv4f64, 0>, VEX_4V, VEX_L, VEX_WIG;
4766 defm VHSUBPDY : S3_Int<0x7D, "vhsubpd", v4f64, VR256, f256mem,
4767 X86fhsub, WriteFHAddY, loadv4f64, 0>, VEX_4V, VEX_L, VEX_WIG;
4771 let Constraints = "$src1 = $dst" in {
4772 let ExeDomain = SSEPackedSingle in {
4773 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem, X86fhadd,
4774 WriteFHAdd, memopv4f32>;
4775 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem, X86fhsub,
4776 WriteFHAdd, memopv4f32>;
4778 let ExeDomain = SSEPackedDouble in {
4779 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem, X86fhadd,
4780 WriteFHAdd, memopv2f64>;
4781 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem, X86fhsub,
4782 WriteFHAdd, memopv2f64>;
4786 //===---------------------------------------------------------------------===//
4787 // SSSE3 - Packed Absolute Instructions
4788 //===---------------------------------------------------------------------===//
4790 /// SS3I_unop_rm - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
4791 multiclass SS3I_unop_rm<bits<8> opc, string OpcodeStr, ValueType vt,
4792 SDNode OpNode, X86SchedWriteWidths sched, PatFrag ld_frag> {
4793 def rr : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
4795 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4796 [(set VR128:$dst, (vt (OpNode VR128:$src)))]>,
4799 def rm : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
4801 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4803 (vt (OpNode (ld_frag addr:$src))))]>,
4804 Sched<[sched.XMM.Folded]>;
4807 /// SS3I_unop_rm_y - Simple SSSE3 unary op (YMM form) whose type can be v*{i8,i16,i32}.
4808 multiclass SS3I_unop_rm_y<bits<8> opc, string OpcodeStr, ValueType vt,
4809 SDNode OpNode, X86SchedWriteWidths sched> {
4810 def Yrr : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
4812 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4813 [(set VR256:$dst, (vt (OpNode VR256:$src)))]>,
4816 def Yrm : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
4818 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4820 (vt (OpNode (load addr:$src))))]>,
4821 Sched<[sched.YMM.Folded]>;
4824 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
4825 defm VPABSB : SS3I_unop_rm<0x1C, "vpabsb", v16i8, abs, SchedWriteVecALU,
4826 load>, VEX, VEX_WIG;
4827 defm VPABSW : SS3I_unop_rm<0x1D, "vpabsw", v8i16, abs, SchedWriteVecALU,
4828 load>, VEX, VEX_WIG;
4830 let Predicates = [HasAVX, NoVLX] in {
4831 defm VPABSD : SS3I_unop_rm<0x1E, "vpabsd", v4i32, abs, SchedWriteVecALU,
4832 load>, VEX, VEX_WIG;
4834 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
4835 defm VPABSB : SS3I_unop_rm_y<0x1C, "vpabsb", v32i8, abs, SchedWriteVecALU>,
4836 VEX, VEX_L, VEX_WIG;
4837 defm VPABSW : SS3I_unop_rm_y<0x1D, "vpabsw", v16i16, abs, SchedWriteVecALU>,
4838 VEX, VEX_L, VEX_WIG;
4840 let Predicates = [HasAVX2, NoVLX] in {
4841 defm VPABSD : SS3I_unop_rm_y<0x1E, "vpabsd", v8i32, abs, SchedWriteVecALU>,
4842 VEX, VEX_L, VEX_WIG;
4845 defm PABSB : SS3I_unop_rm<0x1C, "pabsb", v16i8, abs, SchedWriteVecALU,
4847 defm PABSW : SS3I_unop_rm<0x1D, "pabsw", v8i16, abs, SchedWriteVecALU,
4849 defm PABSD : SS3I_unop_rm<0x1E, "pabsd", v4i32, abs, SchedWriteVecALU,
4852 //===---------------------------------------------------------------------===//
4853 // SSSE3 - Packed Binary Operator Instructions
4854 //===---------------------------------------------------------------------===//
4856 /// SS3I_binop_rm - Simple SSSE3 bin op
4857 multiclass SS3I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4858 ValueType DstVT, ValueType OpVT, RegisterClass RC,
4859 PatFrag memop_frag, X86MemOperand x86memop,
4860 X86FoldableSchedWrite sched, bit Is2Addr = 1> {
4861 let isCommutable = 1 in
4862 def rr : SS38I<opc, MRMSrcReg, (outs RC:$dst),
4863 (ins RC:$src1, RC:$src2),
4865 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4866 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4867 [(set RC:$dst, (DstVT (OpNode (OpVT RC:$src1), RC:$src2)))]>,
4869 def rm : SS38I<opc, MRMSrcMem, (outs RC:$dst),
4870 (ins RC:$src1, x86memop:$src2),
4872 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4873 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4875 (DstVT (OpNode (OpVT RC:$src1), (memop_frag addr:$src2))))]>,
4876 Sched<[sched.Folded, sched.ReadAfterFold]>;
4879 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
4880 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
4881 Intrinsic IntId128, X86FoldableSchedWrite sched,
4882 PatFrag ld_frag, bit Is2Addr = 1> {
4883 let isCommutable = 1 in
4884 def rr : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
4885 (ins VR128:$src1, VR128:$src2),
4887 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4888 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4889 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
4891 def rm : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
4892 (ins VR128:$src1, i128mem:$src2),
4894 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4895 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4897 (IntId128 VR128:$src1, (ld_frag addr:$src2)))]>,
4898 Sched<[sched.Folded, sched.ReadAfterFold]>;
4901 multiclass SS3I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
4903 X86FoldableSchedWrite sched> {
4904 let isCommutable = 1 in
4905 def Yrr : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
4906 (ins VR256:$src1, VR256:$src2),
4907 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4908 [(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>,
4910 def Yrm : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
4911 (ins VR256:$src1, i256mem:$src2),
4912 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4914 (IntId256 VR256:$src1, (load addr:$src2)))]>,
4915 Sched<[sched.Folded, sched.ReadAfterFold]>;
4918 let ImmT = NoImm, Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
4919 let isCommutable = 0 in {
4920 defm VPSHUFB : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v16i8, v16i8,
4921 VR128, load, i128mem,
4922 SchedWriteVarShuffle.XMM, 0>, VEX_4V, VEX_WIG;
4923 defm VPMADDUBSW : SS3I_binop_rm<0x04, "vpmaddubsw", X86vpmaddubsw, v8i16,
4924 v16i8, VR128, load, i128mem,
4925 SchedWriteVecIMul.XMM, 0>, VEX_4V, VEX_WIG;
4927 defm VPMULHRSW : SS3I_binop_rm<0x0B, "vpmulhrsw", X86mulhrs, v8i16, v8i16,
4928 VR128, load, i128mem,
4929 SchedWriteVecIMul.XMM, 0>, VEX_4V, VEX_WIG;
4932 let ImmT = NoImm, Predicates = [HasAVX] in {
4933 let isCommutable = 0 in {
4934 defm VPHADDW : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v8i16, v8i16, VR128,
4936 SchedWritePHAdd.XMM, 0>, VEX_4V, VEX_WIG;
4937 defm VPHADDD : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v4i32, v4i32, VR128,
4939 SchedWritePHAdd.XMM, 0>, VEX_4V, VEX_WIG;
4940 defm VPHSUBW : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v8i16, v8i16, VR128,
4942 SchedWritePHAdd.XMM, 0>, VEX_4V, VEX_WIG;
4943 defm VPHSUBD : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v4i32, v4i32, VR128,
4945 SchedWritePHAdd.XMM, 0>, VEX_4V;
4946 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb",
4947 int_x86_ssse3_psign_b_128,
4948 SchedWriteVecALU.XMM, load, 0>, VEX_4V, VEX_WIG;
4949 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw",
4950 int_x86_ssse3_psign_w_128,
4951 SchedWriteVecALU.XMM, load, 0>, VEX_4V, VEX_WIG;
4952 defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd",
4953 int_x86_ssse3_psign_d_128,
4954 SchedWriteVecALU.XMM, load, 0>, VEX_4V, VEX_WIG;
4955 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw",
4956 int_x86_ssse3_phadd_sw_128,
4957 SchedWritePHAdd.XMM, load, 0>, VEX_4V, VEX_WIG;
4958 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw",
4959 int_x86_ssse3_phsub_sw_128,
4960 SchedWritePHAdd.XMM, load, 0>, VEX_4V, VEX_WIG;
4964 let ImmT = NoImm, Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
4965 let isCommutable = 0 in {
4966 defm VPSHUFBY : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v32i8, v32i8,
4967 VR256, load, i256mem,
4968 SchedWriteVarShuffle.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
4969 defm VPMADDUBSWY : SS3I_binop_rm<0x04, "vpmaddubsw", X86vpmaddubsw, v16i16,
4970 v32i8, VR256, load, i256mem,
4971 SchedWriteVecIMul.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
4973 defm VPMULHRSWY : SS3I_binop_rm<0x0B, "vpmulhrsw", X86mulhrs, v16i16, v16i16,
4974 VR256, load, i256mem,
4975 SchedWriteVecIMul.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
4978 let ImmT = NoImm, Predicates = [HasAVX2] in {
4979 let isCommutable = 0 in {
4980 defm VPHADDWY : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v16i16, v16i16,
4981 VR256, load, i256mem,
4982 SchedWritePHAdd.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
4983 defm VPHADDDY : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v8i32, v8i32, VR256,
4985 SchedWritePHAdd.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
4986 defm VPHSUBWY : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v16i16, v16i16,
4987 VR256, load, i256mem,
4988 SchedWritePHAdd.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
4989 defm VPHSUBDY : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v8i32, v8i32, VR256,
4991 SchedWritePHAdd.YMM, 0>, VEX_4V, VEX_L;
4992 defm VPSIGNB : SS3I_binop_rm_int_y<0x08, "vpsignb", int_x86_avx2_psign_b,
4993 SchedWriteVecALU.YMM>, VEX_4V, VEX_L, VEX_WIG;
4994 defm VPSIGNW : SS3I_binop_rm_int_y<0x09, "vpsignw", int_x86_avx2_psign_w,
4995 SchedWriteVecALU.YMM>, VEX_4V, VEX_L, VEX_WIG;
4996 defm VPSIGND : SS3I_binop_rm_int_y<0x0A, "vpsignd", int_x86_avx2_psign_d,
4997 SchedWriteVecALU.YMM>, VEX_4V, VEX_L, VEX_WIG;
4998 defm VPHADDSW : SS3I_binop_rm_int_y<0x03, "vphaddsw",
4999 int_x86_avx2_phadd_sw,
5000 SchedWritePHAdd.YMM>, VEX_4V, VEX_L, VEX_WIG;
5001 defm VPHSUBSW : SS3I_binop_rm_int_y<0x07, "vphsubsw",
5002 int_x86_avx2_phsub_sw,
5003 SchedWritePHAdd.YMM>, VEX_4V, VEX_L, VEX_WIG;
5007 // None of these have i8 immediate fields.
5008 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
5009 let isCommutable = 0 in {
5010 defm PHADDW : SS3I_binop_rm<0x01, "phaddw", X86hadd, v8i16, v8i16, VR128,
5011 memop, i128mem, SchedWritePHAdd.XMM>;
5012 defm PHADDD : SS3I_binop_rm<0x02, "phaddd", X86hadd, v4i32, v4i32, VR128,
5013 memop, i128mem, SchedWritePHAdd.XMM>;
5014 defm PHSUBW : SS3I_binop_rm<0x05, "phsubw", X86hsub, v8i16, v8i16, VR128,
5015 memop, i128mem, SchedWritePHAdd.XMM>;
5016 defm PHSUBD : SS3I_binop_rm<0x06, "phsubd", X86hsub, v4i32, v4i32, VR128,
5017 memop, i128mem, SchedWritePHAdd.XMM>;
5018 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", int_x86_ssse3_psign_b_128,
5019 SchedWriteVecALU.XMM, memop>;
5020 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", int_x86_ssse3_psign_w_128,
5021 SchedWriteVecALU.XMM, memop>;
5022 defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", int_x86_ssse3_psign_d_128,
5023 SchedWriteVecALU.XMM, memop>;
5024 defm PSHUFB : SS3I_binop_rm<0x00, "pshufb", X86pshufb, v16i8, v16i8, VR128,
5025 memop, i128mem, SchedWriteVarShuffle.XMM>;
5026 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw",
5027 int_x86_ssse3_phadd_sw_128,
5028 SchedWritePHAdd.XMM, memop>;
5029 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw",
5030 int_x86_ssse3_phsub_sw_128,
5031 SchedWritePHAdd.XMM, memop>;
5032 defm PMADDUBSW : SS3I_binop_rm<0x04, "pmaddubsw", X86vpmaddubsw, v8i16,
5033 v16i8, VR128, memop, i128mem,
5034 SchedWriteVecIMul.XMM>;
5036 defm PMULHRSW : SS3I_binop_rm<0x0B, "pmulhrsw", X86mulhrs, v8i16, v8i16,
5037 VR128, memop, i128mem, SchedWriteVecIMul.XMM>;
5040 //===---------------------------------------------------------------------===//
5041 // SSSE3 - Packed Align Instruction Patterns
5042 //===---------------------------------------------------------------------===//
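// PALIGNR concatenates $src1:$src2 (with $src1 in the upper half), shifts the
// intermediate right by imm8 bytes, and keeps the low 128 bits; the YMM form
// performs this independently within each 128-bit lane.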
5044 multiclass ssse3_palignr<string asm, ValueType VT, RegisterClass RC,
5045 PatFrag memop_frag, X86MemOperand x86memop,
5046 X86FoldableSchedWrite sched, bit Is2Addr = 1> {
5047 let hasSideEffects = 0 in {
5048 def rri : SS3AI<0x0F, MRMSrcReg, (outs RC:$dst),
5049 (ins RC:$src1, RC:$src2, u8imm:$src3),
5051 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5053 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5054 [(set RC:$dst, (VT (X86PAlignr RC:$src1, RC:$src2, (i8 imm:$src3))))]>,
5057 def rmi : SS3AI<0x0F, MRMSrcMem, (outs RC:$dst),
5058 (ins RC:$src1, x86memop:$src2, u8imm:$src3),
5060 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5062 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5063 [(set RC:$dst, (VT (X86PAlignr RC:$src1,
5064 (memop_frag addr:$src2),
5065 (i8 imm:$src3))))]>,
5066 Sched<[sched.Folded, sched.ReadAfterFold]>;
5070 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in
5071 defm VPALIGNR : ssse3_palignr<"vpalignr", v16i8, VR128, load, i128mem,
5072 SchedWriteShuffle.XMM, 0>, VEX_4V, VEX_WIG;
5073 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in
5074 defm VPALIGNRY : ssse3_palignr<"vpalignr", v32i8, VR256, load, i256mem,
5075 SchedWriteShuffle.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
5076 let Constraints = "$src1 = $dst", Predicates = [UseSSSE3] in
5077 defm PALIGNR : ssse3_palignr<"palignr", v16i8, VR128, memop, i128mem,
5078 SchedWriteShuffle.XMM>;
5080 //===---------------------------------------------------------------------===//
5081 // SSE3 - Thread synchronization
5082 //===---------------------------------------------------------------------===//
5084 let SchedRW = [WriteSystem] in {
5085 let usesCustomInserter = 1 in {
5086 def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
5087 [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>,
5088 Requires<[HasSSE3]>;
5091 let Uses = [EAX, ECX, EDX] in
5092 def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", []>,
5093 TB, Requires<[HasSSE3]>;
5095 let Uses = [ECX, EAX] in
5096 def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait",
5097 [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
5100 def : InstAlias<"mwait\t{%eax, %ecx|ecx, eax}", (MWAITrr)>, Requires<[Not64BitMode]>;
5101 def : InstAlias<"mwait\t{%rax, %rcx|rcx, rax}", (MWAITrr)>, Requires<[In64BitMode]>;
5103 def : InstAlias<"monitor\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORrrr)>,
5104 Requires<[Not64BitMode]>;
5105 def : InstAlias<"monitor\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORrrr)>,
5106 Requires<[In64BitMode]>;
5108 //===----------------------------------------------------------------------===//
5109 // SSE4.1 - Packed Move with Sign/Zero Extend
5110 //===----------------------------------------------------------------------===//
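// PMOVSX*/PMOVZX* sign- or zero-extend packed elements from the low part of the
// source into wider elements, e.g. pmovzxbw widens the low eight bytes to eight
// 16-bit words.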
5112 multiclass SS41I_pmovx_rrrm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
5113 RegisterClass OutRC, RegisterClass InRC,
5114 X86FoldableSchedWrite sched> {
5115 def rr : SS48I<opc, MRMSrcReg, (outs OutRC:$dst), (ins InRC:$src),
5116 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>,
5119 def rm : SS48I<opc, MRMSrcMem, (outs OutRC:$dst), (ins MemOp:$src),
5120 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>,
5121 Sched<[sched.Folded]>;
5124 multiclass SS41I_pmovx_rm_all<bits<8> opc, string OpcodeStr,
5125 X86MemOperand MemOp, X86MemOperand MemYOp,
5127 defm NAME : SS41I_pmovx_rrrm<opc, OpcodeStr, MemOp, VR128, VR128,
5128 SchedWriteShuffle.XMM>;
5129 let Predicates = [HasAVX, prd] in
5130 defm V#NAME : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemOp,
5131 VR128, VR128, SchedWriteShuffle.XMM>,
5133 let Predicates = [HasAVX2, prd] in
5134 defm V#NAME#Y : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemYOp,
5135 VR256, VR128, WriteShuffle256>,
5136 VEX, VEX_L, VEX_WIG;
5139 multiclass SS41I_pmovx_rm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
5140 X86MemOperand MemYOp, Predicate prd> {
5141 defm PMOVSX#NAME : SS41I_pmovx_rm_all<opc, !strconcat("pmovsx", OpcodeStr),
5142 MemOp, MemYOp, prd>;
5143 defm PMOVZX#NAME : SS41I_pmovx_rm_all<!add(opc, 0x10),
5144 !strconcat("pmovzx", OpcodeStr),
5145 MemOp, MemYOp, prd>;
5148 defm BW : SS41I_pmovx_rm<0x20, "bw", i64mem, i128mem, NoVLX_Or_NoBWI>;
5149 defm WD : SS41I_pmovx_rm<0x23, "wd", i64mem, i128mem, NoVLX>;
5150 defm DQ : SS41I_pmovx_rm<0x25, "dq", i64mem, i128mem, NoVLX>;
5152 defm BD : SS41I_pmovx_rm<0x21, "bd", i32mem, i64mem, NoVLX>;
5153 defm WQ : SS41I_pmovx_rm<0x24, "wq", i32mem, i64mem, NoVLX>;
5155 defm BQ : SS41I_pmovx_rm<0x22, "bq", i16mem, i32mem, NoVLX>;
5157 // Patterns that we also need for any_extend.
5158 // Any_extend_vector_inreg is currently legalized to zero_extend_vector_inreg.
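// For example, (v16i16 (anyext (v16i8 VR128:$src))) is matched to the VPMOVZX
// forms through the base patterns instantiated further below.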
5159 multiclass SS41I_pmovx_avx2_patterns_base<string OpcPrefix, SDNode ExtOp> {
5160 // Register-Register patterns
5161 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
5162 def : Pat<(v16i16 (ExtOp (v16i8 VR128:$src))),
5163 (!cast<I>(OpcPrefix#BWYrr) VR128:$src)>;
5166 let Predicates = [HasAVX2, NoVLX] in {
5167 def : Pat<(v8i32 (ExtOp (v8i16 VR128:$src))),
5168 (!cast<I>(OpcPrefix#WDYrr) VR128:$src)>;
5170 def : Pat<(v4i64 (ExtOp (v4i32 VR128:$src))),
5171 (!cast<I>(OpcPrefix#DQYrr) VR128:$src)>;
5174 // AVX2 Register-Memory patterns
5175 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
5176 def : Pat<(v16i16 (ExtOp (loadv16i8 addr:$src))),
5177 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
5178 def : Pat<(v16i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
5179 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
5180 def : Pat<(v16i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
5181 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
5184 let Predicates = [HasAVX2, NoVLX] in {
5185 def : Pat<(v8i32 (ExtOp (loadv8i16 addr:$src))),
5186 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
5187 def : Pat<(v8i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
5188 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
5189 def : Pat<(v8i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
5190 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
5192 def : Pat<(v4i64 (ExtOp (loadv4i32 addr:$src))),
5193 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
5194 def : Pat<(v4i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
5195 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
5196 def : Pat<(v4i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
5197 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
5202 multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy,
5203 SDNode ExtOp, SDNode InVecOp> :
5204 SS41I_pmovx_avx2_patterns_base<OpcPrefix, ExtOp> {
5206 // Register-Register patterns
5207 let Predicates = [HasAVX2, NoVLX] in {
5208 def : Pat<(v8i32 (InVecOp (v16i8 VR128:$src))),
5209 (!cast<I>(OpcPrefix#BDYrr) VR128:$src)>;
5210 def : Pat<(v4i64 (InVecOp (v16i8 VR128:$src))),
5211 (!cast<I>(OpcPrefix#BQYrr) VR128:$src)>;
5213 def : Pat<(v4i64 (InVecOp (v8i16 VR128:$src))),
5214 (!cast<I>(OpcPrefix#WQYrr) VR128:$src)>;
5217 // Simple Register-Memory patterns
5218 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
5219 def : Pat<(v16i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
5220 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
5222 let Predicates = [HasAVX2, NoVLX] in {
5223 def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
5224 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
5225 def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
5226 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
5228 def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
5229 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
5230 def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
5231 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
5233 def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
5234 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
5237 // AVX2 Register-Memory patterns
5238 let Predicates = [HasAVX2, NoVLX] in {
5239 def : Pat<(v8i32 (InVecOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
5240 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
5241 def : Pat<(v8i32 (InVecOp (v16i8 (vzmovl_v2i64 addr:$src)))),
5242 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
5243 def : Pat<(v8i32 (InVecOp (v16i8 (vzload_v2i64 addr:$src)))),
5244 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
5245 def : Pat<(v8i32 (InVecOp (loadv16i8 addr:$src))),
5246 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
5248 def : Pat<(v4i64 (InVecOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
5249 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
5250 def : Pat<(v4i64 (InVecOp (v16i8 (vzmovl_v4i32 addr:$src)))),
5251 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
5252 def : Pat<(v4i64 (InVecOp (v16i8 (vzload_v2i64 addr:$src)))),
5253 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
5254 def : Pat<(v4i64 (InVecOp (loadv16i8 addr:$src))),
5255 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
5257 def : Pat<(v4i64 (InVecOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
5258 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
5259 def : Pat<(v4i64 (InVecOp (v8i16 (vzmovl_v2i64 addr:$src)))),
5260 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
5261 def : Pat<(v4i64 (InVecOp (v8i16 (vzload_v2i64 addr:$src)))),
5262 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
5263 def : Pat<(v4i64 (InVecOp (loadv8i16 addr:$src))),
5264 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
5268 defm : SS41I_pmovx_avx2_patterns<"VPMOVSX", "s", sext, sext_invec>;
5269 defm : SS41I_pmovx_avx2_patterns<"VPMOVZX", "z", zext, zext_invec>;
5270 defm : SS41I_pmovx_avx2_patterns_base<"VPMOVZX", anyext>;
5272 // SSE4.1/AVX patterns.
5273 multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
5275 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
5276 def : Pat<(v8i16 (ExtOp (v16i8 VR128:$src))),
5277 (!cast<I>(OpcPrefix#BWrr) VR128:$src)>;
5279 let Predicates = [HasAVX, NoVLX] in {
5280 def : Pat<(v4i32 (ExtOp (v16i8 VR128:$src))),
5281 (!cast<I>(OpcPrefix#BDrr) VR128:$src)>;
5282 def : Pat<(v2i64 (ExtOp (v16i8 VR128:$src))),
5283 (!cast<I>(OpcPrefix#BQrr) VR128:$src)>;
5285 def : Pat<(v4i32 (ExtOp (v8i16 VR128:$src))),
5286 (!cast<I>(OpcPrefix#WDrr) VR128:$src)>;
5287 def : Pat<(v2i64 (ExtOp (v8i16 VR128:$src))),
5288 (!cast<I>(OpcPrefix#WQrr) VR128:$src)>;
5290 def : Pat<(v2i64 (ExtOp (v4i32 VR128:$src))),
5291 (!cast<I>(OpcPrefix#DQrr) VR128:$src)>;
5293 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
5294 def : Pat<(v8i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
5295 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
5297 let Predicates = [HasAVX, NoVLX] in {
5298 def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
5299 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
5300 def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
5301 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
5303 def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
5304 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
5305 def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
5306 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
5308 def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
5309 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
5311 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
5312 def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
5313 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
5314 def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
5315 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
5316 def : Pat<(v8i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
5317 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
5318 def : Pat<(v8i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
5319 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
5320 def : Pat<(v8i16 (ExtOp (loadv16i8 addr:$src))),
5321 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
5323 let Predicates = [HasAVX, NoVLX] in {
5324 def : Pat<(v4i32 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
5325 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
5326 def : Pat<(v4i32 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
5327 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
5328 def : Pat<(v4i32 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
5329 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
5330 def : Pat<(v4i32 (ExtOp (loadv16i8 addr:$src))),
5331 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
5333 def : Pat<(v2i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (extloadi32i16 addr:$src)))))),
5334 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
5335 def : Pat<(v2i64 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
5336 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
5337 def : Pat<(v2i64 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
5338 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
5339 def : Pat<(v2i64 (ExtOp (loadv16i8 addr:$src))),
5340 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
5342 def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
5343 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
5344 def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
5345 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
5346 def : Pat<(v4i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
5347 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
5348 def : Pat<(v4i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
5349 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
5350 def : Pat<(v4i32 (ExtOp (loadv8i16 addr:$src))),
5351 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
5353 def : Pat<(v2i64 (ExtOp (bc_v8i16 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
5354 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
5355 def : Pat<(v2i64 (ExtOp (v8i16 (vzmovl_v4i32 addr:$src)))),
5356 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
5357 def : Pat<(v2i64 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
5358 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
5359 def : Pat<(v2i64 (ExtOp (loadv8i16 addr:$src))),
5360 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
5362 def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
5363 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
5364 def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
5365 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
5366 def : Pat<(v2i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
5367 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
5368 def : Pat<(v2i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
5369 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
5370 def : Pat<(v2i64 (ExtOp (loadv4i32 addr:$src))),
5371 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
5375 defm : SS41I_pmovx_patterns<"VPMOVSX", "s", sext_invec>;
5376 defm : SS41I_pmovx_patterns<"VPMOVZX", "z", zext_invec>;
5378 let Predicates = [UseSSE41] in {
5379 defm : SS41I_pmovx_patterns<"PMOVSX", "s", sext_invec>;
5380 defm : SS41I_pmovx_patterns<"PMOVZX", "z", zext_invec>;
5383 //===----------------------------------------------------------------------===//
5384 // SSE4.1 - Extract Instructions
5385 //===----------------------------------------------------------------------===//
5387 /// SS41I_extract8 - SSE 4.1 extract 8 bits to a 32-bit reg or 8-bit mem destination
5388 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
5389 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
5390 (ins VR128:$src1, u8imm:$src2),
5391 !strconcat(OpcodeStr,
5392 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5393 [(set GR32orGR64:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
5395 Sched<[WriteVecExtract]>;
5396 let hasSideEffects = 0, mayStore = 1 in
5397 def mr : SS4AIi8<opc, MRMDestMem, (outs),
5398 (ins i8mem:$dst, VR128:$src1, u8imm:$src2),
5399 !strconcat(OpcodeStr,
5400 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5401 [(store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))),
5402 addr:$dst)]>, Sched<[WriteVecExtractSt]>;
5405 let Predicates = [HasAVX, NoBWI] in
5406 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
5408 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
5411 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
5412 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
5413 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
5414 def rr_REV : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
5415 (ins VR128:$src1, u8imm:$src2),
5416 !strconcat(OpcodeStr,
5417 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
5418 Sched<[WriteVecExtract]>, FoldGenData<NAME#rr>;
5420 let hasSideEffects = 0, mayStore = 1 in
5421 def mr : SS4AIi8<opc, MRMDestMem, (outs),
5422 (ins i16mem:$dst, VR128:$src1, u8imm:$src2),
5423 !strconcat(OpcodeStr,
5424 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5425 [(store (i16 (trunc (X86pextrw (v8i16 VR128:$src1), imm:$src2))),
5426 addr:$dst)]>, Sched<[WriteVecExtractSt]>;
5429 let Predicates = [HasAVX, NoBWI] in
5430 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
5432 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
5435 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
5436 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
5437 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
5438 (ins VR128:$src1, u8imm:$src2),
5439 !strconcat(OpcodeStr,
5440 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5442 [(set GR32:$dst, (extractelt (v4i32 VR128:$src1), imm:$src2))]>,
5443 Sched<[WriteVecExtract]>;
5444 def mr : SS4AIi8<opc, MRMDestMem, (outs),
5445 (ins i32mem:$dst, VR128:$src1, u8imm:$src2),
5446 !strconcat(OpcodeStr,
5447 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5448 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
5449 addr:$dst)]>, Sched<[WriteVecExtractSt]>;
5452 let Predicates = [HasAVX, NoDQI] in
5453 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
5455 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
5457 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int64 reg or memory destination
5458 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
5459 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
5460 (ins VR128:$src1, u8imm:$src2),
5461 !strconcat(OpcodeStr,
5462 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5464 [(set GR64:$dst, (extractelt (v2i64 VR128:$src1), imm:$src2))]>,
5465 Sched<[WriteVecExtract]>;
5466 def mr : SS4AIi8<opc, MRMDestMem, (outs),
5467 (ins i64mem:$dst, VR128:$src1, u8imm:$src2),
5468 !strconcat(OpcodeStr,
5469 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5470 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
5471 addr:$dst)]>, Sched<[WriteVecExtractSt]>;
5474 let Predicates = [HasAVX, NoDQI] in
5475 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
5477 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">, REX_W;
5479 /// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory destination
5481 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
5482 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
5483 (ins VR128:$src1, u8imm:$src2),
5484 !strconcat(OpcodeStr,
5485 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5486 [(set GR32orGR64:$dst,
5487 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
5488 Sched<[WriteVecExtract]>;
5489 def mr : SS4AIi8<opc, MRMDestMem, (outs),
5490 (ins f32mem:$dst, VR128:$src1, u8imm:$src2),
5491 !strconcat(OpcodeStr,
5492 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5493 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
5494 addr:$dst)]>, Sched<[WriteVecExtractSt]>;
5497 let ExeDomain = SSEPackedSingle in {
5498 let Predicates = [UseAVX] in
5499 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX, VEX_WIG;
5500 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
5503 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
5504 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
5507 (VEXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
5509 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
5512 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
5513 Requires<[UseSSE41]>;
5515 //===----------------------------------------------------------------------===//
5516 // SSE4.1 - Insert Instructions
5517 //===----------------------------------------------------------------------===//
5519 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
5520 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
5521 (ins VR128:$src1, GR32orGR64:$src2, u8imm:$src3),
5523 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5525 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5527 (X86pinsrb VR128:$src1, GR32orGR64:$src2, imm:$src3))]>,
5528 Sched<[WriteVecInsert, ReadDefault, ReadInt2Fpu]>;
5529 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
5530 (ins VR128:$src1, i8mem:$src2, u8imm:$src3),
5532 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5534 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5536 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2), imm:$src3))]>,
5537 Sched<[WriteVecInsert.Folded, WriteVecInsert.ReadAfterFold]>;
5540 let Predicates = [HasAVX, NoBWI] in
5541 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
5542 let Constraints = "$src1 = $dst" in
5543 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
5545 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
5546 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
5547 (ins VR128:$src1, GR32:$src2, u8imm:$src3),
5549 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5551 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5553 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
5554 Sched<[WriteVecInsert, ReadDefault, ReadInt2Fpu]>;
5555 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
5556 (ins VR128:$src1, i32mem:$src2, u8imm:$src3),
5558 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5560 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5562 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2), imm:$src3)))]>,
5563 Sched<[WriteVecInsert.Folded, WriteVecInsert.ReadAfterFold]>;
5566 let Predicates = [HasAVX, NoDQI] in
5567 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
5568 let Constraints = "$src1 = $dst" in
5569 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
5571 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
5572 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
5573 (ins VR128:$src1, GR64:$src2, u8imm:$src3),
5575 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5577 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5579 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
5580 Sched<[WriteVecInsert, ReadDefault, ReadInt2Fpu]>;
5581 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
5582 (ins VR128:$src1, i64mem:$src2, u8imm:$src3),
5584 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5586 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5588 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2), imm:$src3)))]>,
5589 Sched<[WriteVecInsert.Folded, WriteVecInsert.ReadAfterFold]>;
5592 let Predicates = [HasAVX, NoDQI] in
5593 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
5594 let Constraints = "$src1 = $dst" in
5595 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
5597 // insertps has a few different modes. The first two below are optimized
5598 // inserts that won't zero arbitrary elements in the destination vector.
5599 // The next one matches the intrinsic and may zero arbitrary elements in the
5600 // target vector.
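// In the insertps immediate, bits [7:6] select the source element of the
// second operand, bits [5:4] select the destination element to overwrite, and
// bits [3:0] form a zero mask applied to the result. For the memory form the
// loaded scalar is always the source, so bits [7:6] are ignored.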
5601 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
5602 let isCommutable = 1 in
5603 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
5604 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
5606 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5608 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5610 (X86insertps VR128:$src1, VR128:$src2, imm:$src3))]>,
5611 Sched<[SchedWriteFShuffle.XMM]>;
5612 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
5613 (ins VR128:$src1, f32mem:$src2, u8imm:$src3),
5615 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5617 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5619 (X86insertps VR128:$src1,
5620 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
5622 Sched<[SchedWriteFShuffle.XMM.Folded, SchedWriteFShuffle.XMM.ReadAfterFold]>;
5625 let ExeDomain = SSEPackedSingle in {
5626 let Predicates = [UseAVX] in
5627 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>,
5629 let Constraints = "$src1 = $dst" in
5630 defm INSERTPS : SS41I_insertf32<0x21, "insertps", 1>;
5633 let Predicates = [UseAVX] in {
5634 // If we're inserting an element from a vbroadcast of a load, fold the
5635 // load into the X86insertps instruction.
5636 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1),
5637 (X86VBroadcast (loadf32 addr:$src2)), imm:$src3)),
5638 (VINSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
5639 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1),
5640 (X86VBroadcast (loadv4f32 addr:$src2)), imm:$src3)),
5641 (VINSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
5644 //===----------------------------------------------------------------------===//
5645 // SSE4.1 - Round Instructions
5646 //===----------------------------------------------------------------------===//
5648 multiclass sse41_fp_unop_p<bits<8> opc, string OpcodeStr,
5649 X86MemOperand x86memop, RegisterClass RC,
5650 ValueType VT, PatFrag mem_frag, SDNode OpNode,
5651 X86FoldableSchedWrite sched> {
5652 // Intrinsic operation, reg.
5653 // Vector intrinsic operation, reg
5654 def r : SS4AIi8<opc, MRMSrcReg,
5655 (outs RC:$dst), (ins RC:$src1, i32u8imm:$src2),
5656 !strconcat(OpcodeStr,
5657 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5658 [(set RC:$dst, (VT (OpNode RC:$src1, imm:$src2)))]>,
5661 // Vector intrinsic operation, mem
5662 def m : SS4AIi8<opc, MRMSrcMem,
5663 (outs RC:$dst), (ins x86memop:$src1, i32u8imm:$src2),
5664 !strconcat(OpcodeStr,
5665 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5667 (VT (OpNode (mem_frag addr:$src1),imm:$src2)))]>,
5668 Sched<[sched.Folded]>;
5671 multiclass avx_fp_unop_rm<bits<8> opcss, bits<8> opcsd,
5672 string OpcodeStr, X86FoldableSchedWrite sched> {
5673 let ExeDomain = SSEPackedSingle, hasSideEffects = 0 in {
5674 def SSr : SS4AIi8<opcss, MRMSrcReg,
5675 (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, i32u8imm:$src3),
5676 !strconcat(OpcodeStr,
5677 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5678 []>, Sched<[sched]>;
5681 def SSm : SS4AIi8<opcss, MRMSrcMem,
5682 (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2, i32u8imm:$src3),
5683 !strconcat(OpcodeStr,
5684 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5685 []>, Sched<[sched.Folded, sched.ReadAfterFold]>;
5686 } // ExeDomain = SSEPackedSingle, hasSideEffects = 0
5688 let ExeDomain = SSEPackedDouble, hasSideEffects = 0 in {
5689 def SDr : SS4AIi8<opcsd, MRMSrcReg,
5690 (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, i32u8imm:$src3),
5691 !strconcat(OpcodeStr,
5692 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5693 []>, Sched<[sched]>;
5696 def SDm : SS4AIi8<opcsd, MRMSrcMem,
5697 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2, i32u8imm:$src3),
5698 !strconcat(OpcodeStr,
5699 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5700 []>, Sched<[sched.Folded, sched.ReadAfterFold]>;
5701 } // ExeDomain = SSEPackedDouble, hasSideEffects = 0
5704 multiclass sse41_fp_unop_s<bits<8> opcss, bits<8> opcsd,
5705 string OpcodeStr, X86FoldableSchedWrite sched> {
5706 let ExeDomain = SSEPackedSingle, hasSideEffects = 0 in {
5707 def SSr : SS4AIi8<opcss, MRMSrcReg,
5708 (outs FR32:$dst), (ins FR32:$src1, i32u8imm:$src2),
5709 !strconcat(OpcodeStr,
5710 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5711 []>, Sched<[sched]>;
5714 def SSm : SS4AIi8<opcss, MRMSrcMem,
5715 (outs FR32:$dst), (ins f32mem:$src1, i32u8imm:$src2),
5716 !strconcat(OpcodeStr,
5717 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5718 []>, Sched<[sched.Folded, sched.ReadAfterFold]>;
5719 } // ExeDomain = SSEPackedSingle, hasSideEffects = 0
5721 let ExeDomain = SSEPackedDouble, hasSideEffects = 0 in {
5722 def SDr : SS4AIi8<opcsd, MRMSrcReg,
5723 (outs FR64:$dst), (ins FR64:$src1, i32u8imm:$src2),
5724 !strconcat(OpcodeStr,
5725 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5726 []>, Sched<[sched]>;
5729 def SDm : SS4AIi8<opcsd, MRMSrcMem,
5730 (outs FR64:$dst), (ins f64mem:$src1, i32u8imm:$src2),
5731 !strconcat(OpcodeStr,
5732 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5733 []>, Sched<[sched.Folded, sched.ReadAfterFold]>;
5734 } // ExeDomain = SSEPackedDouble, hasSideEffects = 0
5737 multiclass sse41_fp_binop_s<bits<8> opcss, bits<8> opcsd,
5738 string OpcodeStr, X86FoldableSchedWrite sched,
5739 ValueType VT32, ValueType VT64,
5740 SDNode OpNode, bit Is2Addr = 1> {
5741 let ExeDomain = SSEPackedSingle, isCodeGenOnly = 1 in {
5742 def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
5743 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32u8imm:$src3),
5745 !strconcat(OpcodeStr,
5746 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5747 !strconcat(OpcodeStr,
5748 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5749 [(set VR128:$dst, (VT32 (OpNode VR128:$src1, VR128:$src2, imm:$src3)))]>,
5752 def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
5753 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32u8imm:$src3),
5755 !strconcat(OpcodeStr,
5756 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5757 !strconcat(OpcodeStr,
5758 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5760 (OpNode VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
5761 Sched<[sched.Folded, sched.ReadAfterFold]>;
5762 } // ExeDomain = SSEPackedSingle, isCodeGenOnly = 1
5764 let ExeDomain = SSEPackedDouble, isCodeGenOnly = 1 in {
5765 def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
5766 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32u8imm:$src3),
5768 !strconcat(OpcodeStr,
5769 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5770 !strconcat(OpcodeStr,
5771 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5772 [(set VR128:$dst, (VT64 (OpNode VR128:$src1, VR128:$src2, imm:$src3)))]>,
5775 def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
5776 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32u8imm:$src3),
5778 !strconcat(OpcodeStr,
5779 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5780 !strconcat(OpcodeStr,
5781 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5783 (OpNode VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
5784 Sched<[sched.Folded, sched.ReadAfterFold]>;
5785 } // ExeDomain = SSEPackedDouble, isCodeGenOnly = 1
5788 // FP round - roundss, roundps, roundsd, roundpd
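// The rounding-control immediate selects the mode in bits [1:0] (00 nearest,
// 01 down, 10 up, 11 truncate), uses MXCSR.RC instead when bit 2 is set, and
// suppresses precision exceptions when bit 3 is set. Hence the patterns below
// use 0x9 for floor, 0xA for ceil, 0xB for trunc, 0xC for nearbyint and 0x4
// for rint.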
5789 let Predicates = [HasAVX, NoVLX] in {
5790 let ExeDomain = SSEPackedSingle in {
5792 defm VROUNDPS : sse41_fp_unop_p<0x08, "vroundps", f128mem, VR128, v4f32,
5793 loadv4f32, X86VRndScale, SchedWriteFRnd.XMM>,
5795 defm VROUNDPSY : sse41_fp_unop_p<0x08, "vroundps", f256mem, VR256, v8f32,
5796 loadv8f32, X86VRndScale, SchedWriteFRnd.YMM>,
5797 VEX, VEX_L, VEX_WIG;
5800 let ExeDomain = SSEPackedDouble in {
5801 defm VROUNDPD : sse41_fp_unop_p<0x09, "vroundpd", f128mem, VR128, v2f64,
5802 loadv2f64, X86VRndScale, SchedWriteFRnd.XMM>,
5804 defm VROUNDPDY : sse41_fp_unop_p<0x09, "vroundpd", f256mem, VR256, v4f64,
5805 loadv4f64, X86VRndScale, SchedWriteFRnd.YMM>,
5806 VEX, VEX_L, VEX_WIG;
5809 let Predicates = [HasAVX, NoAVX512] in {
5810 defm VROUND : sse41_fp_binop_s<0x0A, 0x0B, "vround", SchedWriteFRnd.Scl,
5811 v4f32, v2f64, X86RndScales, 0>,
5812 VEX_4V, VEX_LIG, VEX_WIG;
5813 defm VROUND : avx_fp_unop_rm<0x0A, 0x0B, "vround", SchedWriteFRnd.Scl>,
5814 VEX_4V, VEX_LIG, VEX_WIG;
5817 let Predicates = [UseAVX] in {
5818 def : Pat<(ffloor FR32:$src),
5819 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x9))>;
5820 def : Pat<(f32 (fnearbyint FR32:$src)),
5821 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
5822 def : Pat<(f32 (fceil FR32:$src)),
5823 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xA))>;
5824 def : Pat<(f32 (frint FR32:$src)),
5825 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
5826 def : Pat<(f32 (ftrunc FR32:$src)),
5827 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xB))>;
5829 def : Pat<(f64 (ffloor FR64:$src)),
5830 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x9))>;
5831 def : Pat<(f64 (fnearbyint FR64:$src)),
5832 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
5833 def : Pat<(f64 (fceil FR64:$src)),
5834 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xA))>;
5835 def : Pat<(f64 (frint FR64:$src)),
5836 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
5837 def : Pat<(f64 (ftrunc FR64:$src)),
5838 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xB))>;
5841 let Predicates = [UseAVX, OptForSize] in {
5842 def : Pat<(ffloor (loadf32 addr:$src)),
5843 (VROUNDSSm (f32 (IMPLICIT_DEF)), addr:$src, (i32 0x9))>;
5844 def : Pat<(f32 (fnearbyint (loadf32 addr:$src))),
5845 (VROUNDSSm (f32 (IMPLICIT_DEF)), addr:$src, (i32 0xC))>;
5846 def : Pat<(f32 (fceil (loadf32 addr:$src))),
5847 (VROUNDSSm (f32 (IMPLICIT_DEF)), addr:$src, (i32 0xA))>;
5848 def : Pat<(f32 (frint (loadf32 addr:$src))),
5849 (VROUNDSSm (f32 (IMPLICIT_DEF)), addr:$src, (i32 0x4))>;
5850 def : Pat<(f32 (ftrunc (loadf32 addr:$src))),
5851 (VROUNDSSm (f32 (IMPLICIT_DEF)), addr:$src, (i32 0xB))>;
5853 def : Pat<(f64 (ffloor (loadf64 addr:$src))),
5854 (VROUNDSDm (f64 (IMPLICIT_DEF)), addr:$src, (i32 0x9))>;
5855 def : Pat<(f64 (fnearbyint (loadf64 addr:$src))),
5856 (VROUNDSDm (f64 (IMPLICIT_DEF)), addr:$src, (i32 0xC))>;
5857 def : Pat<(f64 (fceil (loadf64 addr:$src))),
5858 (VROUNDSDm (f64 (IMPLICIT_DEF)), addr:$src, (i32 0xA))>;
5859 def : Pat<(f64 (frint (loadf64 addr:$src))),
5860 (VROUNDSDm (f64 (IMPLICIT_DEF)), addr:$src, (i32 0x4))>;
5861 def : Pat<(f64 (ftrunc (loadf64 addr:$src))),
5862 (VROUNDSDm (f64 (IMPLICIT_DEF)), addr:$src, (i32 0xB))>;
5865 let Predicates = [HasAVX, NoVLX] in {
5866 def : Pat<(v4f32 (ffloor VR128:$src)),
5867 (VROUNDPSr VR128:$src, (i32 0x9))>;
5868 def : Pat<(v4f32 (fnearbyint VR128:$src)),
5869 (VROUNDPSr VR128:$src, (i32 0xC))>;
5870 def : Pat<(v4f32 (fceil VR128:$src)),
5871 (VROUNDPSr VR128:$src, (i32 0xA))>;
5872 def : Pat<(v4f32 (frint VR128:$src)),
5873 (VROUNDPSr VR128:$src, (i32 0x4))>;
5874 def : Pat<(v4f32 (ftrunc VR128:$src)),
5875 (VROUNDPSr VR128:$src, (i32 0xB))>;
5877 def : Pat<(v4f32 (ffloor (loadv4f32 addr:$src))),
5878 (VROUNDPSm addr:$src, (i32 0x9))>;
5879 def : Pat<(v4f32 (fnearbyint (loadv4f32 addr:$src))),
5880 (VROUNDPSm addr:$src, (i32 0xC))>;
5881 def : Pat<(v4f32 (fceil (loadv4f32 addr:$src))),
5882 (VROUNDPSm addr:$src, (i32 0xA))>;
5883 def : Pat<(v4f32 (frint (loadv4f32 addr:$src))),
5884 (VROUNDPSm addr:$src, (i32 0x4))>;
5885 def : Pat<(v4f32 (ftrunc (loadv4f32 addr:$src))),
5886 (VROUNDPSm addr:$src, (i32 0xB))>;
5888 def : Pat<(v2f64 (ffloor VR128:$src)),
5889 (VROUNDPDr VR128:$src, (i32 0x9))>;
5890 def : Pat<(v2f64 (fnearbyint VR128:$src)),
5891 (VROUNDPDr VR128:$src, (i32 0xC))>;
5892 def : Pat<(v2f64 (fceil VR128:$src)),
5893 (VROUNDPDr VR128:$src, (i32 0xA))>;
5894 def : Pat<(v2f64 (frint VR128:$src)),
5895 (VROUNDPDr VR128:$src, (i32 0x4))>;
5896 def : Pat<(v2f64 (ftrunc VR128:$src)),
5897 (VROUNDPDr VR128:$src, (i32 0xB))>;
5899 def : Pat<(v2f64 (ffloor (loadv2f64 addr:$src))),
5900 (VROUNDPDm addr:$src, (i32 0x9))>;
5901 def : Pat<(v2f64 (fnearbyint (loadv2f64 addr:$src))),
5902 (VROUNDPDm addr:$src, (i32 0xC))>;
5903 def : Pat<(v2f64 (fceil (loadv2f64 addr:$src))),
5904 (VROUNDPDm addr:$src, (i32 0xA))>;
5905 def : Pat<(v2f64 (frint (loadv2f64 addr:$src))),
5906 (VROUNDPDm addr:$src, (i32 0x4))>;
5907 def : Pat<(v2f64 (ftrunc (loadv2f64 addr:$src))),
5908 (VROUNDPDm addr:$src, (i32 0xB))>;
5910 def : Pat<(v8f32 (ffloor VR256:$src)),
5911 (VROUNDPSYr VR256:$src, (i32 0x9))>;
5912 def : Pat<(v8f32 (fnearbyint VR256:$src)),
5913 (VROUNDPSYr VR256:$src, (i32 0xC))>;
5914 def : Pat<(v8f32 (fceil VR256:$src)),
5915 (VROUNDPSYr VR256:$src, (i32 0xA))>;
5916 def : Pat<(v8f32 (frint VR256:$src)),
5917 (VROUNDPSYr VR256:$src, (i32 0x4))>;
5918 def : Pat<(v8f32 (ftrunc VR256:$src)),
5919 (VROUNDPSYr VR256:$src, (i32 0xB))>;
5921 def : Pat<(v8f32 (ffloor (loadv8f32 addr:$src))),
5922 (VROUNDPSYm addr:$src, (i32 0x9))>;
5923 def : Pat<(v8f32 (fnearbyint (loadv8f32 addr:$src))),
5924 (VROUNDPSYm addr:$src, (i32 0xC))>;
5925 def : Pat<(v8f32 (fceil (loadv8f32 addr:$src))),
5926 (VROUNDPSYm addr:$src, (i32 0xA))>;
5927 def : Pat<(v8f32 (frint (loadv8f32 addr:$src))),
5928 (VROUNDPSYm addr:$src, (i32 0x4))>;
5929 def : Pat<(v8f32 (ftrunc (loadv8f32 addr:$src))),
5930 (VROUNDPSYm addr:$src, (i32 0xB))>;
5932 def : Pat<(v4f64 (ffloor VR256:$src)),
5933 (VROUNDPDYr VR256:$src, (i32 0x9))>;
5934 def : Pat<(v4f64 (fnearbyint VR256:$src)),
5935 (VROUNDPDYr VR256:$src, (i32 0xC))>;
5936 def : Pat<(v4f64 (fceil VR256:$src)),
5937 (VROUNDPDYr VR256:$src, (i32 0xA))>;
5938 def : Pat<(v4f64 (frint VR256:$src)),
5939 (VROUNDPDYr VR256:$src, (i32 0x4))>;
5940 def : Pat<(v4f64 (ftrunc VR256:$src)),
5941 (VROUNDPDYr VR256:$src, (i32 0xB))>;
5943 def : Pat<(v4f64 (ffloor (loadv4f64 addr:$src))),
5944 (VROUNDPDYm addr:$src, (i32 0x9))>;
5945 def : Pat<(v4f64 (fnearbyint (loadv4f64 addr:$src))),
5946 (VROUNDPDYm addr:$src, (i32 0xC))>;
5947 def : Pat<(v4f64 (fceil (loadv4f64 addr:$src))),
5948 (VROUNDPDYm addr:$src, (i32 0xA))>;
5949 def : Pat<(v4f64 (frint (loadv4f64 addr:$src))),
5950 (VROUNDPDYm addr:$src, (i32 0x4))>;
5951 def : Pat<(v4f64 (ftrunc (loadv4f64 addr:$src))),
5952 (VROUNDPDYm addr:$src, (i32 0xB))>;
5955 let ExeDomain = SSEPackedSingle in
5956 defm ROUNDPS : sse41_fp_unop_p<0x08, "roundps", f128mem, VR128, v4f32,
5957 memopv4f32, X86VRndScale, SchedWriteFRnd.XMM>;
5958 let ExeDomain = SSEPackedDouble in
5959 defm ROUNDPD : sse41_fp_unop_p<0x09, "roundpd", f128mem, VR128, v2f64,
5960 memopv2f64, X86VRndScale, SchedWriteFRnd.XMM>;
5962 defm ROUND : sse41_fp_unop_s<0x0A, 0x0B, "round", SchedWriteFRnd.Scl>;
5964 let Constraints = "$src1 = $dst" in
5965 defm ROUND : sse41_fp_binop_s<0x0A, 0x0B, "round", SchedWriteFRnd.Scl,
5966 v4f32, v2f64, X86RndScales>;
5968 let Predicates = [UseSSE41] in {
5969 def : Pat<(ffloor FR32:$src),
5970 (ROUNDSSr FR32:$src, (i32 0x9))>;
5971 def : Pat<(f32 (fnearbyint FR32:$src)),
5972 (ROUNDSSr FR32:$src, (i32 0xC))>;
5973 def : Pat<(f32 (fceil FR32:$src)),
5974 (ROUNDSSr FR32:$src, (i32 0xA))>;
5975 def : Pat<(f32 (frint FR32:$src)),
5976 (ROUNDSSr FR32:$src, (i32 0x4))>;
5977 def : Pat<(f32 (ftrunc FR32:$src)),
5978 (ROUNDSSr FR32:$src, (i32 0xB))>;
5980 def : Pat<(f64 (ffloor FR64:$src)),
5981 (ROUNDSDr FR64:$src, (i32 0x9))>;
5982 def : Pat<(f64 (fnearbyint FR64:$src)),
5983 (ROUNDSDr FR64:$src, (i32 0xC))>;
5984 def : Pat<(f64 (fceil FR64:$src)),
5985 (ROUNDSDr FR64:$src, (i32 0xA))>;
5986 def : Pat<(f64 (frint FR64:$src)),
5987 (ROUNDSDr FR64:$src, (i32 0x4))>;
5988 def : Pat<(f64 (ftrunc FR64:$src)),
5989 (ROUNDSDr FR64:$src, (i32 0xB))>;
5992 let Predicates = [UseSSE41, OptForSize] in {
5993 def : Pat<(ffloor (loadf32 addr:$src)),
5994 (ROUNDSSm addr:$src, (i32 0x9))>;
5995 def : Pat<(f32 (fnearbyint (loadf32 addr:$src))),
5996 (ROUNDSSm addr:$src, (i32 0xC))>;
5997 def : Pat<(f32 (fceil (loadf32 addr:$src))),
5998 (ROUNDSSm addr:$src, (i32 0xA))>;
5999 def : Pat<(f32 (frint (loadf32 addr:$src))),
6000 (ROUNDSSm addr:$src, (i32 0x4))>;
6001 def : Pat<(f32 (ftrunc (loadf32 addr:$src))),
6002 (ROUNDSSm addr:$src, (i32 0xB))>;
6004 def : Pat<(f64 (ffloor (loadf64 addr:$src))),
6005 (ROUNDSDm addr:$src, (i32 0x9))>;
6006 def : Pat<(f64 (fnearbyint (loadf64 addr:$src))),
6007 (ROUNDSDm addr:$src, (i32 0xC))>;
6008 def : Pat<(f64 (fceil (loadf64 addr:$src))),
6009 (ROUNDSDm addr:$src, (i32 0xA))>;
6010 def : Pat<(f64 (frint (loadf64 addr:$src))),
6011 (ROUNDSDm addr:$src, (i32 0x4))>;
6012 def : Pat<(f64 (ftrunc (loadf64 addr:$src))),
6013 (ROUNDSDm addr:$src, (i32 0xB))>;
6016 let Predicates = [UseSSE41] in {
6017 def : Pat<(v4f32 (ffloor VR128:$src)),
6018 (ROUNDPSr VR128:$src, (i32 0x9))>;
6019 def : Pat<(v4f32 (fnearbyint VR128:$src)),
6020 (ROUNDPSr VR128:$src, (i32 0xC))>;
6021 def : Pat<(v4f32 (fceil VR128:$src)),
6022 (ROUNDPSr VR128:$src, (i32 0xA))>;
6023 def : Pat<(v4f32 (frint VR128:$src)),
6024 (ROUNDPSr VR128:$src, (i32 0x4))>;
6025 def : Pat<(v4f32 (ftrunc VR128:$src)),
6026 (ROUNDPSr VR128:$src, (i32 0xB))>;
6028 def : Pat<(v4f32 (ffloor (memopv4f32 addr:$src))),
6029 (ROUNDPSm addr:$src, (i32 0x9))>;
6030 def : Pat<(v4f32 (fnearbyint (memopv4f32 addr:$src))),
6031 (ROUNDPSm addr:$src, (i32 0xC))>;
6032 def : Pat<(v4f32 (fceil (memopv4f32 addr:$src))),
6033 (ROUNDPSm addr:$src, (i32 0xA))>;
6034 def : Pat<(v4f32 (frint (memopv4f32 addr:$src))),
6035 (ROUNDPSm addr:$src, (i32 0x4))>;
6036 def : Pat<(v4f32 (ftrunc (memopv4f32 addr:$src))),
6037 (ROUNDPSm addr:$src, (i32 0xB))>;
6039 def : Pat<(v2f64 (ffloor VR128:$src)),
6040 (ROUNDPDr VR128:$src, (i32 0x9))>;
6041 def : Pat<(v2f64 (fnearbyint VR128:$src)),
6042 (ROUNDPDr VR128:$src, (i32 0xC))>;
6043 def : Pat<(v2f64 (fceil VR128:$src)),
6044 (ROUNDPDr VR128:$src, (i32 0xA))>;
6045 def : Pat<(v2f64 (frint VR128:$src)),
6046 (ROUNDPDr VR128:$src, (i32 0x4))>;
6047 def : Pat<(v2f64 (ftrunc VR128:$src)),
6048 (ROUNDPDr VR128:$src, (i32 0xB))>;
6050 def : Pat<(v2f64 (ffloor (memopv2f64 addr:$src))),
6051 (ROUNDPDm addr:$src, (i32 0x9))>;
6052 def : Pat<(v2f64 (fnearbyint (memopv2f64 addr:$src))),
6053 (ROUNDPDm addr:$src, (i32 0xC))>;
6054 def : Pat<(v2f64 (fceil (memopv2f64 addr:$src))),
6055 (ROUNDPDm addr:$src, (i32 0xA))>;
6056 def : Pat<(v2f64 (frint (memopv2f64 addr:$src))),
6057 (ROUNDPDm addr:$src, (i32 0x4))>;
6058 def : Pat<(v2f64 (ftrunc (memopv2f64 addr:$src))),
6059 (ROUNDPDm addr:$src, (i32 0xB))>;
6062 defm : scalar_unary_math_imm_patterns<ffloor, "ROUNDSS", X86Movss,
6063 v4f32, 0x01, UseSSE41>;
6064 defm : scalar_unary_math_imm_patterns<fceil, "ROUNDSS", X86Movss,
6065 v4f32, 0x02, UseSSE41>;
6066 defm : scalar_unary_math_imm_patterns<ffloor, "ROUNDSD", X86Movsd,
6067 v2f64, 0x01, UseSSE41>;
6068 defm : scalar_unary_math_imm_patterns<fceil, "ROUNDSD", X86Movsd,
6069 v2f64, 0x02, UseSSE41>;
6071 //===----------------------------------------------------------------------===//
6072 // SSE4.1 - Packed Bit Test
6073 //===----------------------------------------------------------------------===//
6075 // ptest instruction: we lower to this in X86ISelLowering, primarily from the
6076 // Intel intrinsic that corresponds to it.
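// ptest writes no register result; it sets ZF when the bitwise AND of its two
// operands is all zeros and CF when every set bit of the second operand is
// also set in the first.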
6077 let Defs = [EFLAGS], Predicates = [HasAVX] in {
6078 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
6079 "vptest\t{$src2, $src1|$src1, $src2}",
6080 [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
6081 Sched<[SchedWriteVecTest.XMM]>, VEX, VEX_WIG;
6082 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
6083 "vptest\t{$src2, $src1|$src1, $src2}",
6084 [(set EFLAGS,(X86ptest VR128:$src1, (loadv2i64 addr:$src2)))]>,
6085 Sched<[SchedWriteVecTest.XMM.Folded, SchedWriteVecTest.XMM.ReadAfterFold]>,
6088 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
6089 "vptest\t{$src2, $src1|$src1, $src2}",
6090 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
6091 Sched<[SchedWriteVecTest.YMM]>, VEX, VEX_L, VEX_WIG;
6092 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
6093 "vptest\t{$src2, $src1|$src1, $src2}",
6094 [(set EFLAGS,(X86ptest VR256:$src1, (loadv4i64 addr:$src2)))]>,
6095 Sched<[SchedWriteVecTest.YMM.Folded, SchedWriteVecTest.YMM.ReadAfterFold]>,
6096 VEX, VEX_L, VEX_WIG;
6099 let Defs = [EFLAGS] in {
6100 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
6101 "ptest\t{$src2, $src1|$src1, $src2}",
6102 [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
6103 Sched<[SchedWriteVecTest.XMM]>;
6104 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
6105 "ptest\t{$src2, $src1|$src1, $src2}",
6106 [(set EFLAGS, (X86ptest VR128:$src1, (memopv2i64 addr:$src2)))]>,
6107 Sched<[SchedWriteVecTest.XMM.Folded, SchedWriteVecTest.XMM.ReadAfterFold]>;
6110 // The bit test instructions below are AVX only
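// vtestps/vtestpd behave like ptest, but only the sign bit of each packed
// single/double precision element participates in the ZF and CF computation.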
6111 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
6112 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt,
6113 X86FoldableSchedWrite sched> {
6114 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
6115 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
6116 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>,
6117 Sched<[sched]>, VEX;
6118 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
6119 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
6120 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
6121 Sched<[sched.Folded, sched.ReadAfterFold]>, VEX;
6124 let Defs = [EFLAGS], Predicates = [HasAVX] in {
6125 let ExeDomain = SSEPackedSingle in {
6126 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, loadv4f32, v4f32,
6127 SchedWriteFTest.XMM>;
6128 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, loadv8f32, v8f32,
6129 SchedWriteFTest.YMM>, VEX_L;
6131 let ExeDomain = SSEPackedDouble in {
6132 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, loadv2f64, v2f64,
6133 SchedWriteFTest.XMM>;
6134 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, loadv4f64, v4f64,
6135 SchedWriteFTest.YMM>, VEX_L;
6139 //===----------------------------------------------------------------------===//
6140 // SSE4.1 - Misc Instructions
6141 //===----------------------------------------------------------------------===//
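// popcnt counts the number of set bits in its source. It also defines EFLAGS:
// ZF is set when the source is zero and the remaining arithmetic flags are
// cleared.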
6143 let Defs = [EFLAGS], Predicates = [HasPOPCNT] in {
6144 def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
6145 "popcnt{w}\t{$src, $dst|$dst, $src}",
6146 [(set GR16:$dst, (ctpop GR16:$src)), (implicit EFLAGS)]>,
6147 Sched<[WritePOPCNT]>, OpSize16, XS;
6148 def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
6149 "popcnt{w}\t{$src, $dst|$dst, $src}",
6150 [(set GR16:$dst, (ctpop (loadi16 addr:$src))),
6151 (implicit EFLAGS)]>,
6152 Sched<[WritePOPCNT.Folded]>, OpSize16, XS;
6154 def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
6155 "popcnt{l}\t{$src, $dst|$dst, $src}",
6156 [(set GR32:$dst, (ctpop GR32:$src)), (implicit EFLAGS)]>,
6157 Sched<[WritePOPCNT]>, OpSize32, XS;
6159 def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
6160 "popcnt{l}\t{$src, $dst|$dst, $src}",
6161 [(set GR32:$dst, (ctpop (loadi32 addr:$src))),
6162 (implicit EFLAGS)]>,
6163 Sched<[WritePOPCNT.Folded]>, OpSize32, XS;
6165 def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
6166 "popcnt{q}\t{$src, $dst|$dst, $src}",
6167 [(set GR64:$dst, (ctpop GR64:$src)), (implicit EFLAGS)]>,
6168 Sched<[WritePOPCNT]>, XS;
6169 def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
6170 "popcnt{q}\t{$src, $dst|$dst, $src}",
6171 [(set GR64:$dst, (ctpop (loadi64 addr:$src))),
6172 (implicit EFLAGS)]>,
6173 Sched<[WritePOPCNT.Folded]>, XS;
6176 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
6177 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
6178 SDNode OpNode, PatFrag ld_frag,
6179 X86FoldableSchedWrite Sched> {
6180 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
6182 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
6183 [(set VR128:$dst, (v8i16 (OpNode (v8i16 VR128:$src))))]>,
6185 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
6187 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
6189 (v8i16 (OpNode (ld_frag addr:$src))))]>,
6190 Sched<[Sched.Folded]>;
6193 // PHMIN has the same profile as PSAD, thus we use the same scheduling
6194 // model, although the naming is misleading.
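// phminposuw places the minimum unsigned 16-bit element of the source in the
// low word of the destination, its index in bits [18:16], and zeroes the rest.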
6195 let Predicates = [HasAVX] in
6196 defm VPHMINPOSUW : SS41I_unop_rm_int_v16<0x41, "vphminposuw",
6198 WritePHMINPOS>, VEX, VEX_WIG;
6199 defm PHMINPOSUW : SS41I_unop_rm_int_v16<0x41, "phminposuw",
6203 /// SS48I_binop_rm - Simple SSE41 binary operator.
6204 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
6205 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
6206 X86MemOperand x86memop, X86FoldableSchedWrite sched,
6208 let isCommutable = 1 in
6209 def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
6210 (ins RC:$src1, RC:$src2),
6212 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6213 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6214 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>,
6216 def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
6217 (ins RC:$src1, x86memop:$src2),
6219 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6220 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6222 (OpVT (OpNode RC:$src1, (memop_frag addr:$src2))))]>,
6223 Sched<[sched.Folded, sched.ReadAfterFold]>;
6226 let Predicates = [HasAVX, NoVLX] in {
6227 defm VPMINSD : SS48I_binop_rm<0x39, "vpminsd", smin, v4i32, VR128,
6228 load, i128mem, SchedWriteVecALU.XMM, 0>,
6230 defm VPMINUD : SS48I_binop_rm<0x3B, "vpminud", umin, v4i32, VR128,
6231 load, i128mem, SchedWriteVecALU.XMM, 0>,
6233 defm VPMAXSD : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v4i32, VR128,
6234 load, i128mem, SchedWriteVecALU.XMM, 0>,
6236 defm VPMAXUD : SS48I_binop_rm<0x3F, "vpmaxud", umax, v4i32, VR128,
6237 load, i128mem, SchedWriteVecALU.XMM, 0>,
6239 defm VPMULDQ : SS48I_binop_rm<0x28, "vpmuldq", X86pmuldq, v2i64, VR128,
6240 load, i128mem, SchedWriteVecIMul.XMM, 0>,
6243 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
6244 defm VPMINSB : SS48I_binop_rm<0x38, "vpminsb", smin, v16i8, VR128,
6245 load, i128mem, SchedWriteVecALU.XMM, 0>,
6247 defm VPMINUW : SS48I_binop_rm<0x3A, "vpminuw", umin, v8i16, VR128,
6248 load, i128mem, SchedWriteVecALU.XMM, 0>,
6250 defm VPMAXSB : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v16i8, VR128,
6251 load, i128mem, SchedWriteVecALU.XMM, 0>,
6253 defm VPMAXUW : SS48I_binop_rm<0x3E, "vpmaxuw", umax, v8i16, VR128,
6254 load, i128mem, SchedWriteVecALU.XMM, 0>,
6258 let Predicates = [HasAVX2, NoVLX] in {
6259 defm VPMINSDY : SS48I_binop_rm<0x39, "vpminsd", smin, v8i32, VR256,
6260 load, i256mem, SchedWriteVecALU.YMM, 0>,
6261 VEX_4V, VEX_L, VEX_WIG;
6262 defm VPMINUDY : SS48I_binop_rm<0x3B, "vpminud", umin, v8i32, VR256,
6263 load, i256mem, SchedWriteVecALU.YMM, 0>,
6264 VEX_4V, VEX_L, VEX_WIG;
6265 defm VPMAXSDY : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v8i32, VR256,
6266 load, i256mem, SchedWriteVecALU.YMM, 0>,
6267 VEX_4V, VEX_L, VEX_WIG;
6268 defm VPMAXUDY : SS48I_binop_rm<0x3F, "vpmaxud", umax, v8i32, VR256,
6269 load, i256mem, SchedWriteVecALU.YMM, 0>,
6270 VEX_4V, VEX_L, VEX_WIG;
6271 defm VPMULDQY : SS48I_binop_rm<0x28, "vpmuldq", X86pmuldq, v4i64, VR256,
6272 load, i256mem, SchedWriteVecIMul.YMM, 0>,
6273 VEX_4V, VEX_L, VEX_WIG;
6275 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
6276 defm VPMINSBY : SS48I_binop_rm<0x38, "vpminsb", smin, v32i8, VR256,
6277 load, i256mem, SchedWriteVecALU.YMM, 0>,
6278 VEX_4V, VEX_L, VEX_WIG;
6279 defm VPMINUWY : SS48I_binop_rm<0x3A, "vpminuw", umin, v16i16, VR256,
6280 load, i256mem, SchedWriteVecALU.YMM, 0>,
6281 VEX_4V, VEX_L, VEX_WIG;
6282 defm VPMAXSBY : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v32i8, VR256,
6283 load, i256mem, SchedWriteVecALU.YMM, 0>,
6284 VEX_4V, VEX_L, VEX_WIG;
6285 defm VPMAXUWY : SS48I_binop_rm<0x3E, "vpmaxuw", umax, v16i16, VR256,
6286 load, i256mem, SchedWriteVecALU.YMM, 0>,
6287 VEX_4V, VEX_L, VEX_WIG;
6290 let Constraints = "$src1 = $dst" in {
6291 defm PMINSB : SS48I_binop_rm<0x38, "pminsb", smin, v16i8, VR128,
6292 memop, i128mem, SchedWriteVecALU.XMM, 1>;
6293 defm PMINSD : SS48I_binop_rm<0x39, "pminsd", smin, v4i32, VR128,
6294 memop, i128mem, SchedWriteVecALU.XMM, 1>;
6295 defm PMINUD : SS48I_binop_rm<0x3B, "pminud", umin, v4i32, VR128,
6296 memop, i128mem, SchedWriteVecALU.XMM, 1>;
6297 defm PMINUW : SS48I_binop_rm<0x3A, "pminuw", umin, v8i16, VR128,
6298 memop, i128mem, SchedWriteVecALU.XMM, 1>;
6299 defm PMAXSB : SS48I_binop_rm<0x3C, "pmaxsb", smax, v16i8, VR128,
6300 memop, i128mem, SchedWriteVecALU.XMM, 1>;
6301 defm PMAXSD : SS48I_binop_rm<0x3D, "pmaxsd", smax, v4i32, VR128,
6302 memop, i128mem, SchedWriteVecALU.XMM, 1>;
6303 defm PMAXUD : SS48I_binop_rm<0x3F, "pmaxud", umax, v4i32, VR128,
6304 memop, i128mem, SchedWriteVecALU.XMM, 1>;
6305 defm PMAXUW : SS48I_binop_rm<0x3E, "pmaxuw", umax, v8i16, VR128,
6306 memop, i128mem, SchedWriteVecALU.XMM, 1>;
6307 defm PMULDQ : SS48I_binop_rm<0x28, "pmuldq", X86pmuldq, v2i64, VR128,
6308 memop, i128mem, SchedWriteVecIMul.XMM, 1>;
6311 let Predicates = [HasAVX, NoVLX] in
6312 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, VR128,
6313 load, i128mem, SchedWritePMULLD.XMM, 0>,
6315 let Predicates = [HasAVX] in
6316 defm VPCMPEQQ : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v2i64, VR128,
6317 load, i128mem, SchedWriteVecALU.XMM, 0>,
6320 let Predicates = [HasAVX2, NoVLX] in
6321 defm VPMULLDY : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256,
6322 load, i256mem, SchedWritePMULLD.YMM, 0>,
6323 VEX_4V, VEX_L, VEX_WIG;
6324 let Predicates = [HasAVX2] in
6325 defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256,
6326 load, i256mem, SchedWriteVecALU.YMM, 0>,
6327 VEX_4V, VEX_L, VEX_WIG;
6329 let Constraints = "$src1 = $dst" in {
6330 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32, VR128,
6331 memop, i128mem, SchedWritePMULLD.XMM, 1>;
6332 defm PCMPEQQ : SS48I_binop_rm<0x29, "pcmpeqq", X86pcmpeq, v2i64, VR128,
6333 memop, i128mem, SchedWriteVecALU.XMM, 1>;
6336 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
6337 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
6338 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
6339 X86MemOperand x86memop, bit Is2Addr,
6340 X86FoldableSchedWrite sched> {
6341 let isCommutable = 1 in
6342 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
6343 (ins RC:$src1, RC:$src2, u8imm:$src3),
6345 !strconcat(OpcodeStr,
6346 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6347 !strconcat(OpcodeStr,
6348 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6349 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
6351 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
6352 (ins RC:$src1, x86memop:$src2, u8imm:$src3),
6354 !strconcat(OpcodeStr,
6355 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6356 !strconcat(OpcodeStr,
6357 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6359 (IntId RC:$src1, (memop_frag addr:$src2), imm:$src3))]>,
6360 Sched<[sched.Folded, sched.ReadAfterFold]>;
6363 /// SS41I_binop_rmi - SSE 4.1 binary operator with 8-bit immediate
6364 multiclass SS41I_binop_rmi<bits<8> opc, string OpcodeStr, SDNode OpNode,
6365 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
6366 X86MemOperand x86memop, bit Is2Addr,
6367 X86FoldableSchedWrite sched> {
6368 let isCommutable = 1 in
6369 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
6370 (ins RC:$src1, RC:$src2, u8imm:$src3),
6372 !strconcat(OpcodeStr,
6373 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6374 !strconcat(OpcodeStr,
6375 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6376 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2, imm:$src3)))]>,
6378 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
6379 (ins RC:$src1, x86memop:$src2, u8imm:$src3),
6381 !strconcat(OpcodeStr,
6382 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6383 !strconcat(OpcodeStr,
6384 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6386 (OpVT (OpNode RC:$src1, (memop_frag addr:$src2), imm:$src3)))]>,
6387 Sched<[sched.Folded, sched.ReadAfterFold]>;
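// Commuting a blend swaps the roles of its two sources, so the select
// immediate must be inverted: XOR it with the all-ones mask for the given
// element count (0x3, 0xf or 0xff below).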
6390 def BlendCommuteImm2 : SDNodeXForm<imm, [{
6391 uint8_t Imm = N->getZExtValue() & 0x03;
6392 return getI8Imm(Imm ^ 0x03, SDLoc(N));
6395 def BlendCommuteImm4 : SDNodeXForm<imm, [{
6396 uint8_t Imm = N->getZExtValue() & 0x0f;
6397 return getI8Imm(Imm ^ 0x0f, SDLoc(N));
6400 def BlendCommuteImm8 : SDNodeXForm<imm, [{
6401 uint8_t Imm = N->getZExtValue() & 0xff;
6402 return getI8Imm(Imm ^ 0xff, SDLoc(N));
6405 let Predicates = [HasAVX] in {
6406 let isCommutable = 0 in {
6407 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
6408 VR128, load, i128mem, 0,
6409 SchedWriteMPSAD.XMM>, VEX_4V, VEX_WIG;
6412 let ExeDomain = SSEPackedSingle in
6413 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
6414 VR128, load, f128mem, 0,
6415 SchedWriteDPPS.XMM>, VEX_4V, VEX_WIG;
6416 let ExeDomain = SSEPackedDouble in
6417 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
6418 VR128, load, f128mem, 0,
6419 SchedWriteDPPD.XMM>, VEX_4V, VEX_WIG;
6420 let ExeDomain = SSEPackedSingle in
6421 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
6422 VR256, load, i256mem, 0,
6423 SchedWriteDPPS.YMM>, VEX_4V, VEX_L, VEX_WIG;
6426 let Predicates = [HasAVX2] in {
6427 let isCommutable = 0 in {
6428 defm VMPSADBWY : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_avx2_mpsadbw,
6429 VR256, load, i256mem, 0,
6430 SchedWriteMPSAD.YMM>, VEX_4V, VEX_L, VEX_WIG;
6434 let Constraints = "$src1 = $dst" in {
6435 let isCommutable = 0 in {
6436 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
6437 VR128, memop, i128mem, 1,
6438 SchedWriteMPSAD.XMM>;
6441 let ExeDomain = SSEPackedSingle in
6442 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
6443 VR128, memop, f128mem, 1,
6444 SchedWriteDPPS.XMM>;
6445 let ExeDomain = SSEPackedDouble in
6446 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
6447 VR128, memop, f128mem, 1,
6448 SchedWriteDPPD.XMM>;
6451 /// SS41I_blend_rmi - SSE 4.1 blend with 8-bit immediate
6452 multiclass SS41I_blend_rmi<bits<8> opc, string OpcodeStr, SDNode OpNode,
6453 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
6454 X86MemOperand x86memop, bit Is2Addr, Domain d,
6455 X86FoldableSchedWrite sched, SDNodeXForm commuteXForm> {
6456 let ExeDomain = d, Constraints = !if(Is2Addr, "$src1 = $dst", "") in {
6457 let isCommutable = 1 in
6458 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
6459 (ins RC:$src1, RC:$src2, u8imm:$src3),
6461 !strconcat(OpcodeStr,
6462 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6463 !strconcat(OpcodeStr,
6464 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6465 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2, imm:$src3)))]>,
6467 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
6468 (ins RC:$src1, x86memop:$src2, u8imm:$src3),
6470 !strconcat(OpcodeStr,
6471 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6472 !strconcat(OpcodeStr,
6473 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6475 (OpVT (OpNode RC:$src1, (memop_frag addr:$src2), imm:$src3)))]>,
6476 Sched<[sched.Folded, sched.ReadAfterFold]>;
6479 // Pattern to commute if load is in first source.
6480 def : Pat<(OpVT (OpNode (memop_frag addr:$src2), RC:$src1, imm:$src3)),
6481 (!cast<Instruction>(NAME#"rmi") RC:$src1, addr:$src2,
6482 (commuteXForm imm:$src3))>;
6485 let Predicates = [HasAVX] in {
6486 defm VBLENDPS : SS41I_blend_rmi<0x0C, "vblendps", X86Blendi, v4f32,
6487 VR128, load, f128mem, 0, SSEPackedSingle,
6488 SchedWriteFBlend.XMM, BlendCommuteImm4>,
6490 defm VBLENDPSY : SS41I_blend_rmi<0x0C, "vblendps", X86Blendi, v8f32,
6491 VR256, load, f256mem, 0, SSEPackedSingle,
6492 SchedWriteFBlend.YMM, BlendCommuteImm8>,
6493 VEX_4V, VEX_L, VEX_WIG;
6494 defm VBLENDPD : SS41I_blend_rmi<0x0D, "vblendpd", X86Blendi, v2f64,
6495 VR128, load, f128mem, 0, SSEPackedDouble,
6496 SchedWriteFBlend.XMM, BlendCommuteImm2>,
6498 defm VBLENDPDY : SS41I_blend_rmi<0x0D, "vblendpd", X86Blendi, v4f64,
6499 VR256, load, f256mem, 0, SSEPackedDouble,
6500 SchedWriteFBlend.YMM, BlendCommuteImm4>,
6501 VEX_4V, VEX_L, VEX_WIG;
6502 defm VPBLENDW : SS41I_blend_rmi<0x0E, "vpblendw", X86Blendi, v8i16,
6503 VR128, load, i128mem, 0, SSEPackedInt,
6504 SchedWriteBlend.XMM, BlendCommuteImm8>,
6508 let Predicates = [HasAVX2] in {
6509 defm VPBLENDWY : SS41I_blend_rmi<0x0E, "vpblendw", X86Blendi, v16i16,
6510 VR256, load, i256mem, 0, SSEPackedInt,
6511 SchedWriteBlend.YMM, BlendCommuteImm8>,
6512 VEX_4V, VEX_L, VEX_WIG;
6515 defm BLENDPS : SS41I_blend_rmi<0x0C, "blendps", X86Blendi, v4f32,
6516 VR128, memop, f128mem, 1, SSEPackedSingle,
6517 SchedWriteFBlend.XMM, BlendCommuteImm4>;
6518 defm BLENDPD : SS41I_blend_rmi<0x0D, "blendpd", X86Blendi, v2f64,
6519 VR128, memop, f128mem, 1, SSEPackedDouble,
6520 SchedWriteFBlend.XMM, BlendCommuteImm2>;
6521 defm PBLENDW : SS41I_blend_rmi<0x0E, "pblendw", X86Blendi, v8i16,
6522 VR128, memop, i128mem, 1, SSEPackedInt,
6523 SchedWriteBlend.XMM, BlendCommuteImm8>;
6525 // For insertion into the zero index (low half) of a 256-bit vector, it is
6526 // more efficient to generate a blend with immediate instead of an insert*128.
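// For example, inserting a v2f64 value at index 0 of a v4f64 register becomes
// a vblendpd with immediate 0x3: the two low elements are taken from the
// (implicitly widened) new value and the two high elements from the original.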
6527 let Predicates = [HasAVX] in {
6528 def : Pat<(insert_subvector (v4f64 VR256:$src1), (v2f64 VR128:$src2), (iPTR 0)),
6529 (VBLENDPDYrri VR256:$src1,
6530 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
6531 VR128:$src2, sub_xmm), 0x3)>;
6532 def : Pat<(insert_subvector (v8f32 VR256:$src1), (v4f32 VR128:$src2), (iPTR 0)),
6533 (VBLENDPSYrri VR256:$src1,
6534 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
6535 VR128:$src2, sub_xmm), 0xf)>;
6538 /// SS41I_quaternary_avx - AVX SSE 4.1 with 4 operands
6539 multiclass SS41I_quaternary_avx<bits<8> opc, string OpcodeStr, RegisterClass RC,
6540 X86MemOperand x86memop, ValueType VT,
6541 PatFrag mem_frag, SDNode OpNode,
6542 X86FoldableSchedWrite sched> {
6543 def rr : Ii8Reg<opc, MRMSrcReg, (outs RC:$dst),
6544 (ins RC:$src1, RC:$src2, RC:$src3),
6545 !strconcat(OpcodeStr,
6546 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
6547 [(set RC:$dst, (VT (OpNode RC:$src3, RC:$src2, RC:$src1)))],
6548 SSEPackedInt>, TAPD, VEX_4V,
6551 def rm : Ii8Reg<opc, MRMSrcMem, (outs RC:$dst),
6552 (ins RC:$src1, x86memop:$src2, RC:$src3),
6553 !strconcat(OpcodeStr,
6554 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
6556 (OpNode RC:$src3, (mem_frag addr:$src2),
6557 RC:$src1))], SSEPackedInt>, TAPD, VEX_4V,
6558 Sched<[sched.Folded, sched.ReadAfterFold,
6560 ReadDefault, ReadDefault, ReadDefault, ReadDefault,
6563 sched.ReadAfterFold]>;
6566 let Predicates = [HasAVX] in {
6567 let ExeDomain = SSEPackedDouble in {
6568 defm VBLENDVPD : SS41I_quaternary_avx<0x4B, "vblendvpd", VR128, f128mem,
6569 v2f64, loadv2f64, X86Blendv,
6570 SchedWriteFVarBlend.XMM>;
6571 defm VBLENDVPDY : SS41I_quaternary_avx<0x4B, "vblendvpd", VR256, f256mem,
6572 v4f64, loadv4f64, X86Blendv,
6573 SchedWriteFVarBlend.YMM>, VEX_L;
6574 } // ExeDomain = SSEPackedDouble
6575 let ExeDomain = SSEPackedSingle in {
6576 defm VBLENDVPS : SS41I_quaternary_avx<0x4A, "vblendvps", VR128, f128mem,
6577 v4f32, loadv4f32, X86Blendv,
6578 SchedWriteFVarBlend.XMM>;
6579 defm VBLENDVPSY : SS41I_quaternary_avx<0x4A, "vblendvps", VR256, f256mem,
6580 v8f32, loadv8f32, X86Blendv,
6581 SchedWriteFVarBlend.YMM>, VEX_L;
6582 } // ExeDomain = SSEPackedSingle
6583 defm VPBLENDVB : SS41I_quaternary_avx<0x4C, "vpblendvb", VR128, i128mem,
6584 v16i8, loadv16i8, X86Blendv,
6585 SchedWriteVarBlend.XMM>;
6588 let Predicates = [HasAVX2] in {
6589 defm VPBLENDVBY : SS41I_quaternary_avx<0x4C, "vpblendvb", VR256, i256mem,
6590 v32i8, loadv32i8, X86Blendv,
6591 SchedWriteVarBlend.YMM>, VEX_L;
6594 let Predicates = [HasAVX] in {
6595 def : Pat<(v4i32 (X86Blendv (v4i32 VR128:$mask), (v4i32 VR128:$src1),
6596 (v4i32 VR128:$src2))),
6597 (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
6598 def : Pat<(v2i64 (X86Blendv (v2i64 VR128:$mask), (v2i64 VR128:$src1),
6599 (v2i64 VR128:$src2))),
6600 (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
6601 def : Pat<(v8i32 (X86Blendv (v8i32 VR256:$mask), (v8i32 VR256:$src1),
6602 (v8i32 VR256:$src2))),
6603 (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
6604 def : Pat<(v4i64 (X86Blendv (v4i64 VR256:$mask), (v4i64 VR256:$src1),
6605 (v4i64 VR256:$src2))),
6606 (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
6609 // Prefer a movss or movsd over a blendps when optimizing for size. These were
6610 // changed to use blends because blends have better throughput on Sandy Bridge
6611 // and Haswell, but movs[s/d] are 1-2 byte shorter instructions.
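// Blending against an all-zeros register with immediate 1 (3 for pblendw,
// which selects 16-bit words) keeps only the low element of the source and
// zeroes the rest, which is exactly the X86vzmovl behaviour matched below.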
6612 let Predicates = [HasAVX, OptForSpeed] in {
6613 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
6614 (VBLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
6615 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
6616 (VPBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
6618 def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
6619 (VBLENDPSrri VR128:$src1, VR128:$src2, (i8 1))>;
6620 def : Pat<(v4f32 (X86Movss VR128:$src1, (loadv4f32 addr:$src2))),
6621 (VBLENDPSrmi VR128:$src1, addr:$src2, (i8 1))>;
6622 def : Pat<(v4f32 (X86Movss (loadv4f32 addr:$src2), VR128:$src1)),
6623 (VBLENDPSrmi VR128:$src1, addr:$src2, (i8 0xe))>;
6625 def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
6626 (VBLENDPDrri VR128:$src1, VR128:$src2, (i8 1))>;
6627 def : Pat<(v2f64 (X86Movsd VR128:$src1, (loadv2f64 addr:$src2))),
6628 (VBLENDPDrmi VR128:$src1, addr:$src2, (i8 1))>;
6629 def : Pat<(v2f64 (X86Movsd (loadv2f64 addr:$src2), VR128:$src1)),
6630 (VBLENDPDrmi VR128:$src1, addr:$src2, (i8 2))>;
6632 // Move low f32 and clear high bits.
6633 def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
6634 (SUBREG_TO_REG (i32 0),
6635 (v4f32 (VBLENDPSrri (v4f32 (V_SET0)),
6636 (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm)),
6637 (i8 1))), sub_xmm)>;
6638 def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
6639 (SUBREG_TO_REG (i32 0),
6640 (v4i32 (VPBLENDWrri (v4i32 (V_SET0)),
6641 (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm)),
6642 (i8 3))), sub_xmm)>;
6644 def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
6645 (SUBREG_TO_REG (i32 0),
6646 (v2f64 (VBLENDPDrri (v2f64 (V_SET0)),
6647 (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm)),
6648 (i8 1))), sub_xmm)>;
6649 def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),
6650 (SUBREG_TO_REG (i32 0),
6651 (v2i64 (VPBLENDWrri (v2i64 (V_SET0)),
6652 (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm)),
6653 (i8 0xf))), sub_xmm)>;
6656 // Prefer a movss or movsd over a blendps when optimizing for size. These were
6657 // changed to use blends because blends have better throughput on Sandy Bridge
6658 // and Haswell, but movs[s/d] are 1-2 byte shorter instructions.
6659 let Predicates = [UseSSE41, OptForSpeed] in {
6660 // With SSE41 we can use blends for these patterns.
6661 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
6662 (BLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
6663 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
6664 (PBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
6666 def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
6667 (BLENDPSrri VR128:$src1, VR128:$src2, (i8 1))>;
6668 def : Pat<(v4f32 (X86Movss VR128:$src1, (memopv4f32 addr:$src2))),
6669 (BLENDPSrmi VR128:$src1, addr:$src2, (i8 1))>;
6670 def : Pat<(v4f32 (X86Movss (memopv4f32 addr:$src2), VR128:$src1)),
6671 (BLENDPSrmi VR128:$src1, addr:$src2, (i8 0xe))>;
6673 def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
6674 (BLENDPDrri VR128:$src1, VR128:$src2, (i8 1))>;
6675 def : Pat<(v2f64 (X86Movsd VR128:$src1, (memopv2f64 addr:$src2))),
6676 (BLENDPDrmi VR128:$src1, addr:$src2, (i8 1))>;
6677 def : Pat<(v2f64 (X86Movsd (memopv2f64 addr:$src2), VR128:$src1)),
6678 (BLENDPDrmi VR128:$src1, addr:$src2, (i8 2))>;
6682 /// SS41I_ternary - SSE 4.1 ternary operator
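// The non-VEX blendv instructions read their selection mask implicitly from
// XMM0, hence the Uses = [XMM0] below and the assembler aliases further down.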
6683 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
6684 multiclass SS41I_ternary<bits<8> opc, string OpcodeStr, ValueType VT,
6685 PatFrag mem_frag, X86MemOperand x86memop,
6686 SDNode OpNode, X86FoldableSchedWrite sched> {
6687 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
6688 (ins VR128:$src1, VR128:$src2),
6689 !strconcat(OpcodeStr,
6690 "\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}"),
6692 (VT (OpNode XMM0, VR128:$src2, VR128:$src1)))]>,
6695 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
6696 (ins VR128:$src1, x86memop:$src2),
6697 !strconcat(OpcodeStr,
6698 "\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}"),
6700 (OpNode XMM0, (mem_frag addr:$src2), VR128:$src1))]>,
6701 Sched<[sched.Folded, sched.ReadAfterFold]>;
6705 let ExeDomain = SSEPackedDouble in
6706 defm BLENDVPD : SS41I_ternary<0x15, "blendvpd", v2f64, memopv2f64, f128mem,
6707 X86Blendv, SchedWriteFVarBlend.XMM>;
6708 let ExeDomain = SSEPackedSingle in
6709 defm BLENDVPS : SS41I_ternary<0x14, "blendvps", v4f32, memopv4f32, f128mem,
6710 X86Blendv, SchedWriteFVarBlend.XMM>;
6711 defm PBLENDVB : SS41I_ternary<0x10, "pblendvb", v16i8, memopv16i8, i128mem,
6712 X86Blendv, SchedWriteVarBlend.XMM>;
6714 // Aliases with the implicit xmm0 argument
6715 def : InstAlias<"blendvpd\t{$src2, $dst|$dst, $src2}",
6716 (BLENDVPDrr0 VR128:$dst, VR128:$src2), 0>;
6717 def : InstAlias<"blendvpd\t{$src2, $dst|$dst, $src2}",
6718 (BLENDVPDrm0 VR128:$dst, f128mem:$src2), 0>;
6719 def : InstAlias<"blendvps\t{$src2, $dst|$dst, $src2}",
6720 (BLENDVPSrr0 VR128:$dst, VR128:$src2), 0>;
6721 def : InstAlias<"blendvps\t{$src2, $dst|$dst, $src2}",
6722 (BLENDVPSrm0 VR128:$dst, f128mem:$src2), 0>;
6723 def : InstAlias<"pblendvb\t{$src2, $dst|$dst, $src2}",
6724 (PBLENDVBrr0 VR128:$dst, VR128:$src2), 0>;
6725 def : InstAlias<"pblendvb\t{$src2, $dst|$dst, $src2}",
6726 (PBLENDVBrm0 VR128:$dst, i128mem:$src2), 0>;
6728 let Predicates = [UseSSE41] in {
6729 def : Pat<(v4i32 (X86Blendv (v4i32 XMM0), (v4i32 VR128:$src1),
6730 (v4i32 VR128:$src2))),
6731 (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
6732 def : Pat<(v2i64 (X86Blendv (v2i64 XMM0), (v2i64 VR128:$src1),
6733 (v2i64 VR128:$src2))),
6734 (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
6737 let AddedComplexity = 400 in { // Prefer non-temporal versions
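// movntdqa is an aligned vector load with a non-temporal (streaming) hint;
// the patterns below use it for aligned non-temporal loads of each 128-bit
// and 256-bit vector type.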
6739 let Predicates = [HasAVX, NoVLX] in
6740 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
6741 "vmovntdqa\t{$src, $dst|$dst, $src}", []>,
6742 Sched<[SchedWriteVecMoveLSNT.XMM.RM]>, VEX, VEX_WIG;
6743 let Predicates = [HasAVX2, NoVLX] in
6744 def VMOVNTDQAYrm : SS48I<0x2A, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
6745 "vmovntdqa\t{$src, $dst|$dst, $src}", []>,
6746 Sched<[SchedWriteVecMoveLSNT.YMM.RM]>, VEX, VEX_L, VEX_WIG;
6747 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
6748 "movntdqa\t{$src, $dst|$dst, $src}", []>,
6749 Sched<[SchedWriteVecMoveLSNT.XMM.RM]>;
6751 let Predicates = [HasAVX2, NoVLX] in {
6752 def : Pat<(v8f32 (alignednontemporalload addr:$src)),
6753 (VMOVNTDQAYrm addr:$src)>;
6754 def : Pat<(v4f64 (alignednontemporalload addr:$src)),
6755 (VMOVNTDQAYrm addr:$src)>;
6756 def : Pat<(v4i64 (alignednontemporalload addr:$src)),
6757 (VMOVNTDQAYrm addr:$src)>;
6758 def : Pat<(v8i32 (alignednontemporalload addr:$src)),
6759 (VMOVNTDQAYrm addr:$src)>;
6760 def : Pat<(v16i16 (alignednontemporalload addr:$src)),
6761 (VMOVNTDQAYrm addr:$src)>;
6762 def : Pat<(v32i8 (alignednontemporalload addr:$src)),
6763 (VMOVNTDQAYrm addr:$src)>;
6766 let Predicates = [HasAVX, NoVLX] in {
6767 def : Pat<(v4f32 (alignednontemporalload addr:$src)),
6768 (VMOVNTDQArm addr:$src)>;
6769 def : Pat<(v2f64 (alignednontemporalload addr:$src)),
6770 (VMOVNTDQArm addr:$src)>;
6771 def : Pat<(v2i64 (alignednontemporalload addr:$src)),
6772 (VMOVNTDQArm addr:$src)>;
6773 def : Pat<(v4i32 (alignednontemporalload addr:$src)),
6774 (VMOVNTDQArm addr:$src)>;
6775 def : Pat<(v8i16 (alignednontemporalload addr:$src)),
6776 (VMOVNTDQArm addr:$src)>;
6777 def : Pat<(v16i8 (alignednontemporalload addr:$src)),
6778 (VMOVNTDQArm addr:$src)>;
6781 let Predicates = [UseSSE41] in {
6782 def : Pat<(v4f32 (alignednontemporalload addr:$src)),
6783 (MOVNTDQArm addr:$src)>;
6784 def : Pat<(v2f64 (alignednontemporalload addr:$src)),
6785 (MOVNTDQArm addr:$src)>;
6786 def : Pat<(v2i64 (alignednontemporalload addr:$src)),
6787 (MOVNTDQArm addr:$src)>;
6788 def : Pat<(v4i32 (alignednontemporalload addr:$src)),
6789 (MOVNTDQArm addr:$src)>;
6790 def : Pat<(v8i16 (alignednontemporalload addr:$src)),
6791 (MOVNTDQArm addr:$src)>;
6792 def : Pat<(v16i8 (alignednontemporalload addr:$src)),
6793 (MOVNTDQArm addr:$src)>;
6794 }
6796 } // AddedComplexity
6798 //===----------------------------------------------------------------------===//
6799 // SSE4.2 - Compare Instructions
6800 //===----------------------------------------------------------------------===//
6802 /// SS42I_binop_rm - Simple SSE 4.2 binary operator
6803 multiclass SS42I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
6804 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
6805 X86MemOperand x86memop, X86FoldableSchedWrite sched,
6806 bit Is2Addr = 1> {
6807 def rr : SS428I<opc, MRMSrcReg, (outs RC:$dst),
6808 (ins RC:$src1, RC:$src2),
6809 !if(Is2Addr,
6810 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6811 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6812 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>,
6813 Sched<[sched]>;
6814 def rm : SS428I<opc, MRMSrcMem, (outs RC:$dst),
6815 (ins RC:$src1, x86memop:$src2),
6816 !if(Is2Addr,
6817 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6818 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6819 [(set RC:$dst,
6820 (OpVT (OpNode RC:$src1, (memop_frag addr:$src2))))]>,
6821 Sched<[sched.Folded, sched.ReadAfterFold]>;
6822 }
6824 let Predicates = [HasAVX] in
6825 defm VPCMPGTQ : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v2i64, VR128,
6826 load, i128mem, SchedWriteVecALU.XMM, 0>,
6827 VEX_4V, VEX_WIG;
6829 let Predicates = [HasAVX2] in
6830 defm VPCMPGTQY : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v4i64, VR256,
6831 load, i256mem, SchedWriteVecALU.YMM, 0>,
6832 VEX_4V, VEX_L, VEX_WIG;
6834 let Constraints = "$src1 = $dst" in
6835 defm PCMPGTQ : SS42I_binop_rm<0x37, "pcmpgtq", X86pcmpgt, v2i64, VR128,
6836 memop, i128mem, SchedWriteVecALU.XMM>;
6838 //===----------------------------------------------------------------------===//
6839 // SSE4.2 - String/text Processing Instructions
6840 //===----------------------------------------------------------------------===//
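// The implicit-length forms (PCMPISTR*) compare NUL-terminated fragments,
// while the explicit-length forms (PCMPESTR*) take the string lengths in EAX
// and EDX. Both return either an index in ECX or a mask in XMM0 and set
// EFLAGS, which is what the Defs/Uses below model.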
6842 multiclass pcmpistrm_SS42AI<string asm> {
6843 def rr : SS42AI<0x62, MRMSrcReg, (outs),
6844 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
6845 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
6846 []>, Sched<[WritePCmpIStrM]>;
6848 def rm :SS42AI<0x62, MRMSrcMem, (outs),
6849 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
6850 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
6851 []>, Sched<[WritePCmpIStrM.Folded, WritePCmpIStrM.ReadAfterFold]>;
6852 }
6854 let Defs = [XMM0, EFLAGS], hasSideEffects = 0 in {
6855 let Predicates = [HasAVX] in
6856 defm VPCMPISTRM : pcmpistrm_SS42AI<"vpcmpistrm">, VEX;
6857 defm PCMPISTRM : pcmpistrm_SS42AI<"pcmpistrm">;
6858 }
6860 multiclass SS42AI_pcmpestrm<string asm> {
6861 def rr : SS42AI<0x60, MRMSrcReg, (outs),
6862 (ins VR128:$src1, VR128:$src3, u8imm:$src5),
6863 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
6864 []>, Sched<[WritePCmpEStrM]>;
6866 def rm : SS42AI<0x60, MRMSrcMem, (outs),
6867 (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
6868 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
6869 []>, Sched<[WritePCmpEStrM.Folded, WritePCmpEStrM.ReadAfterFold]>;
6870 }
6872 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
6873 let Predicates = [HasAVX] in
6874 defm VPCMPESTRM : SS42AI_pcmpestrm<"vpcmpestrm">, VEX;
6875 defm PCMPESTRM : SS42AI_pcmpestrm<"pcmpestrm">;
6876 }
6878 multiclass SS42AI_pcmpistri<string asm> {
6879 def rr : SS42AI<0x63, MRMSrcReg, (outs),
6880 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
6881 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
6882 []>, Sched<[WritePCmpIStrI]>;
6884 def rm : SS42AI<0x63, MRMSrcMem, (outs),
6885 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
6886 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
6887 []>, Sched<[WritePCmpIStrI.Folded, WritePCmpIStrI.ReadAfterFold]>;
6888 }
6890 let Defs = [ECX, EFLAGS], hasSideEffects = 0 in {
6891 let Predicates = [HasAVX] in
6892 defm VPCMPISTRI : SS42AI_pcmpistri<"vpcmpistri">, VEX;
6893 defm PCMPISTRI : SS42AI_pcmpistri<"pcmpistri">;
6894 }
6896 multiclass SS42AI_pcmpestri<string asm> {
6897 def rr : SS42AI<0x61, MRMSrcReg, (outs),
6898 (ins VR128:$src1, VR128:$src3, u8imm:$src5),
6899 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
6900 []>, Sched<[WritePCmpEStrI]>;
6902 def rm : SS42AI<0x61, MRMSrcMem, (outs),
6903 (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
6904 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
6905 []>, Sched<[WritePCmpEStrI.Folded, WritePCmpEStrI.ReadAfterFold]>;
6906 }
6908 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
6909 let Predicates = [HasAVX] in
6910 defm VPCMPESTRI : SS42AI_pcmpestri<"vpcmpestri">, VEX;
6911 defm PCMPESTRI : SS42AI_pcmpestri<"pcmpestri">;
6912 }
6914 //===----------------------------------------------------------------------===//
6915 // SSE4.2 - CRC Instructions
6916 //===----------------------------------------------------------------------===//
6918 // No CRC instructions have AVX equivalents
6920 // CRC intrinsic instructions.
6921 // This set of instructions is rm only; the only difference is the size
6922 // of r and m.
6923 class SS42I_crc32r<bits<8> opc, string asm, RegisterClass RCOut,
6924 RegisterClass RCIn, SDPatternOperator Int> :
6925 SS42FI<opc, MRMSrcReg, (outs RCOut:$dst), (ins RCOut:$src1, RCIn:$src2),
6926 !strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
6927 [(set RCOut:$dst, (Int RCOut:$src1, RCIn:$src2))]>,
6928 Sched<[WriteCRC32]>;
6930 class SS42I_crc32m<bits<8> opc, string asm, RegisterClass RCOut,
6931 X86MemOperand x86memop, SDPatternOperator Int> :
6932 SS42FI<opc, MRMSrcMem, (outs RCOut:$dst), (ins RCOut:$src1, x86memop:$src2),
6933 !strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
6934 [(set RCOut:$dst, (Int RCOut:$src1, (load addr:$src2)))]>,
6935 Sched<[WriteCRC32.Folded, WriteCRC32.ReadAfterFold]>;
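// Note that the SSE4.2 crc32 instruction implements CRC-32C (the Castagnoli
// polynomial 0x11EDC6F41), not the CRC-32 polynomial used by zip/zlib.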
6937 let Constraints = "$src1 = $dst" in {
6938 def CRC32r32m8 : SS42I_crc32m<0xF0, "crc32{b}", GR32, i8mem,
6939 int_x86_sse42_crc32_32_8>;
6940 def CRC32r32r8 : SS42I_crc32r<0xF0, "crc32{b}", GR32, GR8,
6941 int_x86_sse42_crc32_32_8>;
6942 def CRC32r32m16 : SS42I_crc32m<0xF1, "crc32{w}", GR32, i16mem,
6943 int_x86_sse42_crc32_32_16>, OpSize16;
6944 def CRC32r32r16 : SS42I_crc32r<0xF1, "crc32{w}", GR32, GR16,
6945 int_x86_sse42_crc32_32_16>, OpSize16;
6946 def CRC32r32m32 : SS42I_crc32m<0xF1, "crc32{l}", GR32, i32mem,
6947 int_x86_sse42_crc32_32_32>, OpSize32;
6948 def CRC32r32r32 : SS42I_crc32r<0xF1, "crc32{l}", GR32, GR32,
6949 int_x86_sse42_crc32_32_32>, OpSize32;
6950 def CRC32r64m64 : SS42I_crc32m<0xF1, "crc32{q}", GR64, i64mem,
6951 int_x86_sse42_crc32_64_64>, REX_W;
6952 def CRC32r64r64 : SS42I_crc32r<0xF1, "crc32{q}", GR64, GR64,
6953 int_x86_sse42_crc32_64_64>, REX_W;
6954 let hasSideEffects = 0 in {
6956 def CRC32r64m8 : SS42I_crc32m<0xF0, "crc32{b}", GR64, i8mem,
6957 null_frag>, REX_W;
6958 def CRC32r64r8 : SS42I_crc32r<0xF0, "crc32{b}", GR64, GR8,
6959 null_frag>, REX_W;
6960 }
6961 }
6963 //===----------------------------------------------------------------------===//
6964 // SHA-NI Instructions
6965 //===----------------------------------------------------------------------===//
6967 // FIXME: Is there a better scheduler class for SHA than WriteVecIMul?
6968 multiclass SHAI_binop<bits<8> Opc, string OpcodeStr, Intrinsic IntId,
6969 X86FoldableSchedWrite sched, bit UsesXMM0 = 0> {
6970 def rr : I<Opc, MRMSrcReg, (outs VR128:$dst),
6971 (ins VR128:$src1, VR128:$src2),
6972 !if(UsesXMM0,
6973 !strconcat(OpcodeStr, "\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}"),
6974 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}")),
6975 [!if(UsesXMM0,
6976 (set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0)),
6977 (set VR128:$dst, (IntId VR128:$src1, VR128:$src2)))]>,
6978 T8, Sched<[sched]>;
6980 def rm : I<Opc, MRMSrcMem, (outs VR128:$dst),
6981 (ins VR128:$src1, i128mem:$src2),
6982 !if(UsesXMM0,
6983 !strconcat(OpcodeStr, "\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}"),
6984 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}")),
6985 [!if(UsesXMM0,
6986 (set VR128:$dst, (IntId VR128:$src1,
6987 (memop addr:$src2), XMM0)),
6988 (set VR128:$dst, (IntId VR128:$src1,
6989 (memop addr:$src2))))]>, T8,
6990 Sched<[sched.Folded, sched.ReadAfterFold]>;
6991 }
6993 let Constraints = "$src1 = $dst", Predicates = [HasSHA] in {
6994 def SHA1RNDS4rri : Ii8<0xCC, MRMSrcReg, (outs VR128:$dst),
6995 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
6996 "sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
6997 [(set VR128:$dst,
6998 (int_x86_sha1rnds4 VR128:$src1, VR128:$src2,
6999 (i8 imm:$src3)))]>, TA,
7000 Sched<[SchedWriteVecIMul.XMM]>;
7001 def SHA1RNDS4rmi : Ii8<0xCC, MRMSrcMem, (outs VR128:$dst),
7002 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
7003 "sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
7004 [(set VR128:$dst,
7005 (int_x86_sha1rnds4 VR128:$src1,
7006 (memop addr:$src2),
7007 (i8 imm:$src3)))]>, TA,
7008 Sched<[SchedWriteVecIMul.XMM.Folded,
7009 SchedWriteVecIMul.XMM.ReadAfterFold]>;
7011 defm SHA1NEXTE : SHAI_binop<0xC8, "sha1nexte", int_x86_sha1nexte,
7012 SchedWriteVecIMul.XMM>;
7013 defm SHA1MSG1 : SHAI_binop<0xC9, "sha1msg1", int_x86_sha1msg1,
7014 SchedWriteVecIMul.XMM>;
7015 defm SHA1MSG2 : SHAI_binop<0xCA, "sha1msg2", int_x86_sha1msg2,
7016 SchedWriteVecIMul.XMM>;
7019 defm SHA256RNDS2 : SHAI_binop<0xCB, "sha256rnds2", int_x86_sha256rnds2,
7020 SchedWriteVecIMul.XMM, 1>;
7022 defm SHA256MSG1 : SHAI_binop<0xCC, "sha256msg1", int_x86_sha256msg1,
7023 SchedWriteVecIMul.XMM>;
7024 defm SHA256MSG2 : SHAI_binop<0xCD, "sha256msg2", int_x86_sha256msg2,
7025 SchedWriteVecIMul.XMM>;
7026 }
7028 // Aliases with explicit %xmm0
7029 def : InstAlias<"sha256rnds2\t{$src2, $dst|$dst, $src2}",
7030 (SHA256RNDS2rr VR128:$dst, VR128:$src2), 0>;
7031 def : InstAlias<"sha256rnds2\t{$src2, $dst|$dst, $src2}",
7032 (SHA256RNDS2rm VR128:$dst, i128mem:$src2), 0>;
7034 //===----------------------------------------------------------------------===//
7035 // AES-NI Instructions
7036 //===----------------------------------------------------------------------===//
7038 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
7039 Intrinsic IntId, PatFrag ld_frag,
7040 bit Is2Addr = 0, RegisterClass RC = VR128,
7041 X86MemOperand MemOp = i128mem> {
7042 let AsmString = OpcodeStr##
7043 !if(Is2Addr, "\t{$src2, $dst|$dst, $src2}",
7044 "\t{$src2, $src1, $dst|$dst, $src1, $src2}") in {
7045 def rr : AES8I<opc, MRMSrcReg, (outs RC:$dst),
7046 (ins RC:$src1, RC:$src2), "",
7047 [(set RC:$dst, (IntId RC:$src1, RC:$src2))]>,
7048 Sched<[WriteAESDecEnc]>;
7049 def rm : AES8I<opc, MRMSrcMem, (outs RC:$dst),
7050 (ins RC:$src1, MemOp:$src2), "",
7051 [(set RC:$dst, (IntId RC:$src1, (ld_frag addr:$src2)))]>,
7052 Sched<[WriteAESDecEnc.Folded, WriteAESDecEnc.ReadAfterFold]>;
7053 }
7054 }
7056 // Perform One Round of an AES Encryption/Decryption Flow
7057 let Predicates = [HasAVX, NoVLX_Or_NoVAES, HasAES] in {
7058 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
7059 int_x86_aesni_aesenc, load>, VEX_4V, VEX_WIG;
7060 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
7061 int_x86_aesni_aesenclast, load>, VEX_4V, VEX_WIG;
7062 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
7063 int_x86_aesni_aesdec, load>, VEX_4V, VEX_WIG;
7064 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
7065 int_x86_aesni_aesdeclast, load>, VEX_4V, VEX_WIG;
7066 }
7068 let Predicates = [NoVLX, HasVAES] in {
7069 defm VAESENCY : AESI_binop_rm_int<0xDC, "vaesenc",
7070 int_x86_aesni_aesenc_256, load, 0, VR256,
7071 i256mem>, VEX_4V, VEX_L, VEX_WIG;
7072 defm VAESENCLASTY : AESI_binop_rm_int<0xDD, "vaesenclast",
7073 int_x86_aesni_aesenclast_256, load, 0, VR256,
7074 i256mem>, VEX_4V, VEX_L, VEX_WIG;
7075 defm VAESDECY : AESI_binop_rm_int<0xDE, "vaesdec",
7076 int_x86_aesni_aesdec_256, load, 0, VR256,
7077 i256mem>, VEX_4V, VEX_L, VEX_WIG;
7078 defm VAESDECLASTY : AESI_binop_rm_int<0xDF, "vaesdeclast",
7079 int_x86_aesni_aesdeclast_256, load, 0, VR256,
7080 i256mem>, VEX_4V, VEX_L, VEX_WIG;
7081 }
7083 let Constraints = "$src1 = $dst" in {
7084 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
7085 int_x86_aesni_aesenc, memop, 1>;
7086 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
7087 int_x86_aesni_aesenclast, memop, 1>;
7088 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
7089 int_x86_aesni_aesdec, memop, 1>;
7090 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
7091 int_x86_aesni_aesdeclast, memop, 1>;
7092 }
7094 // Perform the AES InvMixColumn Transformation
7095 let Predicates = [HasAVX, HasAES] in {
7096 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
7097 (ins VR128:$src1),
7098 "vaesimc\t{$src1, $dst|$dst, $src1}",
7099 [(set VR128:$dst,
7100 (int_x86_aesni_aesimc VR128:$src1))]>, Sched<[WriteAESIMC]>,
7101 VEX, VEX_WIG;
7102 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
7103 (ins i128mem:$src1),
7104 "vaesimc\t{$src1, $dst|$dst, $src1}",
7105 [(set VR128:$dst, (int_x86_aesni_aesimc (load addr:$src1)))]>,
7106 Sched<[WriteAESIMC.Folded]>, VEX, VEX_WIG;
7107 }
7108 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
7109 (ins VR128:$src1),
7110 "aesimc\t{$src1, $dst|$dst, $src1}",
7111 [(set VR128:$dst,
7112 (int_x86_aesni_aesimc VR128:$src1))]>, Sched<[WriteAESIMC]>;
7113 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
7114 (ins i128mem:$src1),
7115 "aesimc\t{$src1, $dst|$dst, $src1}",
7116 [(set VR128:$dst, (int_x86_aesni_aesimc (memop addr:$src1)))]>,
7117 Sched<[WriteAESIMC.Folded]>;
7119 // AES Round Key Generation Assist
7120 let Predicates = [HasAVX, HasAES] in {
7121 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
7122 (ins VR128:$src1, u8imm:$src2),
7123 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7124 [(set VR128:$dst,
7125 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
7126 Sched<[WriteAESKeyGen]>, VEX, VEX_WIG;
7127 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
7128 (ins i128mem:$src1, u8imm:$src2),
7129 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7130 [(set VR128:$dst,
7131 (int_x86_aesni_aeskeygenassist (load addr:$src1), imm:$src2))]>,
7132 Sched<[WriteAESKeyGen.Folded]>, VEX, VEX_WIG;
7133 }
7134 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
7135 (ins VR128:$src1, u8imm:$src2),
7136 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7137 [(set VR128:$dst,
7138 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
7139 Sched<[WriteAESKeyGen]>;
7140 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
7141 (ins i128mem:$src1, u8imm:$src2),
7142 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7143 [(set VR128:$dst,
7144 (int_x86_aesni_aeskeygenassist (memop addr:$src1), imm:$src2))]>,
7145 Sched<[WriteAESKeyGen.Folded]>;
7147 //===----------------------------------------------------------------------===//
7148 // PCLMUL Instructions
7149 //===----------------------------------------------------------------------===//
7151 // Immediate transform to help with commuting.
7152 def PCLMULCommuteImm : SDNodeXForm<imm, [{
7153 uint8_t Imm = N->getZExtValue();
7154 return getI8Imm((uint8_t)((Imm >> 4) | (Imm << 4)), SDLoc(N));
7155 }]>;
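// PCLMULQDQ selects a quadword of the first source with imm[0] and of the
// second source with imm[4], so commuting the sources only requires swapping
// the two nibbles of the immediate: e.g. 0x10 becomes 0x01 and the same
// product is computed.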
7157 // SSE carry-less Multiplication instructions
7158 let Predicates = [NoAVX, HasPCLMUL] in {
7159 let Constraints = "$src1 = $dst" in {
7160 let isCommutable = 1 in
7161 def PCLMULQDQrr : PCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
7162 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
7163 "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
7164 [(set VR128:$dst,
7165 (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))]>,
7166 Sched<[WriteCLMul]>;
7168 def PCLMULQDQrm : PCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
7169 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
7170 "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
7171 [(set VR128:$dst,
7172 (int_x86_pclmulqdq VR128:$src1, (memop addr:$src2),
7173 imm:$src3))]>,
7174 Sched<[WriteCLMul.Folded, WriteCLMul.ReadAfterFold]>;
7175 } // Constraints = "$src1 = $dst"
7177 def : Pat<(int_x86_pclmulqdq (memop addr:$src2), VR128:$src1,
7178 (i8 imm:$src3)),
7179 (PCLMULQDQrm VR128:$src1, addr:$src2,
7180 (PCLMULCommuteImm imm:$src3))>;
7181 } // Predicates = [NoAVX, HasPCLMUL]
7184 foreach HI = ["hq","lq"] in
7185 foreach LO = ["hq","lq"] in {
7186 def : InstAlias<"pclmul" # HI # LO # "dq\t{$src, $dst|$dst, $src}",
7187 (PCLMULQDQrr VR128:$dst, VR128:$src,
7188 !add(!shl(!eq(LO,"hq"),4),!eq(HI,"hq"))), 0>;
7189 def : InstAlias<"pclmul" # HI # LO # "dq\t{$src, $dst|$dst, $src}",
7190 (PCLMULQDQrm VR128:$dst, i128mem:$src,
7191 !add(!shl(!eq(LO,"hq"),4),!eq(HI,"hq"))), 0>;
7192 }
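// The !shl/!eq arithmetic above derives the immediate from the mnemonic:
// "hq" contributes 1 and "lq" contributes 0, with HI mapping to imm[0] and
// LO to imm[4]; e.g. pclmulhqlqdq is PCLMULQDQ with immediate 0x01.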
7194 // AVX carry-less Multiplication instructions
7195 multiclass vpclmulqdq<RegisterClass RC, X86MemOperand MemOp,
7196 PatFrag LdFrag, Intrinsic IntId> {
7197 let isCommutable = 1 in
7198 def rr : PCLMULIi8<0x44, MRMSrcReg, (outs RC:$dst),
7199 (ins RC:$src1, RC:$src2, u8imm:$src3),
7200 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
7201 [(set RC:$dst,
7202 (IntId RC:$src1, RC:$src2, imm:$src3))]>,
7203 Sched<[WriteCLMul]>;
7205 def rm : PCLMULIi8<0x44, MRMSrcMem, (outs RC:$dst),
7206 (ins RC:$src1, MemOp:$src2, u8imm:$src3),
7207 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
7208 [(set RC:$dst,
7209 (IntId RC:$src1, (LdFrag addr:$src2), imm:$src3))]>,
7210 Sched<[WriteCLMul.Folded, WriteCLMul.ReadAfterFold]>;
7212 // We can commute a load in the first operand by swapping the sources and
7213 // rotating the immediate.
7214 def : Pat<(IntId (LdFrag addr:$src2), RC:$src1, (i8 imm:$src3)),
7215 (!cast<Instruction>(NAME#"rm") RC:$src1, addr:$src2,
7216 (PCLMULCommuteImm imm:$src3))>;
7217 }
7219 let Predicates = [HasAVX, NoVLX_Or_NoVPCLMULQDQ, HasPCLMUL] in
7220 defm VPCLMULQDQ : vpclmulqdq<VR128, i128mem, load,
7221 int_x86_pclmulqdq>, VEX_4V, VEX_WIG;
7223 let Predicates = [NoVLX, HasVPCLMULQDQ] in
7224 defm VPCLMULQDQY : vpclmulqdq<VR256, i256mem, load,
7225 int_x86_pclmulqdq_256>, VEX_4V, VEX_L, VEX_WIG;
7227 multiclass vpclmulqdq_aliases_impl<string InstStr, RegisterClass RC,
7228 X86MemOperand MemOp, string Hi, string Lo> {
7229 def : InstAlias<"vpclmul"##Hi##Lo##"dq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7230 (!cast<Instruction>(InstStr # "rr") RC:$dst, RC:$src1, RC:$src2,
7231 !add(!shl(!eq(Lo,"hq"),4),!eq(Hi,"hq"))), 0>;
7232 def : InstAlias<"vpclmul"##Hi##Lo##"dq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7233 (!cast<Instruction>(InstStr # "rm") RC:$dst, RC:$src1, MemOp:$src2,
7234 !add(!shl(!eq(Lo,"hq"),4),!eq(Hi,"hq"))), 0>;
7235 }
7237 multiclass vpclmulqdq_aliases<string InstStr, RegisterClass RC,
7238 X86MemOperand MemOp> {
7239 defm : vpclmulqdq_aliases_impl<InstStr, RC, MemOp, "hq", "hq">;
7240 defm : vpclmulqdq_aliases_impl<InstStr, RC, MemOp, "hq", "lq">;
7241 defm : vpclmulqdq_aliases_impl<InstStr, RC, MemOp, "lq", "hq">;
7242 defm : vpclmulqdq_aliases_impl<InstStr, RC, MemOp, "lq", "lq">;
7243 }
7246 defm : vpclmulqdq_aliases<"VPCLMULQDQ", VR128, i128mem>;
7247 defm : vpclmulqdq_aliases<"VPCLMULQDQY", VR256, i256mem>;
7249 //===----------------------------------------------------------------------===//
7250 // SSE4A Instructions
7251 //===----------------------------------------------------------------------===//
7253 let Predicates = [HasSSE4A] in {
7255 let ExeDomain = SSEPackedInt in {
7256 let Constraints = "$src = $dst" in {
7257 def EXTRQI : Ii8<0x78, MRMXr, (outs VR128:$dst),
7258 (ins VR128:$src, u8imm:$len, u8imm:$idx),
7259 "extrq\t{$idx, $len, $src|$src, $len, $idx}",
7260 [(set VR128:$dst, (X86extrqi VR128:$src, imm:$len,
7261 imm:$idx))]>,
7262 PD, Sched<[SchedWriteVecALU.XMM]>;
7263 def EXTRQ : I<0x79, MRMSrcReg, (outs VR128:$dst),
7264 (ins VR128:$src, VR128:$mask),
7265 "extrq\t{$mask, $src|$src, $mask}",
7266 [(set VR128:$dst, (int_x86_sse4a_extrq VR128:$src,
7267 VR128:$mask))]>,
7268 PD, Sched<[SchedWriteVecALU.XMM]>;
7270 def INSERTQI : Ii8<0x78, MRMSrcReg, (outs VR128:$dst),
7271 (ins VR128:$src, VR128:$src2, u8imm:$len, u8imm:$idx),
7272 "insertq\t{$idx, $len, $src2, $src|$src, $src2, $len, $idx}",
7273 [(set VR128:$dst, (X86insertqi VR128:$src, VR128:$src2,
7274 imm:$len, imm:$idx))]>,
7275 XD, Sched<[SchedWriteVecALU.XMM]>;
7276 def INSERTQ : I<0x79, MRMSrcReg, (outs VR128:$dst),
7277 (ins VR128:$src, VR128:$mask),
7278 "insertq\t{$mask, $src|$src, $mask}",
7279 [(set VR128:$dst, (int_x86_sse4a_insertq VR128:$src,
7280 VR128:$mask))]>,
7281 XD, Sched<[SchedWriteVecALU.XMM]>;
7282 }
7283 } // ExeDomain = SSEPackedInt
7285 // Non-temporal (unaligned) scalar stores.
7286 let AddedComplexity = 400 in { // Prefer non-temporal versions
7287 let hasSideEffects = 0, mayStore = 1, SchedRW = [SchedWriteFMoveLSNT.Scl.MR] in {
7288 def MOVNTSS : I<0x2B, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
7289 "movntss\t{$src, $dst|$dst, $src}", []>, XS;
7291 def MOVNTSD : I<0x2B, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
7292 "movntsd\t{$src, $dst|$dst, $src}", []>, XD;
7293 }
7295 def : Pat<(nontemporalstore FR32:$src, addr:$dst),
7296 (MOVNTSS addr:$dst, (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)))>;
7298 def : Pat<(nontemporalstore FR64:$src, addr:$dst),
7299 (MOVNTSD addr:$dst, (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))>;
7301 } // AddedComplexity
7302 } // HasSSE4A
7304 //===----------------------------------------------------------------------===//
7305 // AVX Instructions
7306 //===----------------------------------------------------------------------===//
7308 //===----------------------------------------------------------------------===//
7309 // VBROADCAST - Load from memory and broadcast to all elements of the
7310 // destination operand
7312 class avx_broadcast_rm<bits<8> opc, string OpcodeStr, RegisterClass RC,
7313 X86MemOperand x86memop, ValueType VT,
7314 PatFrag ld_frag, SchedWrite Sched> :
7315 AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
7316 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
7317 [(set RC:$dst, (VT (X86VBroadcast (ld_frag addr:$src))))]>,
7318 Sched<[Sched]>, VEX;
7320 // AVX2 adds register forms
7321 class avx2_broadcast_rr<bits<8> opc, string OpcodeStr, RegisterClass RC,
7322 ValueType ResVT, ValueType OpVT, SchedWrite Sched> :
7323 AVX28I<opc, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
7324 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
7325 [(set RC:$dst, (ResVT (X86VBroadcast (OpVT VR128:$src))))]>,
7326 Sched<[Sched]>, VEX;
7328 let ExeDomain = SSEPackedSingle, Predicates = [HasAVX, NoVLX] in {
7329 def VBROADCASTSSrm : avx_broadcast_rm<0x18, "vbroadcastss", VR128,
7330 f32mem, v4f32, loadf32,
7331 SchedWriteFShuffle.XMM.Folded>;
7332 def VBROADCASTSSYrm : avx_broadcast_rm<0x18, "vbroadcastss", VR256,
7333 f32mem, v8f32, loadf32,
7334 SchedWriteFShuffle.XMM.Folded>, VEX_L;
7335 }
7336 let ExeDomain = SSEPackedDouble, Predicates = [HasAVX, NoVLX] in
7337 def VBROADCASTSDYrm : avx_broadcast_rm<0x19, "vbroadcastsd", VR256, f64mem,
7338 v4f64, loadf64,
7339 SchedWriteFShuffle.XMM.Folded>, VEX_L;
7341 let ExeDomain = SSEPackedSingle, Predicates = [HasAVX2, NoVLX] in {
7342 def VBROADCASTSSrr : avx2_broadcast_rr<0x18, "vbroadcastss", VR128,
7343 v4f32, v4f32, SchedWriteFShuffle.XMM>;
7344 def VBROADCASTSSYrr : avx2_broadcast_rr<0x18, "vbroadcastss", VR256,
7345 v8f32, v4f32, WriteFShuffle256>, VEX_L;
7346 }
7347 let ExeDomain = SSEPackedDouble, Predicates = [HasAVX2, NoVLX] in
7348 def VBROADCASTSDYrr : avx2_broadcast_rr<0x19, "vbroadcastsd", VR256,
7349 v4f64, v2f64, WriteFShuffle256>, VEX_L;
7351 let Predicates = [HasAVX, NoVLX] in {
7352 def : Pat<(v4f32 (X86VBroadcast (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
7353 (VBROADCASTSSrm addr:$src)>;
7354 def : Pat<(v8f32 (X86VBroadcast (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
7355 (VBROADCASTSSYrm addr:$src)>;
7356 def : Pat<(v4f64 (X86VBroadcast (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
7357 (VBROADCASTSDYrm addr:$src)>;
7358 }
7360 //===----------------------------------------------------------------------===//
7361 // VBROADCAST*128 - Load from memory and broadcast 128-bit vector to both
7362 // halves of a 256-bit vector.
7364 let mayLoad = 1, hasSideEffects = 0, Predicates = [HasAVX2] in
7365 def VBROADCASTI128 : AVX8I<0x5A, MRMSrcMem, (outs VR256:$dst),
7366 (ins i128mem:$src),
7367 "vbroadcasti128\t{$src, $dst|$dst, $src}", []>,
7368 Sched<[WriteShuffleLd]>, VEX, VEX_L;
7370 let mayLoad = 1, hasSideEffects = 0, Predicates = [HasAVX],
7371 ExeDomain = SSEPackedSingle in
7372 def VBROADCASTF128 : AVX8I<0x1A, MRMSrcMem, (outs VR256:$dst),
7373 (ins f128mem:$src),
7374 "vbroadcastf128\t{$src, $dst|$dst, $src}", []>,
7375 Sched<[SchedWriteFShuffle.XMM.Folded]>, VEX, VEX_L;
7377 let Predicates = [HasAVX2, NoVLX] in {
7378 def : Pat<(v4i64 (X86SubVBroadcast (loadv2i64 addr:$src))),
7379 (VBROADCASTI128 addr:$src)>;
7380 def : Pat<(v8i32 (X86SubVBroadcast (loadv4i32 addr:$src))),
7381 (VBROADCASTI128 addr:$src)>;
7382 def : Pat<(v16i16 (X86SubVBroadcast (loadv8i16 addr:$src))),
7383 (VBROADCASTI128 addr:$src)>;
7384 def : Pat<(v32i8 (X86SubVBroadcast (loadv16i8 addr:$src))),
7385 (VBROADCASTI128 addr:$src)>;
7386 }
7388 let Predicates = [HasAVX, NoVLX] in {
7389 def : Pat<(v4f64 (X86SubVBroadcast (loadv2f64 addr:$src))),
7390 (VBROADCASTF128 addr:$src)>;
7391 def : Pat<(v8f32 (X86SubVBroadcast (loadv4f32 addr:$src))),
7392 (VBROADCASTF128 addr:$src)>;
7393 }
7395 let Predicates = [HasAVX1Only] in {
7396 def : Pat<(v4i64 (X86SubVBroadcast (loadv2i64 addr:$src))),
7397 (VBROADCASTF128 addr:$src)>;
7398 def : Pat<(v8i32 (X86SubVBroadcast (loadv4i32 addr:$src))),
7399 (VBROADCASTF128 addr:$src)>;
7400 def : Pat<(v16i16 (X86SubVBroadcast (loadv8i16 addr:$src))),
7401 (VBROADCASTF128 addr:$src)>;
7402 def : Pat<(v32i8 (X86SubVBroadcast (loadv16i8 addr:$src))),
7403 (VBROADCASTF128 addr:$src)>;
7404 }
7406 //===----------------------------------------------------------------------===//
7407 // VINSERTF128 - Insert packed floating-point values
7409 let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
7410 def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
7411 (ins VR256:$src1, VR128:$src2, u8imm:$src3),
7412 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
7413 []>, Sched<[WriteFShuffle256]>, VEX_4V, VEX_L;
7415 def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
7416 (ins VR256:$src1, f128mem:$src2, u8imm:$src3),
7417 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
7418 []>, Sched<[WriteFShuffle256.Folded, WriteFShuffle256.ReadAfterFold]>, VEX_4V, VEX_L;
7419 }
7421 // To create a 256-bit all-ones value, we should produce VCMPTRUEPS
7422 // with a YMM register containing zero.
7423 // FIXME: Avoid producing vxorps to clear the fake inputs.
7424 let Predicates = [HasAVX1Only] in {
7425 def : Pat<(v8i32 immAllOnesV), (VCMPPSYrri (AVX_SET0), (AVX_SET0), 0xf)>;
7426 }
7428 multiclass vinsert_lowering<string InstrStr, ValueType From, ValueType To,
7429 PatFrag memop_frag> {
7430 def : Pat<(vinsert128_insert:$ins (To VR256:$src1), (From VR128:$src2),
7431 (iPTR imm)),
7432 (!cast<Instruction>(InstrStr#rr) VR256:$src1, VR128:$src2,
7433 (INSERT_get_vinsert128_imm VR256:$ins))>;
7434 def : Pat<(vinsert128_insert:$ins (To VR256:$src1),
7435 (From (memop_frag addr:$src2)),
7436 (iPTR imm)),
7437 (!cast<Instruction>(InstrStr#rm) VR256:$src1, addr:$src2,
7438 (INSERT_get_vinsert128_imm VR256:$ins))>;
7439 }
7441 let Predicates = [HasAVX, NoVLX] in {
7442 defm : vinsert_lowering<"VINSERTF128", v4f32, v8f32, loadv4f32>;
7443 defm : vinsert_lowering<"VINSERTF128", v2f64, v4f64, loadv2f64>;
7444 }
7446 let Predicates = [HasAVX1Only] in {
7447 defm : vinsert_lowering<"VINSERTF128", v2i64, v4i64, loadv2i64>;
7448 defm : vinsert_lowering<"VINSERTF128", v4i32, v8i32, loadv4i32>;
7449 defm : vinsert_lowering<"VINSERTF128", v8i16, v16i16, loadv8i16>;
7450 defm : vinsert_lowering<"VINSERTF128", v16i8, v32i8, loadv16i8>;
7451 }
7453 //===----------------------------------------------------------------------===//
7454 // VEXTRACTF128 - Extract packed floating-point values
7456 let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
7457 def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
7458 (ins VR256:$src1, u8imm:$src2),
7459 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7460 []>, Sched<[WriteFShuffle256]>, VEX, VEX_L;
7462 def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
7463 (ins f128mem:$dst, VR256:$src1, u8imm:$src2),
7464 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7465 []>, Sched<[WriteFStoreX]>, VEX, VEX_L;
7466 }
7468 multiclass vextract_lowering<string InstrStr, ValueType From, ValueType To> {
7469 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
7470 (To (!cast<Instruction>(InstrStr#rr)
7471 (From VR256:$src1),
7472 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
7473 def : Pat<(store (To (vextract128_extract:$ext (From VR256:$src1),
7474 (iPTR imm))), addr:$dst),
7475 (!cast<Instruction>(InstrStr#mr) addr:$dst, VR256:$src1,
7476 (EXTRACT_get_vextract128_imm VR128:$ext))>;
7477 }
7480 let Predicates = [HasAVX, NoVLX] in {
7481 defm : vextract_lowering<"VEXTRACTF128", v8f32, v4f32>;
7482 defm : vextract_lowering<"VEXTRACTF128", v4f64, v2f64>;
7483 }
7485 let Predicates = [HasAVX1Only] in {
7486 defm : vextract_lowering<"VEXTRACTF128", v4i64, v2i64>;
7487 defm : vextract_lowering<"VEXTRACTF128", v8i32, v4i32>;
7488 defm : vextract_lowering<"VEXTRACTF128", v16i16, v8i16>;
7489 defm : vextract_lowering<"VEXTRACTF128", v32i8, v16i8>;
7490 }
7492 //===----------------------------------------------------------------------===//
7493 // VMASKMOV - Conditional SIMD Packed Loads and Stores
7495 multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
7496 Intrinsic IntLd, Intrinsic IntLd256,
7497 Intrinsic IntSt, Intrinsic IntSt256> {
7498 def rm : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
7499 (ins VR128:$src1, f128mem:$src2),
7500 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
7501 [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
7502 VEX_4V, Sched<[WriteFMaskedLoad]>;
7503 def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
7504 (ins VR256:$src1, f256mem:$src2),
7505 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
7506 [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
7507 VEX_4V, VEX_L, Sched<[WriteFMaskedLoadY]>;
7508 def mr : AVX8I<opc_mr, MRMDestMem, (outs),
7509 (ins f128mem:$dst, VR128:$src1, VR128:$src2),
7510 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
7511 [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>,
7512 VEX_4V, Sched<[WriteFMaskedStore]>;
7513 def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
7514 (ins f256mem:$dst, VR256:$src1, VR256:$src2),
7515 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
7516 [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>,
7517 VEX_4V, VEX_L, Sched<[WriteFMaskedStoreY]>;
7518 }
7520 let ExeDomain = SSEPackedSingle in
7521 defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
7522 int_x86_avx_maskload_ps,
7523 int_x86_avx_maskload_ps_256,
7524 int_x86_avx_maskstore_ps,
7525 int_x86_avx_maskstore_ps_256>;
7526 let ExeDomain = SSEPackedDouble in
7527 defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
7528 int_x86_avx_maskload_pd,
7529 int_x86_avx_maskload_pd_256,
7530 int_x86_avx_maskstore_pd,
7531 int_x86_avx_maskstore_pd_256>;
7533 //===----------------------------------------------------------------------===//
7534 // VPERMIL - Permute Single and Double Floating-Point Values
7537 multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
7538 RegisterClass RC, X86MemOperand x86memop_f,
7539 X86MemOperand x86memop_i,
7540 ValueType f_vt, ValueType i_vt,
7541 X86FoldableSchedWrite sched,
7542 X86FoldableSchedWrite varsched> {
7543 let Predicates = [HasAVX, NoVLX] in {
7544 def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
7545 (ins RC:$src1, RC:$src2),
7546 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
7547 [(set RC:$dst, (f_vt (X86VPermilpv RC:$src1, (i_vt RC:$src2))))]>, VEX_4V,
7548 Sched<[varsched]>;
7549 def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
7550 (ins RC:$src1, x86memop_i:$src2),
7551 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
7552 [(set RC:$dst, (f_vt (X86VPermilpv RC:$src1,
7553 (i_vt (load addr:$src2)))))]>, VEX_4V,
7554 Sched<[varsched.Folded, sched.ReadAfterFold]>;
7556 def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
7557 (ins RC:$src1, u8imm:$src2),
7558 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
7559 [(set RC:$dst, (f_vt (X86VPermilpi RC:$src1, (i8 imm:$src2))))]>, VEX,
7560 Sched<[sched]>;
7561 def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
7562 (ins x86memop_f:$src1, u8imm:$src2),
7563 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
7564 [(set RC:$dst,
7565 (f_vt (X86VPermilpi (load addr:$src1), (i8 imm:$src2))))]>, VEX,
7566 Sched<[sched.Folded]>;
7567 }// Predicates = [HasAVX, NoVLX]
7570 let ExeDomain = SSEPackedSingle in {
7571 defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
7572 v4f32, v4i32, SchedWriteFShuffle.XMM,
7573 SchedWriteFVarShuffle.XMM>;
7574 defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
7575 v8f32, v8i32, SchedWriteFShuffle.YMM,
7576 SchedWriteFVarShuffle.YMM>, VEX_L;
7577 }
7578 let ExeDomain = SSEPackedDouble in {
7579 defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
7580 v2f64, v2i64, SchedWriteFShuffle.XMM,
7581 SchedWriteFVarShuffle.XMM>;
7582 defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
7583 v4f64, v4i64, SchedWriteFShuffle.YMM,
7584 SchedWriteFVarShuffle.YMM>, VEX_L;
7585 }
7587 //===----------------------------------------------------------------------===//
7588 // VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
7591 let ExeDomain = SSEPackedSingle in {
7592 let isCommutable = 1 in
7593 def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
7594 (ins VR256:$src1, VR256:$src2, u8imm:$src3),
7595 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
7596 [(set VR256:$dst, (v4f64 (X86VPerm2x128 VR256:$src1, VR256:$src2,
7597 (i8 imm:$src3))))]>, VEX_4V, VEX_L,
7598 Sched<[WriteFShuffle256]>;
7599 def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
7600 (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
7601 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
7602 [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv4f64 addr:$src2),
7603 (i8 imm:$src3)))]>, VEX_4V, VEX_L,
7604 Sched<[WriteFShuffle256.Folded, WriteFShuffle256.ReadAfterFold]>;
7605 }
7607 // Immediate transform to help with commuting.
7608 def Perm2XCommuteImm : SDNodeXForm<imm, [{
7609 return getI8Imm(N->getZExtValue() ^ 0x22, SDLoc(N));
7610 }]>;
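// VPERM2F128/VPERM2I128 select the destination's low 128-bit half with
// imm[1:0] and the high half with imm[5:4]; bit 1 of each field chooses
// between the two sources, so XORing with 0x22 compensates for swapped
// operands (e.g. 0x20 becomes 0x02).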
7612 let Predicates = [HasAVX] in {
7613 // Pattern with load in other operand.
7614 def : Pat<(v4f64 (X86VPerm2x128 (loadv4f64 addr:$src2),
7615 VR256:$src1, (i8 imm:$imm))),
7616 (VPERM2F128rm VR256:$src1, addr:$src2, (Perm2XCommuteImm imm:$imm))>;
7617 }
7619 let Predicates = [HasAVX1Only] in {
7620 def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
7621 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
7622 def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1,
7623 (loadv4i64 addr:$src2), (i8 imm:$imm))),
7624 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
7625 // Pattern with load in other operand.
7626 def : Pat<(v4i64 (X86VPerm2x128 (loadv4i64 addr:$src2),
7627 VR256:$src1, (i8 imm:$imm))),
7628 (VPERM2F128rm VR256:$src1, addr:$src2, (Perm2XCommuteImm imm:$imm))>;
7629 }
7631 //===----------------------------------------------------------------------===//
7632 // VZERO - Zero YMM registers
7633 // Note: These instructions do not affect YMM16-YMM31.
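// VZEROUPPER is typically inserted by the compiler before calls, returns, and
// blocks of SSE-only code when the upper YMM state may be dirty, to avoid
// AVX/SSE transition penalties on older implementations.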
7636 let SchedRW = [WriteSystem] in {
7637 let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
7638 YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
7639 // Zero All YMM registers
7640 def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
7641 [(int_x86_avx_vzeroall)]>, PS, VEX, VEX_L,
7642 Requires<[HasAVX]>, VEX_WIG;
7644 // Zero Upper bits of YMM registers
7645 def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
7646 [(int_x86_avx_vzeroupper)]>, PS, VEX,
7647 Requires<[HasAVX]>, VEX_WIG;
7648 }
7649 }
7651 //===----------------------------------------------------------------------===//
7652 // Half precision conversion instructions
7655 multiclass f16c_ph2ps<RegisterClass RC, X86MemOperand x86memop,
7656 X86FoldableSchedWrite sched> {
7657 def rr : I<0x13, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
7658 "vcvtph2ps\t{$src, $dst|$dst, $src}",
7659 [(set RC:$dst, (X86cvtph2ps VR128:$src))]>,
7660 T8PD, VEX, Sched<[sched]>;
7661 let hasSideEffects = 0, mayLoad = 1 in
7662 def rm : I<0x13, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
7663 "vcvtph2ps\t{$src, $dst|$dst, $src}",
7664 [(set RC:$dst, (X86cvtph2ps (loadv8i16 addr:$src)))]>,
7665 T8PD, VEX, Sched<[sched.Folded]>;
7666 }
7668 multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop,
7669 SchedWrite RR, SchedWrite MR> {
7670 def rr : Ii8<0x1D, MRMDestReg, (outs VR128:$dst),
7671 (ins RC:$src1, i32u8imm:$src2),
7672 "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7673 [(set VR128:$dst, (X86cvtps2ph RC:$src1, imm:$src2))]>,
7674 TAPD, VEX, Sched<[RR]>;
7675 let hasSideEffects = 0, mayStore = 1 in
7676 def mr : Ii8<0x1D, MRMDestMem, (outs),
7677 (ins x86memop:$dst, RC:$src1, i32u8imm:$src2),
7678 "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
7679 TAPD, VEX, Sched<[MR]>;
7680 }
7682 let Predicates = [HasF16C, NoVLX] in {
7683 defm VCVTPH2PS : f16c_ph2ps<VR128, f64mem, WriteCvtPH2PS>;
7684 defm VCVTPH2PSY : f16c_ph2ps<VR256, f128mem, WriteCvtPH2PSY>, VEX_L;
7685 defm VCVTPS2PH : f16c_ps2ph<VR128, f64mem, WriteCvtPS2PH,
7686 WriteCvtPS2PHSt>;
7687 defm VCVTPS2PHY : f16c_ps2ph<VR256, f128mem, WriteCvtPS2PHY,
7688 WriteCvtPS2PHYSt>, VEX_L;
7690 // Pattern match vcvtph2ps of a scalar i64 load.
7691 def : Pat<(v4f32 (X86cvtph2ps (v8i16 (vzmovl_v2i64 addr:$src)))),
7692 (VCVTPH2PSrm addr:$src)>;
7693 def : Pat<(v4f32 (X86cvtph2ps (v8i16 (vzload_v2i64 addr:$src)))),
7694 (VCVTPH2PSrm addr:$src)>;
7695 def : Pat<(v4f32 (X86cvtph2ps (v8i16 (bitconvert
7696 (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
7697 (VCVTPH2PSrm addr:$src)>;
7699 def : Pat<(store (f64 (extractelt
7700 (bc_v2f64 (v8i16 (X86cvtps2ph VR128:$src1, i32:$src2))),
7701 (iPTR 0))), addr:$dst),
7702 (VCVTPS2PHmr addr:$dst, VR128:$src1, imm:$src2)>;
7703 def : Pat<(store (i64 (extractelt
7704 (bc_v2i64 (v8i16 (X86cvtps2ph VR128:$src1, i32:$src2))),
7705 (iPTR 0))), addr:$dst),
7706 (VCVTPS2PHmr addr:$dst, VR128:$src1, imm:$src2)>;
7707 def : Pat<(store (v8i16 (X86cvtps2ph VR256:$src1, i32:$src2)), addr:$dst),
7708 (VCVTPS2PHYmr addr:$dst, VR256:$src1, imm:$src2)>;
7709 }
7711 // Patterns for matching conversions from float to half-float and vice versa.
7712 let Predicates = [HasF16C, NoVLX] in {
7713 // Use MXCSR.RC for rounding instead of explicitly specifying the default
7714 // rounding mode (Nearest-Even, encoded as 0). Both are equivalent in the
7715 // configurations we support (the default). However, falling back to MXCSR is
7716 // more consistent with other instructions, which are always controlled by it.
7717 // It's encoded as 0b100.
7718 def : Pat<(fp_to_f16 FR32:$src),
7719 (i16 (EXTRACT_SUBREG (VMOVPDI2DIrr (v8i16 (VCVTPS2PHrr
7720 (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 4))), sub_16bit))>;
7722 def : Pat<(f16_to_fp GR16:$src),
7723 (f32 (COPY_TO_REGCLASS (v4f32 (VCVTPH2PSrr
7724 (v4i32 (COPY_TO_REGCLASS (MOVSX32rr16 GR16:$src), VR128)))), FR32)) >;
7726 def : Pat<(f16_to_fp (i16 (fp_to_f16 FR32:$src))),
7727 (f32 (COPY_TO_REGCLASS (v4f32 (VCVTPH2PSrr
7728 (v8i16 (VCVTPS2PHrr (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 4)))), FR32)) >;
7729 }
7731 //===----------------------------------------------------------------------===//
7732 // AVX2 Instructions
7733 //===----------------------------------------------------------------------===//
7735 /// AVX2_blend_rmi - AVX2 blend with 8-bit immediate
7736 multiclass AVX2_blend_rmi<bits<8> opc, string OpcodeStr, SDNode OpNode,
7737 ValueType OpVT, X86FoldableSchedWrite sched,
7738 RegisterClass RC,
7739 X86MemOperand x86memop, SDNodeXForm commuteXForm> {
7740 let isCommutable = 1 in
7741 def rri : AVX2AIi8<opc, MRMSrcReg, (outs RC:$dst),
7742 (ins RC:$src1, RC:$src2, u8imm:$src3),
7743 !strconcat(OpcodeStr,
7744 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
7745 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2, imm:$src3)))]>,
7746 Sched<[sched]>, VEX_4V;
7747 def rmi : AVX2AIi8<opc, MRMSrcMem, (outs RC:$dst),
7748 (ins RC:$src1, x86memop:$src2, u8imm:$src3),
7749 !strconcat(OpcodeStr,
7750 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
7751 [(set RC:$dst,
7752 (OpVT (OpNode RC:$src1, (load addr:$src2), imm:$src3)))]>,
7753 Sched<[sched.Folded, sched.ReadAfterFold]>, VEX_4V;
7755 // Pattern to commute if load is in first source.
7756 def : Pat<(OpVT (OpNode (load addr:$src2), RC:$src1, imm:$src3)),
7757 (!cast<Instruction>(NAME#"rmi") RC:$src1, addr:$src2,
7758 (commuteXForm imm:$src3))>;
7759 }
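// BlendCommuteImm8 is one of the blend immediate transforms defined earlier
// in the X86 .td files; commuting a blend flips which source each mask bit
// selects, so the transform is expected to invert the low 8 bits of the
// immediate.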
7761 defm VPBLENDD : AVX2_blend_rmi<0x02, "vpblendd", X86Blendi, v4i32,
7762 SchedWriteBlend.XMM, VR128, i128mem,
7763 BlendCommuteImm8>;
7764 defm VPBLENDDY : AVX2_blend_rmi<0x02, "vpblendd", X86Blendi, v8i32,
7765 SchedWriteBlend.YMM, VR256, i256mem,
7766 BlendCommuteImm8>, VEX_L;
7768 // For insertion into the zero index (low half) of a 256-bit vector, it is
7769 // more efficient to generate a blend with immediate instead of an insert*128.
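// An immediate of 0xf selects the low four dwords from the second blend
// source (the inserted 128-bit value) and keeps the upper half of $src1.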
7770 let Predicates = [HasAVX2] in {
7771 def : Pat<(insert_subvector (v8i32 VR256:$src1), (v4i32 VR128:$src2), (iPTR 0)),
7772 (VPBLENDDYrri VR256:$src1,
7773 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
7774 VR128:$src2, sub_xmm), 0xf)>;
7775 def : Pat<(insert_subvector (v4i64 VR256:$src1), (v2i64 VR128:$src2), (iPTR 0)),
7776 (VPBLENDDYrri VR256:$src1,
7777 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
7778 VR128:$src2, sub_xmm), 0xf)>;
7779 def : Pat<(insert_subvector (v16i16 VR256:$src1), (v8i16 VR128:$src2), (iPTR 0)),
7780 (VPBLENDDYrri VR256:$src1,
7781 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
7782 VR128:$src2, sub_xmm), 0xf)>;
7783 def : Pat<(insert_subvector (v32i8 VR256:$src1), (v16i8 VR128:$src2), (iPTR 0)),
7784 (VPBLENDDYrri VR256:$src1,
7785 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
7786 VR128:$src2, sub_xmm), 0xf)>;
7787 }
7789 let Predicates = [HasAVX1Only] in {
7790 def : Pat<(insert_subvector (v8i32 VR256:$src1), (v4i32 VR128:$src2), (iPTR 0)),
7791 (VBLENDPSYrri VR256:$src1,
7792 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
7793 VR128:$src2, sub_xmm), 0xf)>;
7794 def : Pat<(insert_subvector (v4i64 VR256:$src1), (v2i64 VR128:$src2), (iPTR 0)),
7795 (VBLENDPSYrri VR256:$src1,
7796 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
7797 VR128:$src2, sub_xmm), 0xf)>;
7798 def : Pat<(insert_subvector (v16i16 VR256:$src1), (v8i16 VR128:$src2), (iPTR 0)),
7799 (VBLENDPSYrri VR256:$src1,
7800 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
7801 VR128:$src2, sub_xmm), 0xf)>;
7802 def : Pat<(insert_subvector (v32i8 VR256:$src1), (v16i8 VR128:$src2), (iPTR 0)),
7803 (VBLENDPSYrri VR256:$src1,
7804 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
7805 VR128:$src2, sub_xmm), 0xf)>;
7806 }
7808 //===----------------------------------------------------------------------===//
7809 // VPBROADCAST - Load from memory and broadcast to all elements of the
7810 // destination operand
7812 multiclass avx2_broadcast<bits<8> opc, string OpcodeStr,
7813 X86MemOperand x86memop, PatFrag ld_frag,
7814 ValueType OpVT128, ValueType OpVT256, Predicate prd> {
7815 let Predicates = [HasAVX2, prd] in {
7816 def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
7817 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
7819 (OpVT128 (X86VBroadcast (OpVT128 VR128:$src))))]>,
7820 Sched<[SchedWriteShuffle.XMM]>, VEX;
7821 def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst), (ins x86memop:$src),
7822 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
7824 (OpVT128 (X86VBroadcast (ld_frag addr:$src))))]>,
7825 Sched<[SchedWriteShuffle.XMM.Folded]>, VEX;
7826 def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
7827 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
7829 (OpVT256 (X86VBroadcast (OpVT128 VR128:$src))))]>,
7830 Sched<[WriteShuffle256]>, VEX, VEX_L;
7831 def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst), (ins x86memop:$src),
7832 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
7834 (OpVT256 (X86VBroadcast (ld_frag addr:$src))))]>,
7835 Sched<[SchedWriteShuffle.XMM.Folded]>, VEX, VEX_L;
7837 // Provide aliases for broadcast from the same register class that
7838 // automatically does the extract.
7839 def : Pat<(OpVT256 (X86VBroadcast (OpVT256 VR256:$src))),
7840 (!cast<Instruction>(NAME#"Yrr")
7841 (OpVT128 (EXTRACT_SUBREG (OpVT256 VR256:$src),sub_xmm)))>;
7842 }
7843 }
7845 defm VPBROADCASTB : avx2_broadcast<0x78, "vpbroadcastb", i8mem, loadi8,
7846 v16i8, v32i8, NoVLX_Or_NoBWI>;
7847 defm VPBROADCASTW : avx2_broadcast<0x79, "vpbroadcastw", i16mem, loadi16,
7848 v8i16, v16i16, NoVLX_Or_NoBWI>;
7849 defm VPBROADCASTD : avx2_broadcast<0x58, "vpbroadcastd", i32mem, loadi32,
7850 v4i32, v8i32, NoVLX>;
7851 defm VPBROADCASTQ : avx2_broadcast<0x59, "vpbroadcastq", i64mem, loadi64,
7852 v2i64, v4i64, NoVLX>;
7854 let Predicates = [HasAVX2, NoVLX] in {
7855 // 32-bit targets will fail to load an i64 directly but can use ZEXT_LOAD.
7856 def : Pat<(v2i64 (X86VBroadcast (v2i64 (X86vzload addr:$src)))),
7857 (VPBROADCASTQrm addr:$src)>;
7858 def : Pat<(v4i64 (X86VBroadcast (v4i64 (X86vzload addr:$src)))),
7859 (VPBROADCASTQYrm addr:$src)>;
7861 def : Pat<(v4i32 (X86VBroadcast (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
7862 (VPBROADCASTDrm addr:$src)>;
7863 def : Pat<(v8i32 (X86VBroadcast (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
7864 (VPBROADCASTDYrm addr:$src)>;
7865 def : Pat<(v2i64 (X86VBroadcast (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
7866 (VPBROADCASTQrm addr:$src)>;
7867 def : Pat<(v4i64 (X86VBroadcast (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
7868 (VPBROADCASTQYrm addr:$src)>;
7869 }
7870 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
7871 // loadi16 is tricky to fold, because !isTypeDesirableForOp, justifiably.
7872 // This means we'll encounter truncated i32 loads; match that here.
7873 def : Pat<(v8i16 (X86VBroadcast (i16 (trunc (i32 (load addr:$src)))))),
7874 (VPBROADCASTWrm addr:$src)>;
7875 def : Pat<(v16i16 (X86VBroadcast (i16 (trunc (i32 (load addr:$src)))))),
7876 (VPBROADCASTWYrm addr:$src)>;
7877 def : Pat<(v8i16 (X86VBroadcast
7878 (i16 (trunc (i32 (zextloadi16 addr:$src)))))),
7879 (VPBROADCASTWrm addr:$src)>;
7880 def : Pat<(v16i16 (X86VBroadcast
7881 (i16 (trunc (i32 (zextloadi16 addr:$src)))))),
7882 (VPBROADCASTWYrm addr:$src)>;
7883 }
7885 let Predicates = [HasAVX2, NoVLX] in {
7886 // Provide aliases for broadcast from the same register class that
7887 // automatically does the extract.
7888 def : Pat<(v8f32 (X86VBroadcast (v8f32 VR256:$src))),
7889 (VBROADCASTSSYrr (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src),
7890 sub_xmm)))>;
7891 def : Pat<(v4f64 (X86VBroadcast (v4f64 VR256:$src))),
7892 (VBROADCASTSDYrr (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src),
7893 sub_xmm)))>;
7894 }
7896 let Predicates = [HasAVX2, NoVLX] in {
7897 // Provide fallback in case the load node that is used in the patterns above
7898 // is used by additional users, which prevents the pattern selection.
7899 def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
7900 (VBROADCASTSSrr (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)))>;
7901 def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
7902 (VBROADCASTSSYrr (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)))>;
7903 def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
7904 (VBROADCASTSDYrr (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))>;
7905 }
7907 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
7908 def : Pat<(v16i8 (X86VBroadcast GR8:$src)),
7909 (VPBROADCASTBrr (v16i8 (COPY_TO_REGCLASS
7910 (i32 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
7911 GR8:$src, sub_8bit)),
7912 VR128)))>;
7913 def : Pat<(v32i8 (X86VBroadcast GR8:$src)),
7914 (VPBROADCASTBYrr (v16i8 (COPY_TO_REGCLASS
7915 (i32 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
7916 GR8:$src, sub_8bit)),
7917 VR128)))>;
7919 def : Pat<(v8i16 (X86VBroadcast GR16:$src)),
7920 (VPBROADCASTWrr (v8i16 (COPY_TO_REGCLASS
7921 (i32 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
7922 GR16:$src, sub_16bit)),
7923 VR128)))>;
7924 def : Pat<(v16i16 (X86VBroadcast GR16:$src)),
7925 (VPBROADCASTWYrr (v8i16 (COPY_TO_REGCLASS
7926 (i32 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
7927 GR16:$src, sub_16bit)),
7928 VR128)))>;
7929 }
7930 let Predicates = [HasAVX2, NoVLX] in {
7931 def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
7932 (VPBROADCASTDrr (v4i32 (COPY_TO_REGCLASS GR32:$src, VR128)))>;
7933 def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
7934 (VPBROADCASTDYrr (v4i32 (COPY_TO_REGCLASS GR32:$src, VR128)))>;
7935 def : Pat<(v2i64 (X86VBroadcast GR64:$src)),
7936 (VPBROADCASTQrr (v2i64 (COPY_TO_REGCLASS GR64:$src, VR128)))>;
7937 def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
7938 (VPBROADCASTQYrr (v2i64 (COPY_TO_REGCLASS GR64:$src, VR128)))>;
7939 }
7941 // AVX1 broadcast patterns
7942 let Predicates = [HasAVX1Only] in {
7943 def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
7944 (VBROADCASTSSYrm addr:$src)>;
7945 def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
7946 (VBROADCASTSDYrm addr:$src)>;
7947 def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
7948 (VBROADCASTSSrm addr:$src)>;
7949 }
7951 // Provide fallback in case the load node that is used in the patterns above
7952 // is used by additional users, which prevents the pattern selection.
7953 let Predicates = [HasAVX, NoVLX] in {
7954 // 128-bit broadcasts:
7955 def : Pat<(v2f64 (X86VBroadcast f64:$src)),
7956 (VMOVDDUPrr (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))>;
7957 def : Pat<(v2f64 (X86VBroadcast (loadf64 addr:$src))),
7958 (VMOVDDUPrm addr:$src)>;
7960 def : Pat<(v2f64 (X86VBroadcast v2f64:$src)),
7961 (VMOVDDUPrr VR128:$src)>;
7962 def : Pat<(v2f64 (X86VBroadcast (loadv2f64 addr:$src))),
7963 (VMOVDDUPrm addr:$src)>;
7964 def : Pat<(v2f64 (X86VBroadcast (v2f64 (X86vzload addr:$src)))),
7965 (VMOVDDUPrm addr:$src)>;
7966 }
7968 let Predicates = [HasAVX1Only] in {
7969 def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
7970 (VPERMILPSri (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 0)>;
7971 def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
7972 (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
7973 (v4f32 (VPERMILPSri (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 0)), sub_xmm),
7974 (v4f32 (VPERMILPSri (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 0)), 1)>;
7975 def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
7976 (VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
7977 (v2f64 (VMOVDDUPrr (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))), sub_xmm),
7978 (v2f64 (VMOVDDUPrr (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))), 1)>;
7980 def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
7981 (VPSHUFDri (v4i32 (COPY_TO_REGCLASS GR32:$src, VR128)), 0)>;
7982 def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
7983 (VINSERTF128rr (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
7984 (v4i32 (VPSHUFDri (v4i32 (COPY_TO_REGCLASS GR32:$src, VR128)), 0)), sub_xmm),
7985 (v4i32 (VPSHUFDri (v4i32 (COPY_TO_REGCLASS GR32:$src, VR128)), 0)), 1)>;
7986 def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
7987 (VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
7988 (v4i32 (VPSHUFDri (v4i32 (COPY_TO_REGCLASS GR64:$src, VR128)), 0x44)), sub_xmm),
7989 (v4i32 (VPSHUFDri (v4i32 (COPY_TO_REGCLASS GR64:$src, VR128)), 0x44)), 1)>;
7991 def : Pat<(v2i64 (X86VBroadcast i64:$src)),
7992 (VPSHUFDri (v4i32 (COPY_TO_REGCLASS GR64:$src, VR128)), 0x44)>;
7993 def : Pat<(v2i64 (X86VBroadcast (loadi64 addr:$src))),
7994 (VMOVDDUPrm addr:$src)>;
7995 }
7997 //===----------------------------------------------------------------------===//
7998 // VPERM - Permute instructions
8001 multiclass avx2_perm<bits<8> opc, string OpcodeStr,
8002 ValueType OpVT, X86FoldableSchedWrite Sched,
8003 X86MemOperand memOp> {
8004 let Predicates = [HasAVX2, NoVLX] in {
8005 def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
8006 (ins VR256:$src1, VR256:$src2),
8007 !strconcat(OpcodeStr,
8008 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8010 (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>,
8011 Sched<[Sched]>, VEX_4V, VEX_L;
8012 def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
8013 (ins VR256:$src1, memOp:$src2),
8014 !strconcat(OpcodeStr,
8015 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8017 (OpVT (X86VPermv VR256:$src1,
8018 (load addr:$src2))))]>,
8019 Sched<[Sched.Folded, Sched.ReadAfterFold]>, VEX_4V, VEX_L;
8020 }
8021 }
8023 defm VPERMD : avx2_perm<0x36, "vpermd", v8i32, WriteVarShuffle256, i256mem>;
8024 let ExeDomain = SSEPackedSingle in
8025 defm VPERMPS : avx2_perm<0x16, "vpermps", v8f32, WriteFVarShuffle256, f256mem>;
8027 multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
8028 ValueType OpVT, X86FoldableSchedWrite Sched,
8029 X86MemOperand memOp> {
8030 let Predicates = [HasAVX2, NoVLX] in {
8031 def Yri : AVX2AIi8<opc, MRMSrcReg, (outs VR256:$dst),
8032 (ins VR256:$src1, u8imm:$src2),
8033 !strconcat(OpcodeStr,
8034 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8036 (OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>,
8037 Sched<[Sched]>, VEX, VEX_L;
8038 def Ymi : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst),
8039 (ins memOp:$src1, u8imm:$src2),
8040 !strconcat(OpcodeStr,
8041 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8043 (OpVT (X86VPermi (mem_frag addr:$src1),
8044 (i8 imm:$src2))))]>,
8045 Sched<[Sched.Folded, Sched.ReadAfterFold]>, VEX, VEX_L;
8046 }
8047 }
8049 defm VPERMQ : avx2_perm_imm<0x00, "vpermq", loadv4i64, v4i64,
8050 WriteShuffle256, i256mem>, VEX_W;
8051 let ExeDomain = SSEPackedDouble in
8052 defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", loadv4f64, v4f64,
8053 WriteFShuffle256, f256mem>, VEX_W;
8055 //===----------------------------------------------------------------------===//
8056 // VPERM2I128 - Permute Floating-Point Values in 128-bit chunks
8058 let isCommutable = 1 in
8059 def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
8060 (ins VR256:$src1, VR256:$src2, u8imm:$src3),
8061 "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8062 [(set VR256:$dst, (v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2,
8063 (i8 imm:$src3))))]>, Sched<[WriteShuffle256]>,
8064 VEX_4V, VEX_L;
8065 def VPERM2I128rm : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
8066 (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
8067 "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8068 [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv4i64 addr:$src2),
8069 (i8 imm:$src3)))]>,
8070 Sched<[WriteShuffle256.Folded, WriteShuffle256.ReadAfterFold]>, VEX_4V, VEX_L;
8072 let Predicates = [HasAVX2] in
8073 def : Pat<(v4i64 (X86VPerm2x128 (loadv4i64 addr:$src2),
8074 VR256:$src1, (i8 imm:$imm))),
8075 (VPERM2I128rm VR256:$src1, addr:$src2, (Perm2XCommuteImm imm:$imm))>;
8078 //===----------------------------------------------------------------------===//
8079 // VINSERTI128 - Insert packed integer values
8081 let hasSideEffects = 0 in {
8082 def VINSERTI128rr : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
8083 (ins VR256:$src1, VR128:$src2, u8imm:$src3),
8084 "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8085 []>, Sched<[WriteShuffle256]>, VEX_4V, VEX_L;
8087 def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
8088 (ins VR256:$src1, i128mem:$src2, u8imm:$src3),
8089 "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8090 []>, Sched<[WriteShuffle256.Folded, WriteShuffle256.ReadAfterFold]>, VEX_4V, VEX_L;
8091 }
8093 let Predicates = [HasAVX2, NoVLX] in {
8094 defm : vinsert_lowering<"VINSERTI128", v2i64, v4i64, loadv2i64>;
8095 defm : vinsert_lowering<"VINSERTI128", v4i32, v8i32, loadv4i32>;
8096 defm : vinsert_lowering<"VINSERTI128", v8i16, v16i16, loadv8i16>;
8097 defm : vinsert_lowering<"VINSERTI128", v16i8, v32i8, loadv16i8>;
8098 }
8100 //===----------------------------------------------------------------------===//
8101 // VEXTRACTI128 - Extract packed integer values
8103 def VEXTRACTI128rr : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst),
8104 (ins VR256:$src1, u8imm:$src2),
8105 "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
8106 Sched<[WriteShuffle256]>, VEX, VEX_L;
8107 let hasSideEffects = 0, mayStore = 1 in
8108 def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs),
8109 (ins i128mem:$dst, VR256:$src1, u8imm:$src2),
8110 "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
8111 Sched<[SchedWriteVecMoveLS.XMM.MR]>, VEX, VEX_L;
8113 let Predicates = [HasAVX2, NoVLX] in {
8114 defm : vextract_lowering<"VEXTRACTI128", v4i64, v2i64>;
8115 defm : vextract_lowering<"VEXTRACTI128", v8i32, v4i32>;
8116 defm : vextract_lowering<"VEXTRACTI128", v16i16, v8i16>;
8117 defm : vextract_lowering<"VEXTRACTI128", v32i8, v16i8>;
8118 }
8120 //===----------------------------------------------------------------------===//
8121 // VPMASKMOV - Conditional SIMD Integer Packed Loads and Stores
multiclass avx2_pmovmask<string OpcodeStr,
                         Intrinsic IntLd128, Intrinsic IntLd256,
                         Intrinsic IntSt128, Intrinsic IntSt256> {
  def rm  : AVX28I<0x8c, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd128 addr:$src2, VR128:$src1))]>,
             VEX_4V, Sched<[WriteVecMaskedLoad]>;
  def Yrm : AVX28I<0x8c, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, i256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V, VEX_L, Sched<[WriteVecMaskedLoadY]>;
  def mr  : AVX28I<0x8e, MRMDestMem, (outs),
             (ins i128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt128 addr:$dst, VR128:$src1, VR128:$src2)]>,
             VEX_4V, Sched<[WriteVecMaskedStore]>;
  def Ymr : AVX28I<0x8e, MRMDestMem, (outs),
             (ins i256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>,
             VEX_4V, VEX_L, Sched<[WriteVecMaskedStoreY]>;
}
defm VPMASKMOVD : avx2_pmovmask<"vpmaskmovd",
                                int_x86_avx2_maskload_d,
                                int_x86_avx2_maskload_d_256,
                                int_x86_avx2_maskstore_d,
                                int_x86_avx2_maskstore_d_256>;
defm VPMASKMOVQ : avx2_pmovmask<"vpmaskmovq",
                                int_x86_avx2_maskload_q,
                                int_x86_avx2_maskload_q_256,
                                int_x86_avx2_maskstore_q,
                                int_x86_avx2_maskstore_q_256>, VEX_W;
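// The multiclass below maps the target-independent masked load/store nodes
// (X86mload/X86mstore) onto the maskmov instructions. A masked load whose
// pass-through value is neither undef nor zero needs an explicit blend after
// the load, because the hardware instruction always zeroes unselected
// elements.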
multiclass maskmov_lowering<string InstrStr, RegisterClass RC, ValueType VT,
                            ValueType MaskVT, string BlendStr, ValueType ZeroVT> {
  // masked store
  def: Pat<(X86mstore (VT RC:$src), addr:$ptr, (MaskVT RC:$mask)),
           (!cast<Instruction>(InstrStr#"mr") addr:$ptr, RC:$mask, RC:$src)>;
  // masked load
  def: Pat<(VT (X86mload addr:$ptr, (MaskVT RC:$mask), undef)),
           (!cast<Instruction>(InstrStr#"rm") RC:$mask, addr:$ptr)>;
  def: Pat<(VT (X86mload addr:$ptr, (MaskVT RC:$mask),
                (VT (bitconvert (ZeroVT immAllZerosV))))),
           (!cast<Instruction>(InstrStr#"rm") RC:$mask, addr:$ptr)>;
  // masked load with a non-trivial pass-through value: load, then blend.
  def: Pat<(VT (X86mload addr:$ptr, (MaskVT RC:$mask), (VT RC:$src0))),
           (!cast<Instruction>(BlendStr#"rr")
               RC:$src0,
               (VT (!cast<Instruction>(InstrStr#"rm") RC:$mask, addr:$ptr)),
               RC:$mask)>;
}
let Predicates = [HasAVX] in {
  defm : maskmov_lowering<"VMASKMOVPS", VR128, v4f32, v4i32, "VBLENDVPS", v4i32>;
  defm : maskmov_lowering<"VMASKMOVPD", VR128, v2f64, v2i64, "VBLENDVPD", v4i32>;
  defm : maskmov_lowering<"VMASKMOVPSY", VR256, v8f32, v8i32, "VBLENDVPSY", v8i32>;
  defm : maskmov_lowering<"VMASKMOVPDY", VR256, v4f64, v4i64, "VBLENDVPDY", v8i32>;
}

let Predicates = [HasAVX1Only] in {
  // Integer masked loads/stores are not available without AVX2; use the
  // ps/pd forms instead.
  defm : maskmov_lowering<"VMASKMOVPSY", VR256, v8i32, v8i32, "VBLENDVPSY", v8i32>;
  defm : maskmov_lowering<"VMASKMOVPDY", VR256, v4i64, v4i64, "VBLENDVPDY", v8i32>;
  defm : maskmov_lowering<"VMASKMOVPS", VR128, v4i32, v4i32, "VBLENDVPS", v4i32>;
  defm : maskmov_lowering<"VMASKMOVPD", VR128, v2i64, v2i64, "VBLENDVPD", v4i32>;
}

let Predicates = [HasAVX2] in {
  defm : maskmov_lowering<"VPMASKMOVDY", VR256, v8i32, v8i32, "VBLENDVPSY", v8i32>;
  defm : maskmov_lowering<"VPMASKMOVQY", VR256, v4i64, v4i64, "VBLENDVPDY", v8i32>;
  defm : maskmov_lowering<"VPMASKMOVD", VR128, v4i32, v4i32, "VBLENDVPS", v4i32>;
  defm : maskmov_lowering<"VPMASKMOVQ", VR128, v2i64, v2i64, "VBLENDVPD", v4i32>;
}
//===----------------------------------------------------------------------===//
// SubVector Broadcasts
// Provide a register-to-register fallback in case the load node used in the
// broadcast-from-memory patterns above has additional users, which prevents
// those patterns from being selected.
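// With a register source, broadcasting a 128-bit subvector is simply an
// insert of the same XMM value into the upper lane of an otherwise undefined
// 256-bit register.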
let Predicates = [HasAVX2, NoVLX] in {
def : Pat<(v4i64 (X86SubVBroadcast (v2i64 VR128:$src))),
          (VINSERTI128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm),
                         (v2i64 VR128:$src), 1)>;
def : Pat<(v8i32 (X86SubVBroadcast (v4i32 VR128:$src))),
          (VINSERTI128rr (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm),
                         (v4i32 VR128:$src), 1)>;
def : Pat<(v16i16 (X86SubVBroadcast (v8i16 VR128:$src))),
          (VINSERTI128rr (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm),
                         (v8i16 VR128:$src), 1)>;
def : Pat<(v32i8 (X86SubVBroadcast (v16i8 VR128:$src))),
          (VINSERTI128rr (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm),
                         (v16i8 VR128:$src), 1)>;
}
let Predicates = [HasAVX, NoVLX] in {
def : Pat<(v4f64 (X86SubVBroadcast (v2f64 VR128:$src))),
          (VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm),
                         (v2f64 VR128:$src), 1)>;
def : Pat<(v8f32 (X86SubVBroadcast (v4f32 VR128:$src))),
          (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm),
                         (v4f32 VR128:$src), 1)>;
}
let Predicates = [HasAVX1Only] in {
def : Pat<(v4i64 (X86SubVBroadcast (v2i64 VR128:$src))),
          (VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm),
                         (v2i64 VR128:$src), 1)>;
def : Pat<(v8i32 (X86SubVBroadcast (v4i32 VR128:$src))),
          (VINSERTF128rr (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm),
                         (v4i32 VR128:$src), 1)>;
def : Pat<(v16i16 (X86SubVBroadcast (v8i16 VR128:$src))),
          (VINSERTF128rr (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm),
                         (v8i16 VR128:$src), 1)>;
def : Pat<(v32i8 (X86SubVBroadcast (v16i8 VR128:$src))),
          (VINSERTF128rr (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm),
                         (v16i8 VR128:$src), 1)>;
}
//===----------------------------------------------------------------------===//
// Variable Bit Shifts
//
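// VPSLLV/VPSRLV/VPSRAV shift each element by the count held in the
// corresponding element of the second source. Counts greater than or equal to
// the element width yield 0 for logical shifts and the sign fill for the
// arithmetic shift.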
multiclass avx2_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          SDNode IntrinNode, ValueType vt128, ValueType vt256> {
  def rr  : AVX28I<opc, MRMSrcReg, (outs VR128:$dst),
             (ins VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst,
               (vt128 (OpNode VR128:$src1, (vt128 VR128:$src2))))]>,
             VEX_4V, Sched<[SchedWriteVarVecShift.XMM]>;
  def rm  : AVX28I<opc, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst,
               (vt128 (OpNode VR128:$src1,
                       (vt128 (load addr:$src2)))))]>,
             VEX_4V, Sched<[SchedWriteVarVecShift.XMM.Folded,
                            SchedWriteVarVecShift.XMM.ReadAfterFold]>;
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
             (ins VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst,
               (vt256 (OpNode VR256:$src1, (vt256 VR256:$src2))))]>,
             VEX_4V, VEX_L, Sched<[SchedWriteVarVecShift.YMM]>;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, i256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst,
               (vt256 (OpNode VR256:$src1,
                       (vt256 (load addr:$src2)))))]>,
             VEX_4V, VEX_L, Sched<[SchedWriteVarVecShift.YMM.Folded,
                                   SchedWriteVarVecShift.YMM.ReadAfterFold]>;

  def : Pat<(vt128 (IntrinNode VR128:$src1, VR128:$src2)),
            (!cast<Instruction>(NAME#"rr") VR128:$src1, VR128:$src2)>;
  def : Pat<(vt128 (IntrinNode VR128:$src1, (load addr:$src2))),
            (!cast<Instruction>(NAME#"rm") VR128:$src1, addr:$src2)>;
  def : Pat<(vt256 (IntrinNode VR256:$src1, VR256:$src2)),
            (!cast<Instruction>(NAME#"Yrr") VR256:$src1, VR256:$src2)>;
  def : Pat<(vt256 (IntrinNode VR256:$src1, (load addr:$src2))),
            (!cast<Instruction>(NAME#"Yrm") VR256:$src1, addr:$src2)>;
}
let Predicates = [HasAVX2, NoVLX] in {
  defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", shl, X86vshlv, v4i32, v8i32>;
  defm VPSLLVQ : avx2_var_shift<0x47, "vpsllvq", shl, X86vshlv, v2i64, v4i64>, VEX_W;
  defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", srl, X86vsrlv, v4i32, v8i32>;
  defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", srl, X86vsrlv, v2i64, v4i64>, VEX_W;
  defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", sra, X86vsrav, v4i32, v8i32>;
}
//===----------------------------------------------------------------------===//
// VGATHER - GATHER Operations

// FIXME: Improve scheduling of gather instructions.
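// Gathers load each element from base + scale * index, with the indices taken
// from a vector register; only elements whose mask element has its most
// significant bit set are loaded. The mask is an in/out operand: hardware
// clears each mask element as the corresponding load completes, which is why
// both $dst and $mask_wb are tied and marked early-clobber below.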
multiclass avx2_gather<bits<8> opc, string OpcodeStr, ValueType VTx,
                       ValueType VTy, PatFrag GatherNode128,
                       PatFrag GatherNode256, RegisterClass RC256,
                       X86MemOperand memop128, X86MemOperand memop256,
                       ValueType MTx = VTx, ValueType MTy = VTy> {
  def rm  : AVX28I<opc, MRMSrcMem4VOp3, (outs VR128:$dst, VR128:$mask_wb),
            (ins VR128:$src1, memop128:$src2, VR128:$mask),
            !strconcat(OpcodeStr,
              "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
            [(set (VTx VR128:$dst), (MTx VR128:$mask_wb),
                  (GatherNode128 VR128:$src1, VR128:$mask,
                                 vectoraddr:$src2))]>,
            VEX, Sched<[WriteLoad]>;
  def Yrm : AVX28I<opc, MRMSrcMem4VOp3, (outs RC256:$dst, RC256:$mask_wb),
            (ins RC256:$src1, memop256:$src2, RC256:$mask),
            !strconcat(OpcodeStr,
              "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
            [(set (VTy RC256:$dst), (MTy RC256:$mask_wb),
                  (GatherNode256 RC256:$src1, RC256:$mask,
                                 vectoraddr:$src2))]>,
            VEX, VEX_L, Sched<[WriteLoad]>;
}
let Predicates = [HasAVX2] in {
  let mayLoad = 1, hasSideEffects = 0, Constraints
    = "@earlyclobber $dst,@earlyclobber $mask_wb, $src1 = $dst, $mask = $mask_wb"
    in {
    defm VPGATHERDQ : avx2_gather<0x90, "vpgatherdq", v2i64, v4i64, mgatherv4i32,
                        mgatherv4i32, VR256, vx128mem, vx256mem>, VEX_W;
    defm VPGATHERQQ : avx2_gather<0x91, "vpgatherqq", v2i64, v4i64, mgatherv2i64,
                        mgatherv4i64, VR256, vx128mem, vy256mem>, VEX_W;
    defm VPGATHERDD : avx2_gather<0x90, "vpgatherdd", v4i32, v8i32, mgatherv4i32,
                        mgatherv8i32, VR256, vx128mem, vy256mem>;
    defm VPGATHERQD : avx2_gather<0x91, "vpgatherqd", v4i32, v4i32, mgatherv2i64,
                        mgatherv4i64, VR128, vx64mem, vy128mem>;

    let ExeDomain = SSEPackedDouble in {
      defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", v2f64, v4f64, mgatherv4i32,
                          mgatherv4i32, VR256, vx128mem, vx256mem,
                          v2i64, v4i64>, VEX_W;
      defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", v2f64, v4f64, mgatherv2i64,
                          mgatherv4i64, VR256, vx128mem, vy256mem,
                          v2i64, v4i64>, VEX_W;
    }

    let ExeDomain = SSEPackedSingle in {
      defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", v4f32, v8f32, mgatherv4i32,
                          mgatherv8i32, VR256, vx128mem, vy256mem,
                          v4i32, v8i32>;
      defm VGATHERQPS : avx2_gather<0x93, "vgatherqps", v4f32, v4f32, mgatherv2i64,
                          mgatherv4i64, VR128, vx64mem, vy128mem,
                          v4i32, v4i32>;
    }
  }
}
//===----------------------------------------------------------------------===//
// GFNI instructions
//===----------------------------------------------------------------------===//
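// GF2P8MULB multiplies packed bytes as elements of GF(2^8), reduced by the
// AES polynomial x^8 + x^4 + x^3 + x + 1. GF2P8AFFINEQB/GF2P8AFFINEINVQB
// apply an 8x8 bit-matrix affine transform to each byte (the INV form first
// inverts the byte in GF(2^8)); the immediate supplies the additive constant.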
multiclass GF2P8MULB_rm<string OpcodeStr, ValueType OpVT,
                        RegisterClass RC, PatFrag MemOpFrag,
                        X86MemOperand X86MemOp, bit Is2Addr = 0> {
  let ExeDomain = SSEPackedInt,
      AsmString = !if(Is2Addr,
        OpcodeStr##"\t{$src2, $dst|$dst, $src2}",
        OpcodeStr##"\t{$src2, $src1, $dst|$dst, $src1, $src2}") in {
    let isCommutable = 1 in
    def rr : PDI<0xCF, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2), "",
                 [(set RC:$dst, (OpVT (X86GF2P8mulb RC:$src1, RC:$src2)))]>,
             Sched<[SchedWriteVecALU.XMM]>, T8PD;

    def rm : PDI<0xCF, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, X86MemOp:$src2), "",
                 [(set RC:$dst, (OpVT (X86GF2P8mulb RC:$src1,
                                 (MemOpFrag addr:$src2))))]>,
             Sched<[SchedWriteVecALU.XMM.Folded, SchedWriteVecALU.XMM.ReadAfterFold]>, T8PD;
  }
}
multiclass GF2P8AFFINE_rmi<bits<8> Op, string OpStr, ValueType OpVT,
                           SDNode OpNode, RegisterClass RC, PatFrag MemOpFrag,
                           X86MemOperand X86MemOp, bit Is2Addr = 0> {
  let AsmString = !if(Is2Addr,
        OpStr##"\t{$src3, $src2, $dst|$dst, $src2, $src3}",
        OpStr##"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}") in {
    def rri : Ii8<Op, MRMSrcReg, (outs RC:$dst),
                  (ins RC:$src1, RC:$src2, u8imm:$src3), "",
                  [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2, imm:$src3)))],
                  SSEPackedInt>, Sched<[SchedWriteVecALU.XMM]>;
    def rmi : Ii8<Op, MRMSrcMem, (outs RC:$dst),
                  (ins RC:$src1, X86MemOp:$src2, u8imm:$src3), "",
                  [(set RC:$dst, (OpVT (OpNode RC:$src1,
                                        (MemOpFrag addr:$src2),
                                        imm:$src3)))], SSEPackedInt>,
                  Sched<[SchedWriteVecALU.XMM.Folded, SchedWriteVecALU.XMM.ReadAfterFold]>;
  }
}
multiclass GF2P8AFFINE_common<bits<8> Op, string OpStr, SDNode OpNode> {
  let Constraints = "$src1 = $dst",
      Predicates  = [HasGFNI, UseSSE2] in
  defm NAME         : GF2P8AFFINE_rmi<Op, OpStr, v16i8, OpNode,
                                      VR128, load, i128mem, 1>;
  let Predicates = [HasGFNI, HasAVX, NoVLX_Or_NoBWI] in {
    defm V##NAME    : GF2P8AFFINE_rmi<Op, "v"##OpStr, v16i8, OpNode, VR128,
                                      load, i128mem>, VEX_4V, VEX_W;
    defm V##NAME##Y : GF2P8AFFINE_rmi<Op, "v"##OpStr, v32i8, OpNode, VR256,
                                      load, i256mem>, VEX_4V, VEX_L, VEX_W;
  }
}
// GF2P8MULB
let Constraints = "$src1 = $dst",
    Predicates  = [HasGFNI, UseSSE2] in
defm GF2P8MULB      : GF2P8MULB_rm<"gf2p8mulb", v16i8, VR128, memop,
                                   i128mem, 1>;
let Predicates = [HasGFNI, HasAVX, NoVLX_Or_NoBWI] in {
  defm VGF2P8MULB   : GF2P8MULB_rm<"vgf2p8mulb", v16i8, VR128, load,
                                   i128mem>, VEX_4V;
  defm VGF2P8MULBY  : GF2P8MULB_rm<"vgf2p8mulb", v32i8, VR256, load,
                                   i256mem>, VEX_4V, VEX_L;
}
// GF2P8AFFINEINVQB, GF2P8AFFINEQB
let isCommutable = 0 in {
  defm GF2P8AFFINEINVQB : GF2P8AFFINE_common<0xCF, "gf2p8affineinvqb",
                                             X86GF2P8affineinvqb>, TAPD;
  defm GF2P8AFFINEQB    : GF2P8AFFINE_common<0xCE, "gf2p8affineqb",
                                             X86GF2P8affineqb>, TAPD;
}