//===-- X86InstrSSE.td - SSE Instruction Set ---------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions
// and the instruction properties needed for code generation, machine code
// emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instructions Classes
//===----------------------------------------------------------------------===//
/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,
                           RegisterClass RC, X86MemOperand x86memop,
                           Domain d, X86FoldableSchedWrite sched,
                           bit Is2Addr = 1> {
  let isCodeGenOnly = 1 in {
  let isCommutable = 1 in {
    def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, RC:$src2))], d>,
       Sched<[sched]>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
     !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
     [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))], d>,
     Sched<[sched.Folded, sched.ReadAfterFold]>;
  }
}
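
// A minimal sketch of how this class is instantiated (hypothetical operands;
// the real binop definitions built on it appear later in this file):
//   defm ADD : sse12_fp_scalar<0x58, "addss", any_fadd, FR32, f32mem,
//                              SSEPackedSingle, SchedWriteFAdd.Scl>, XS;
// With the default Is2Addr = 1 this emits the two-operand SSE asm string;
// passing Is2Addr = 0 selects the three-operand AVX form instead.
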
/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr,
                               SDPatternOperator OpNode, RegisterClass RC,
                               ValueType VT, string asm, Operand memopr,
                               PatFrags mem_frags, Domain d,
                               X86FoldableSchedWrite sched, bit Is2Addr = 1> {
  let hasSideEffects = 0 in {
  def rr_Int : SI_Int<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (VT (OpNode RC:$src1, RC:$src2)))], d>,
       Sched<[sched]>;

  def rm_Int : SI_Int<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (VT (OpNode RC:$src1, (mem_frags addr:$src2))))], d>,
       Sched<[sched.Folded, sched.ReadAfterFold]>;
  }
}
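
// The *_Int forms operate on whole VR128 vectors rather than FR32/FR64 so
// intrinsic-style patterns can preserve the untouched upper elements of the
// destination. For example (illustrative; exact node name is an assumption),
// _mm_add_ss(a, b) lowers to something like
//   (v4f32 (X86fadds VR128:$a, VR128:$b))
// which matches rr_Int and keeps elements 1-3 of $a intact.
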
/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag,
                           Domain d, X86FoldableSchedWrite sched,
                           bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>,
       Sched<[sched]>;

  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))],
           d>,
       Sched<[sched.Folded, sched.ReadAfterFold]>;
}

/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
                                      string OpcodeStr, X86MemOperand x86memop,
                                      X86FoldableSchedWrite sched,
                                      list<dag> pat_rr, list<dag> pat_rm,
                                      bit Is2Addr = 1> {
  let isCommutable = 1, hasSideEffects = 0 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rr, d>,
       Sched<[sched]>;
  let hasSideEffects = 0, mayLoad = 1 in
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rm, d>,
       Sched<[sched.Folded, sched.ReadAfterFold]>;
}

// Alias instructions that map fld0 to xorps for sse or vxorps for avx.
// This is expanded by ExpandPostRAPseudos.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, SchedRW = [WriteZero] in {
  def FsFLD0SS : I<0, Pseudo, (outs FR32:$dst), (ins), "",
                   [(set FR32:$dst, fp32imm0)]>, Requires<[HasSSE1, NoAVX512]>;
  def FsFLD0SD : I<0, Pseudo, (outs FR64:$dst), (ins), "",
                   [(set FR64:$dst, fp64imm0)]>, Requires<[HasSSE2, NoAVX512]>;
  def FsFLD0F128 : I<0, Pseudo, (outs VR128:$dst), (ins), "",
                     [(set VR128:$dst, fp128imm0)]>, Requires<[HasSSE1, NoAVX512]>;
}
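
// Illustrative post-RA expansion (ExpandPostRAPseudos): FsFLD0SS becomes a
// self-xor zeroing idiom, e.g.
//   xorps  %xmm0, %xmm0          (SSE)
//   vxorps %xmm0, %xmm0, %xmm0   (AVX)
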
//===----------------------------------------------------------------------===//
// AVX & SSE - Zero/One Vectors
//===----------------------------------------------------------------------===//

// Alias instruction that maps zero vector to pxor / xorp* for sse.
// This is expanded by ExpandPostRAPseudos to an xorps / vxorps, and then
// swizzled by ExecutionDomainFix to pxor.
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-zeros value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, Predicates = [NoAVX512], SchedRW = [WriteZero] in {
def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "",
               [(set VR128:$dst, (v4f32 immAllZerosV))]>;
}

let Predicates = [NoAVX512] in {
  def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
  def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
  def : Pat<(v4i32 immAllZerosV), (V_SET0)>;
  def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
  def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
}

// The same as done above but for AVX. The 256-bit AVX1 ISA doesn't support PI,
// and doesn't need it because on Sandy Bridge the register is set to zero
// at the rename stage without using any execution unit, so SET0PSY
// and SET0PDY can be used for vector int instructions without penalty.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, Predicates = [NoAVX512], SchedRW = [WriteZero] in {
def AVX_SET0 : I<0, Pseudo, (outs VR256:$dst), (ins), "",
                 [(set VR256:$dst, (v8i32 immAllZerosV))]>;
}

let Predicates = [NoAVX512] in {
  def : Pat<(v32i8 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v16i16 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v4i64 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v8f32 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v4f64 immAllZerosV), (AVX_SET0)>;
}
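
// Illustrative expansion: AVX_SET0 becomes a ymm self-xor,
//   vxorps %ymm0, %ymm0, %ymm0
// which the hardware recognizes as a zeroing idiom at register rename.
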
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-ones value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, SchedRW = [WriteZero] in {
  def V_SETALLONES : I<0, Pseudo, (outs VR128:$dst), (ins), "",
                       [(set VR128:$dst, (v4i32 immAllOnesV))]>;
  let Predicates = [HasAVX1Only, OptForMinSize] in {
  def AVX1_SETALLONES: I<0, Pseudo, (outs VR256:$dst), (ins), "",
                         [(set VR256:$dst, (v8i32 immAllOnesV))]>;
  }
  let Predicates = [HasAVX2] in
  def AVX2_SETALLONES : I<0, Pseudo, (outs VR256:$dst), (ins), "",
                          [(set VR256:$dst, (v8i32 immAllOnesV))]>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move FP Scalar Instructions
//
// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update; register-to-register
// movss/movsd is not modeled as an INSERT_SUBREG because INSERT_SUBREG requires
// that the insert be implementable in terms of a copy, and, as just mentioned,
// we don't use movss/movsd for copies.
//===----------------------------------------------------------------------===//

multiclass sse12_move_rr<SDNode OpNode, ValueType vt,
                         X86MemOperand x86memop, string base_opc,
                         string asm_opr, Domain d, string Name> {
  let isCommutable = 1 in
  def rr : SI<0x10, MRMSrcReg, (outs VR128:$dst),
              (ins VR128:$src1, VR128:$src2),
              !strconcat(base_opc, asm_opr),
              [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))], d>,
              Sched<[SchedWriteFShuffle.XMM]>;

  // For the disassembler
  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
  def rr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
                  (ins VR128:$src1, VR128:$src2),
                  !strconcat(base_opc, asm_opr), []>,
                  Sched<[SchedWriteFShuffle.XMM]>, FoldGenData<Name#"rr">;
}
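
// movss/movsd reg-reg has two encodings: opcode 0x10 (MRMSrcReg, modeled by
// rr) and opcode 0x11 (MRMDestReg, modeled by rr_REV). rr_REV exists only so
// the disassembler can round-trip the 0x11 encoding; FoldGenData points the
// folding-table generator back at the canonical rr form.
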
multiclass sse12_move<RegisterClass RC, SDNode OpNode, ValueType vt,
                      X86MemOperand x86memop, string OpcodeStr,
                      Domain d, string Name, Predicate pred> {
  // AVX
  let Predicates = [UseAVX, OptForSize] in
  defm V#NAME : sse12_move_rr<OpNode, vt, x86memop, OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}", d,
                              "V"#Name>,
                              VEX_4V, VEX_LIG, VEX_WIG;

  def V#NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(store RC:$src, addr:$dst)], d>,
                     VEX, VEX_LIG, Sched<[WriteFStore]>, VEX_WIG;
  // SSE1 & 2
  let Constraints = "$src1 = $dst" in {
    let Predicates = [pred, NoSSE41_Or_OptForSize] in
    defm NAME : sse12_move_rr<OpNode, vt, x86memop, OpcodeStr,
                              "\t{$src2, $dst|$dst, $src2}", d, Name>;
  }

  def NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(store RC:$src, addr:$dst)], d>,
                   Sched<[WriteFStore]>;

  def : InstAlias<"v"#OpcodeStr#".s\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                  (!cast<Instruction>("V"#NAME#"rr_REV")
                   VR128:$dst, VR128:$src1, VR128:$src2), 0>;
  def : InstAlias<OpcodeStr#".s\t{$src2, $dst|$dst, $src2}",
                  (!cast<Instruction>(NAME#"rr_REV")
                   VR128:$dst, VR128:$src2), 0>;
}
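
// The ".s" aliases let assembly writers force the store-form encoding in
// AT&T syntax, e.g. (illustrative):
//   movss.s %xmm1, %xmm0    # assembles to the 0x11 (MOVSSrr_REV) encoding
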
// Loading from memory automatically zeroes the upper bits.
multiclass sse12_move_rm<RegisterClass RC, ValueType vt, X86MemOperand x86memop,
                         PatFrag mem_pat, PatFrag vzloadfrag, string OpcodeStr,
                         Domain d> {
  def V#NAME#rm : SI<0x10, MRMSrcMem, (outs VR128:$dst), (ins x86memop:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set VR128:$dst, (vt (vzloadfrag addr:$src)))], d>,
                     VEX, VEX_LIG, Sched<[WriteFLoad]>, VEX_WIG;
  def NAME#rm : SI<0x10, MRMSrcMem, (outs VR128:$dst), (ins x86memop:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR128:$dst, (vt (vzloadfrag addr:$src)))], d>,
                   Sched<[WriteFLoad]>;

  // _alt version uses FR32/FR64 register class.
  let isCodeGenOnly = 1 in {
    def V#NAME#rm_alt : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
                           !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                           [(set RC:$dst, (mem_pat addr:$src))], d>,
                           VEX, VEX_LIG, Sched<[WriteFLoad]>, VEX_WIG;
    def NAME#rm_alt : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
                         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                         [(set RC:$dst, (mem_pat addr:$src))], d>,
                         Sched<[WriteFLoad]>;
  }
}

defm MOVSS : sse12_move<FR32, X86Movss, v4f32, f32mem, "movss",
                        SSEPackedSingle, "MOVSS", UseSSE1>, XS;
defm MOVSD : sse12_move<FR64, X86Movsd, v2f64, f64mem, "movsd",
                        SSEPackedDouble, "MOVSD", UseSSE2>, XD;

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  defm MOVSS : sse12_move_rm<FR32, v4f32, f32mem, loadf32, X86vzload32, "movss",
                             SSEPackedSingle>, XS;
  defm MOVSD : sse12_move_rm<FR64, v2f64, f64mem, loadf64, X86vzload64, "movsd",
                             SSEPackedDouble>, XD;
}

let Predicates = [UseAVX] in {
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (VMOVSSrm addr:$src)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (VMOVSDrm addr:$src)>;

  // Represent the same patterns above but in the form they appear for
  // 256-bit types.
  def : Pat<(v8f32 (X86vzload32 addr:$src)),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
  def : Pat<(v4f64 (X86vzload64 addr:$src)),
            (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;
}

let Predicates = [UseAVX, OptForSize] in {
  // Move scalar to XMM zero-extended: zero a VR128, then do a
  // MOVSS to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (VMOVSSrr (v4f32 (V_SET0)), VR128:$src)>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (VMOVSSrr (v4i32 (V_SET0)), VR128:$src)>;

  // Move low f32 and clear high bits.
  def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
            (SUBREG_TO_REG (i32 0),
             (v4f32 (VMOVSSrr (v4f32 (V_SET0)),
                    (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm)))), sub_xmm)>;
  def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
            (SUBREG_TO_REG (i32 0),
             (v4i32 (VMOVSSrr (v4i32 (V_SET0)),
                    (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm)))), sub_xmm)>;
}

let Predicates = [UseSSE1, NoSSE41_Or_OptForSize] in {
  // Move scalar to XMM zero-extended: zero a VR128, then do a
  // MOVSS to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (MOVSSrr (v4f32 (V_SET0)), VR128:$src)>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (MOVSSrr (v4i32 (V_SET0)), VR128:$src)>;
}

let Predicates = [UseSSE2] in
def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
          (MOVSDrm addr:$src)>;

let Predicates = [UseSSE1] in
def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
          (MOVSSrm addr:$src)>;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Aligned/Unaligned FP Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag ld_frag,
                            string asm, Domain d,
                            X86SchedWriteMoveLS sched> {
let hasSideEffects = 0, isMoveReg = 1 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>,
           Sched<[sched.RR]>;
let canFoldAsLoad = 1, isReMaterializable = 1 in
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (ld_frag addr:$src))], d>,
           Sched<[sched.RM]>;
}

let Predicates = [HasAVX, NoVLX] in {
defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32, "movaps",
                                SSEPackedSingle, SchedWriteFMoveLS.XMM>,
                                PS, VEX, VEX_WIG;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64, "movapd",
                                SSEPackedDouble, SchedWriteFMoveLS.XMM>,
                                PD, VEX, VEX_WIG;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32, "movups",
                                SSEPackedSingle, SchedWriteFMoveLS.XMM>,
                                PS, VEX, VEX_WIG;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64, "movupd",
                                SSEPackedDouble, SchedWriteFMoveLS.XMM>,
                                PD, VEX, VEX_WIG;

defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32, "movaps",
                                 SSEPackedSingle, SchedWriteFMoveLS.YMM>,
                                 PS, VEX, VEX_L, VEX_WIG;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64, "movapd",
                                 SSEPackedDouble, SchedWriteFMoveLS.YMM>,
                                 PD, VEX, VEX_L, VEX_WIG;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32, "movups",
                                 SSEPackedSingle, SchedWriteFMoveLS.YMM>,
                                 PS, VEX, VEX_L, VEX_WIG;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64, "movupd",
                                 SSEPackedDouble, SchedWriteFMoveLS.YMM>,
                                 PD, VEX, VEX_L, VEX_WIG;
}

let Predicates = [UseSSE1] in {
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32, "movaps",
                               SSEPackedSingle, SchedWriteFMoveLS.XMM>,
                               PS;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32, "movups",
                               SSEPackedSingle, SchedWriteFMoveLS.XMM>,
                               PS;
}

let Predicates = [UseSSE2] in {
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64, "movapd",
                               SSEPackedDouble, SchedWriteFMoveLS.XMM>,
                               PD;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64, "movupd",
                               SSEPackedDouble, SchedWriteFMoveLS.XMM>,
                               PD;
}

let Predicates = [HasAVX, NoVLX] in {
let SchedRW = [SchedWriteFMoveLS.XMM.MR] in {
def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v4f32 VR128:$src), addr:$dst)]>,
                     VEX, VEX_WIG;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movapd\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v2f64 VR128:$src), addr:$dst)]>,
                     VEX, VEX_WIG;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movups\t{$src, $dst|$dst, $src}",
                     [(store (v4f32 VR128:$src), addr:$dst)]>,
                     VEX, VEX_WIG;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movupd\t{$src, $dst|$dst, $src}",
                     [(store (v2f64 VR128:$src), addr:$dst)]>,
                     VEX, VEX_WIG;
} // SchedRW

let SchedRW = [SchedWriteFMoveLS.YMM.MR] in {
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movaps\t{$src, $dst|$dst, $src}",
                      [(alignedstore (v8f32 VR256:$src), addr:$dst)]>,
                      VEX, VEX_L, VEX_WIG;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movapd\t{$src, $dst|$dst, $src}",
                      [(alignedstore (v4f64 VR256:$src), addr:$dst)]>,
                      VEX, VEX_L, VEX_WIG;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movups\t{$src, $dst|$dst, $src}",
                      [(store (v8f32 VR256:$src), addr:$dst)]>,
                      VEX, VEX_L, VEX_WIG;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movupd\t{$src, $dst|$dst, $src}",
                      [(store (v4f64 VR256:$src), addr:$dst)]>,
                      VEX, VEX_L, VEX_WIG;
} // SchedRW
} // Predicates

// For the disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
    isMoveReg = 1, Predicates = [HasAVX, NoVLX] in {
let SchedRW = [SchedWriteFMoveLS.XMM.RR] in {
  def VMOVAPSrr_REV : VPSI<0x29, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movaps\t{$src, $dst|$dst, $src}", []>,
                           VEX, VEX_WIG, FoldGenData<"VMOVAPSrr">;
  def VMOVAPDrr_REV : VPDI<0x29, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movapd\t{$src, $dst|$dst, $src}", []>,
                           VEX, VEX_WIG, FoldGenData<"VMOVAPDrr">;
  def VMOVUPSrr_REV : VPSI<0x11, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movups\t{$src, $dst|$dst, $src}", []>,
                           VEX, VEX_WIG, FoldGenData<"VMOVUPSrr">;
  def VMOVUPDrr_REV : VPDI<0x11, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movupd\t{$src, $dst|$dst, $src}", []>,
                           VEX, VEX_WIG, FoldGenData<"VMOVUPDrr">;
} // SchedRW

let SchedRW = [SchedWriteFMoveLS.YMM.RR] in {
  def VMOVAPSYrr_REV : VPSI<0x29, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movaps\t{$src, $dst|$dst, $src}", []>,
                            VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVAPSYrr">;
  def VMOVAPDYrr_REV : VPDI<0x29, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movapd\t{$src, $dst|$dst, $src}", []>,
                            VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVAPDYrr">;
  def VMOVUPSYrr_REV : VPSI<0x11, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movups\t{$src, $dst|$dst, $src}", []>,
                            VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVUPSYrr">;
  def VMOVUPDYrr_REV : VPDI<0x11, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movupd\t{$src, $dst|$dst, $src}", []>,
                            VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVUPDYrr">;
} // SchedRW
} // Predicates

// Reversed version with ".s" suffix for GAS compatibility.
def : InstAlias<"vmovaps.s\t{$src, $dst|$dst, $src}",
                (VMOVAPSrr_REV VR128:$dst, VR128:$src), 0>;
def : InstAlias<"vmovapd.s\t{$src, $dst|$dst, $src}",
                (VMOVAPDrr_REV VR128:$dst, VR128:$src), 0>;
def : InstAlias<"vmovups.s\t{$src, $dst|$dst, $src}",
                (VMOVUPSrr_REV VR128:$dst, VR128:$src), 0>;
def : InstAlias<"vmovupd.s\t{$src, $dst|$dst, $src}",
                (VMOVUPDrr_REV VR128:$dst, VR128:$src), 0>;
def : InstAlias<"vmovaps.s\t{$src, $dst|$dst, $src}",
                (VMOVAPSYrr_REV VR256:$dst, VR256:$src), 0>;
def : InstAlias<"vmovapd.s\t{$src, $dst|$dst, $src}",
                (VMOVAPDYrr_REV VR256:$dst, VR256:$src), 0>;
def : InstAlias<"vmovups.s\t{$src, $dst|$dst, $src}",
                (VMOVUPSYrr_REV VR256:$dst, VR256:$src), 0>;
def : InstAlias<"vmovupd.s\t{$src, $dst|$dst, $src}",
                (VMOVUPDYrr_REV VR256:$dst, VR256:$src), 0>;

let SchedRW = [SchedWriteFMoveLS.XMM.MR] in {
def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>;
} // SchedRW

// For the disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
    isMoveReg = 1, SchedRW = [SchedWriteFMoveLS.XMM.RR] in {
  def MOVAPSrr_REV : PSI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movaps\t{$src, $dst|$dst, $src}", []>,
                         FoldGenData<"MOVAPSrr">;
  def MOVAPDrr_REV : PDI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movapd\t{$src, $dst|$dst, $src}", []>,
                         FoldGenData<"MOVAPDrr">;
  def MOVUPSrr_REV : PSI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movups\t{$src, $dst|$dst, $src}", []>,
                         FoldGenData<"MOVUPSrr">;
  def MOVUPDrr_REV : PDI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movupd\t{$src, $dst|$dst, $src}", []>,
                         FoldGenData<"MOVUPDrr">;
} // SchedRW

// Reversed version with ".s" suffix for GAS compatibility.
def : InstAlias<"movaps.s\t{$src, $dst|$dst, $src}",
                (MOVAPSrr_REV VR128:$dst, VR128:$src), 0>;
def : InstAlias<"movapd.s\t{$src, $dst|$dst, $src}",
                (MOVAPDrr_REV VR128:$dst, VR128:$src), 0>;
def : InstAlias<"movups.s\t{$src, $dst|$dst, $src}",
                (MOVUPSrr_REV VR128:$dst, VR128:$src), 0>;
def : InstAlias<"movupd.s\t{$src, $dst|$dst, $src}",
                (MOVUPDrr_REV VR128:$dst, VR128:$src), 0>;

let Predicates = [HasAVX, NoVLX] in {
  // 256-bit loads/stores need to use floating-point load/store in case we
  // don't have AVX2. Execution domain fixing will convert to integer if AVX2
  // is available and changing the domain is beneficial.
  def : Pat<(alignedloadv4i64 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(alignedloadv8i32 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(alignedloadv16i16 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(alignedloadv32i8 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(loadv4i64 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(loadv8i32 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(loadv16i16 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(loadv32i8 addr:$src),
            (VMOVUPSYrm addr:$src)>;

  def : Pat<(alignedstore (v4i64 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore (v8i32 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore (v16i16 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore (v32i8 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v4i64 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v8i32 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v16i16 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v32i8 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
}

// Use movaps / movups for SSE integer load / store (one byte shorter).
// The instructions selected below are then converted to MOVDQA/MOVDQU
// during the SSE domain pass.
let Predicates = [UseSSE1] in {
  def : Pat<(alignedloadv2i64 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(alignedloadv4i32 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(alignedloadv8i16 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(alignedloadv16i8 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (MOVUPSrm addr:$src)>;
  def : Pat<(loadv4i32 addr:$src),
            (MOVUPSrm addr:$src)>;
  def : Pat<(loadv8i16 addr:$src),
            (MOVUPSrm addr:$src)>;
  def : Pat<(loadv16i8 addr:$src),
            (MOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
}
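
// e.g. an aligned v4i32 store selects MOVAPSmr here (one byte shorter than
// MOVDQAmr); the domain pass later rewrites it to MOVDQA only when keeping
// the value in the integer domain is worthwhile.
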
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low packed FP Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_mov_hilo_packed_base<bits<8> opc, SDPatternOperator pdnode,
                                      string base_opc, string asm_opr> {
  // No pattern; these need to be special-cased between high and low.
  let hasSideEffects = 0, mayLoad = 1 in
  def PSrm : PI<opc, MRMSrcMem,
                (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                !strconcat(base_opc, "s", asm_opr),
                [], SSEPackedSingle>, PS,
                Sched<[SchedWriteFShuffle.XMM.Folded, SchedWriteFShuffle.XMM.ReadAfterFold]>;

  def PDrm : PI<opc, MRMSrcMem,
                (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                !strconcat(base_opc, "d", asm_opr),
                [(set VR128:$dst, (v2f64 (pdnode VR128:$src1,
                                          (scalar_to_vector (loadf64 addr:$src2)))))],
                SSEPackedDouble>, PD,
                Sched<[SchedWriteFShuffle.XMM.Folded, SchedWriteFShuffle.XMM.ReadAfterFold]>;
}

multiclass sse12_mov_hilo_packed<bits<8> opc, SDPatternOperator pdnode,
                                 string base_opc> {
  let Predicates = [UseAVX] in
  defm V#NAME : sse12_mov_hilo_packed_base<opc, pdnode, base_opc,
                                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}">,
                                  VEX_4V, VEX_WIG;

  let Constraints = "$src1 = $dst" in
  defm NAME : sse12_mov_hilo_packed_base<opc, pdnode, base_opc,
                                  "\t{$src2, $dst|$dst, $src2}">;
}

defm MOVL : sse12_mov_hilo_packed<0x12, X86Movsd, "movlp">;

let SchedRW = [WriteFStore] in {
let Predicates = [UseAVX] in {
let mayStore = 1, hasSideEffects = 0 in
def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movlps\t{$src, $dst|$dst, $src}",
                     []>,
                     VEX, VEX_WIG;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movlpd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (extractelt (v2f64 VR128:$src),
                                   (iPTR 0))), addr:$dst)]>,
                     VEX, VEX_WIG;
} // UseAVX
let mayStore = 1, hasSideEffects = 0 in
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   []>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (extractelt (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>;
} // SchedRW

let Predicates = [UseSSE1] in {
  // This pattern helps select MOVLPS on SSE1-only targets. With SSE2 we'll
  // end up with a movsd or blend instead of shufp.
  // No need for an aligned load, we're only loading 64 bits.
  def : Pat<(X86Shufp (v4f32 (simple_load addr:$src2)), VR128:$src1,
                      (i8 -28)),
            (MOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Shufp (v4f32 (X86vzload64 addr:$src2)), VR128:$src1, (i8 -28)),
            (MOVLPSrm VR128:$src1, addr:$src2)>;

  def : Pat<(v4f32 (X86vzload64 addr:$src)),
            (MOVLPSrm (v4f32 (V_SET0)), addr:$src)>;
  def : Pat<(X86vextractstore64 (v4f32 VR128:$src), addr:$dst),
            (MOVLPSmr addr:$dst, VR128:$src)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Hi packed FP Instructions
//===----------------------------------------------------------------------===//

defm MOVH : sse12_mov_hilo_packed<0x16, X86Unpckl, "movhp">;

let SchedRW = [WriteFStore] in {
// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0, so the non-store version isn't too horrible.
let Predicates = [UseAVX] in {
let mayStore = 1, hasSideEffects = 0 in
def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movhps\t{$src, $dst|$dst, $src}",
                     []>, VEX, VEX_WIG;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movhpd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (extractelt
                                   (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
                                   (iPTR 0))), addr:$dst)]>, VEX, VEX_WIG;
} // UseAVX
let mayStore = 1, hasSideEffects = 0 in
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   []>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (extractelt
                                 (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>;
} // SchedRW

let Predicates = [UseAVX] in {
  // MOVHPD patterns
  def : Pat<(v2f64 (X86Unpckl VR128:$src1, (X86vzload64 addr:$src2))),
            (VMOVHPDrm VR128:$src1, addr:$src2)>;

  def : Pat<(store (f64 (extractelt
                          (v2f64 (X86VPermilpi VR128:$src, (i8 1))),
                          (iPTR 0))), addr:$dst),
            (VMOVHPDmr addr:$dst, VR128:$src)>;

  // MOVLPD patterns
  def : Pat<(v2f64 (X86Movsd VR128:$src1, (X86vzload64 addr:$src2))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;
}

let Predicates = [UseSSE1] in {
  // This pattern helps select MOVHPS on SSE1-only targets. With SSE2 we'll
  // end up with a movsd or blend instead of shufp.
  // No need for an aligned load, we're only loading 64 bits.
  def : Pat<(X86Movlhps VR128:$src1, (v4f32 (simple_load addr:$src2))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1, (v4f32 (X86vzload64 addr:$src2))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;

  def : Pat<(X86vextractstore64 (v4f32 (X86Movhlps VR128:$src, VR128:$src)),
                                addr:$dst),
            (MOVHPSmr addr:$dst, VR128:$src)>;
}

let Predicates = [UseSSE2] in {
  // MOVHPD patterns
  def : Pat<(v2f64 (X86Unpckl VR128:$src1, (X86vzload64 addr:$src2))),
            (MOVHPDrm VR128:$src1, addr:$src2)>;

  def : Pat<(store (f64 (extractelt
                          (v2f64 (X86Shufp VR128:$src, VR128:$src, (i8 1))),
                          (iPTR 0))), addr:$dst),
            (MOVHPDmr addr:$dst, VR128:$src)>;

  // MOVLPD patterns
  def : Pat<(v2f64 (X86Movsd VR128:$src1, (X86vzload64 addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;
}

let Predicates = [UseSSE2, NoSSE41_Or_OptForSize] in {
  // Use MOVLPD to load into the low bits from a full vector unless we can use
  // BLENDPD instead.
  def : Pat<(X86Movsd VR128:$src1, (v2f64 (simple_load addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low to High and High to Low packed FP Instructions
//===----------------------------------------------------------------------===//

let Predicates = [UseAVX] in {
  def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
                        (ins VR128:$src1, VR128:$src2),
                        "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))]>,
                        VEX_4V, Sched<[SchedWriteFShuffle.XMM]>, VEX_WIG;
  let isCommutable = 1 in
  def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
                        (ins VR128:$src1, VR128:$src2),
                        "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))]>,
                        VEX_4V, Sched<[SchedWriteFShuffle.XMM]>, VEX_WIG,
                        NotMemoryFoldable;
}
let Constraints = "$src1 = $dst" in {
  def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))]>,
                      Sched<[SchedWriteFShuffle.XMM]>;
  let isCommutable = 1 in
  def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))]>,
                      Sched<[SchedWriteFShuffle.XMM]>, NotMemoryFoldable;
}
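
// Semantics reminder: with $src1 tied to (or copied into) $dst,
//   movlhps: dst[63:0]  = src1[63:0],   dst[127:64] = src2[63:0]
//   movhlps: dst[63:0]  = src2[127:64], dst[127:64] = src1[127:64]
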
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Conversion Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                       SDPatternOperator OpNode, X86MemOperand x86memop,
                       PatFrag ld_frag, string asm, string mem,
                       X86FoldableSchedWrite sched, Domain d,
                       SchedRead Int2Fpu = ReadDefault> {
  let ExeDomain = d in {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
              !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (OpNode SrcRC:$src))]>,
              Sched<[sched, Int2Fpu]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
              mem#"\t{$src, $dst|$dst, $src}",
              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>,
              Sched<[sched.Folded]>;
  }
}

multiclass sse12_cvt_p<bits<8> opc, RegisterClass RC, X86MemOperand x86memop,
                       ValueType DstTy, ValueType SrcTy, PatFrag ld_frag,
                       string asm, Domain d, X86FoldableSchedWrite sched> {
  let hasSideEffects = 0, Uses = [MXCSR], mayRaiseFPException = 1 in {
  def rr : I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src), asm,
             [(set RC:$dst, (DstTy (any_sint_to_fp (SrcTy RC:$src))))], d>,
             Sched<[sched]>;
  def rm : I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src), asm,
             [(set RC:$dst, (DstTy (any_sint_to_fp
                                    (SrcTy (ld_frag addr:$src)))))], d>,
             Sched<[sched.Folded]>;
  }
}

multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          X86MemOperand x86memop, string asm, string mem,
                          X86FoldableSchedWrite sched, Domain d> {
  let hasSideEffects = 0, Predicates = [UseAVX], ExeDomain = d in {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
              Sched<[sched, ReadDefault, ReadInt2Fpu]>;
  let mayLoad = 1 in
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src),
              asm#"{"#mem#"}\t{$src, $src1, $dst|$dst, $src1, $src}", []>,
              Sched<[sched.Folded, sched.ReadAfterFold]>;
  } // hasSideEffects = 0
}

let isCodeGenOnly = 1, Predicates = [UseAVX], Uses = [MXCSR], mayRaiseFPException = 1 in {
defm VCVTTSS2SI   : sse12_cvt_s<0x2C, FR32, GR32, any_fp_to_sint, f32mem, loadf32,
                                "cvttss2si", "cvttss2si",
                                WriteCvtSS2I, SSEPackedSingle>,
                                XS, VEX, VEX_LIG;
defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, any_fp_to_sint, f32mem, loadf32,
                                "cvttss2si", "cvttss2si",
                                WriteCvtSS2I, SSEPackedSingle>,
                                XS, VEX, VEX_W, VEX_LIG;
defm VCVTTSD2SI   : sse12_cvt_s<0x2C, FR64, GR32, any_fp_to_sint, f64mem, loadf64,
                                "cvttsd2si", "cvttsd2si",
                                WriteCvtSD2I, SSEPackedDouble>,
                                XD, VEX, VEX_LIG;
defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, any_fp_to_sint, f64mem, loadf64,
                                "cvttsd2si", "cvttsd2si",
                                WriteCvtSD2I, SSEPackedDouble>,
                                XD, VEX, VEX_W, VEX_LIG;

defm VCVTSS2SI   : sse12_cvt_s<0x2D, FR32, GR32, lrint, f32mem, loadf32,
                               "cvtss2si", "cvtss2si",
                               WriteCvtSS2I, SSEPackedSingle>,
                               XS, VEX, VEX_LIG;
defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, llrint, f32mem, loadf32,
                               "cvtss2si", "cvtss2si",
                               WriteCvtSS2I, SSEPackedSingle>,
                               XS, VEX, VEX_W, VEX_LIG;
defm VCVTSD2SI   : sse12_cvt_s<0x2D, FR64, GR32, lrint, f64mem, loadf64,
                               "cvtsd2si", "cvtsd2si",
                               WriteCvtSD2I, SSEPackedDouble>,
                               XD, VEX, VEX_LIG;
defm VCVTSD2SI64 : sse12_cvt_s<0x2D, FR64, GR64, llrint, f64mem, loadf64,
                               "cvtsd2si", "cvtsd2si",
                               WriteCvtSD2I, SSEPackedDouble>,
                               XD, VEX, VEX_W, VEX_LIG;
}

// The assembler can recognize rr 64-bit instructions by seeing a rxx
// register, but the same isn't true when only using memory operands, so
// provide explicit assembly "l" and "q" forms to address this where
// appropriate.
let isCodeGenOnly = 1 in {
defm VCVTSI2SS   : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss", "l",
                                  WriteCvtI2SS, SSEPackedSingle>, XS, VEX_4V,
                                  VEX_LIG, SIMD_EXC;
defm VCVTSI642SS : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss", "q",
                                  WriteCvtI2SS, SSEPackedSingle>, XS, VEX_4V,
                                  VEX_W, VEX_LIG, SIMD_EXC;
defm VCVTSI2SD   : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd", "l",
                                  WriteCvtI2SD, SSEPackedDouble>, XD, VEX_4V,
                                  VEX_LIG;
defm VCVTSI642SD : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd", "q",
                                  WriteCvtI2SD, SSEPackedDouble>, XD, VEX_4V,
                                  VEX_W, VEX_LIG, SIMD_EXC;
} // isCodeGenOnly = 1

let Predicates = [UseAVX] in {
  def : Pat<(f32 (any_sint_to_fp (loadi32 addr:$src))),
            (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f32 (any_sint_to_fp (loadi64 addr:$src))),
            (VCVTSI642SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f64 (any_sint_to_fp (loadi32 addr:$src))),
            (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f64 (any_sint_to_fp (loadi64 addr:$src))),
            (VCVTSI642SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;

  def : Pat<(f32 (any_sint_to_fp GR32:$src)),
            (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
  def : Pat<(f32 (any_sint_to_fp GR64:$src)),
            (VCVTSI642SSrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
  def : Pat<(f64 (any_sint_to_fp GR32:$src)),
            (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
  def : Pat<(f64 (any_sint_to_fp GR64:$src)),
            (VCVTSI642SDrr (f64 (IMPLICIT_DEF)), GR64:$src)>;

  def : Pat<(i64 (lrint FR32:$src)), (VCVTSS2SI64rr FR32:$src)>;
  def : Pat<(i64 (lrint (loadf32 addr:$src))), (VCVTSS2SI64rm addr:$src)>;

  def : Pat<(i64 (lrint FR64:$src)), (VCVTSD2SI64rr FR64:$src)>;
  def : Pat<(i64 (lrint (loadf64 addr:$src))), (VCVTSD2SI64rm addr:$src)>;
}

let isCodeGenOnly = 1 in {
defm CVTTSS2SI   : sse12_cvt_s<0x2C, FR32, GR32, any_fp_to_sint, f32mem, loadf32,
                               "cvttss2si", "cvttss2si",
                               WriteCvtSS2I, SSEPackedSingle>, XS, SIMD_EXC;
defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, any_fp_to_sint, f32mem, loadf32,
                               "cvttss2si", "cvttss2si",
                               WriteCvtSS2I, SSEPackedSingle>, XS, REX_W, SIMD_EXC;
defm CVTTSD2SI   : sse12_cvt_s<0x2C, FR64, GR32, any_fp_to_sint, f64mem, loadf64,
                               "cvttsd2si", "cvttsd2si",
                               WriteCvtSD2I, SSEPackedDouble>, XD, SIMD_EXC;
defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, any_fp_to_sint, f64mem, loadf64,
                               "cvttsd2si", "cvttsd2si",
                               WriteCvtSD2I, SSEPackedDouble>, XD, REX_W, SIMD_EXC;

defm CVTSS2SI   : sse12_cvt_s<0x2D, FR32, GR32, lrint, f32mem, loadf32,
                              "cvtss2si", "cvtss2si",
                              WriteCvtSS2I, SSEPackedSingle>, XS, SIMD_EXC;
defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, llrint, f32mem, loadf32,
                              "cvtss2si", "cvtss2si",
                              WriteCvtSS2I, SSEPackedSingle>, XS, REX_W, SIMD_EXC;
defm CVTSD2SI   : sse12_cvt_s<0x2D, FR64, GR32, lrint, f64mem, loadf64,
                              "cvtsd2si", "cvtsd2si",
                              WriteCvtSD2I, SSEPackedDouble>, XD, SIMD_EXC;
defm CVTSD2SI64 : sse12_cvt_s<0x2D, FR64, GR64, llrint, f64mem, loadf64,
                              "cvtsd2si", "cvtsd2si",
                              WriteCvtSD2I, SSEPackedDouble>, XD, REX_W, SIMD_EXC;

defm CVTSI2SS   : sse12_cvt_s<0x2A, GR32, FR32, any_sint_to_fp, i32mem, loadi32,
                              "cvtsi2ss", "cvtsi2ss{l}",
                              WriteCvtI2SS, SSEPackedSingle, ReadInt2Fpu>, XS, SIMD_EXC;
defm CVTSI642SS : sse12_cvt_s<0x2A, GR64, FR32, any_sint_to_fp, i64mem, loadi64,
                              "cvtsi2ss", "cvtsi2ss{q}",
                              WriteCvtI2SS, SSEPackedSingle, ReadInt2Fpu>, XS, REX_W, SIMD_EXC;
defm CVTSI2SD   : sse12_cvt_s<0x2A, GR32, FR64, any_sint_to_fp, i32mem, loadi32,
                              "cvtsi2sd", "cvtsi2sd{l}",
                              WriteCvtI2SD, SSEPackedDouble, ReadInt2Fpu>, XD;
defm CVTSI642SD : sse12_cvt_s<0x2A, GR64, FR64, any_sint_to_fp, i64mem, loadi64,
                              "cvtsi2sd", "cvtsi2sd{q}",
                              WriteCvtI2SD, SSEPackedDouble, ReadInt2Fpu>, XD, REX_W, SIMD_EXC;
} // isCodeGenOnly = 1

let Predicates = [UseSSE1] in {
  def : Pat<(i64 (lrint FR32:$src)), (CVTSS2SI64rr FR32:$src)>;
  def : Pat<(i64 (lrint (loadf32 addr:$src))), (CVTSS2SI64rm addr:$src)>;
}

let Predicates = [UseSSE2] in {
  def : Pat<(i64 (lrint FR64:$src)), (CVTSD2SI64rr FR64:$src)>;
  def : Pat<(i64 (lrint (loadf64 addr:$src))), (CVTSD2SI64rm addr:$src)>;
}
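
// These patterns lower ISD::LRINT/LLRINT directly: cvtss2si/cvtsd2si round
// according to the current MXCSR rounding mode, which matches the C
// lrint()/llrint() contract, so no libcall is needed.
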
// Conversion Instructions Intrinsics - Match intrinsics which expect MM
// and/or XMM operand(s).

multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          ValueType DstVT, ValueType SrcVT, SDNode OpNode,
                          Operand memop, PatFrags mem_frags, string asm,
                          X86FoldableSchedWrite sched, Domain d> {
  let ExeDomain = d in {
  def rr_Int : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
                  !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
                  [(set DstRC:$dst, (DstVT (OpNode (SrcVT SrcRC:$src))))]>,
                  Sched<[sched]>;
  def rm_Int : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
                  !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
                  [(set DstRC:$dst, (DstVT (OpNode (SrcVT (mem_frags addr:$src)))))]>,
                  Sched<[sched.Folded]>;
  }
}

multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
                                RegisterClass DstRC, X86MemOperand x86memop,
                                string asm, string mem, X86FoldableSchedWrite sched,
                                Domain d, bit Is2Addr = 1> {
  let hasSideEffects = 0, ExeDomain = d in {
  def rr_Int : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
                  !if(Is2Addr,
                      !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                      !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                  []>, Sched<[sched, ReadDefault, ReadInt2Fpu]>;
  let mayLoad = 1 in
  def rm_Int : SI<opc, MRMSrcMem, (outs DstRC:$dst),
                  (ins DstRC:$src1, x86memop:$src2),
                  !if(Is2Addr,
                      asm#"{"#mem#"}\t{$src2, $dst|$dst, $src2}",
                      asm#"{"#mem#"}\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  []>, Sched<[sched.Folded, sched.ReadAfterFold]>;
  }
}

let Uses = [MXCSR], mayRaiseFPException = 1 in {
let Predicates = [UseAVX] in {
defm VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, i32, v2f64,
                                X86cvts2si, sdmem, sse_load_f64, "cvtsd2si",
                                WriteCvtSD2I, SSEPackedDouble>, XD, VEX, VEX_LIG;
defm VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, i64, v2f64,
                                  X86cvts2si, sdmem, sse_load_f64, "cvtsd2si",
                                  WriteCvtSD2I, SSEPackedDouble>, XD, VEX, VEX_W, VEX_LIG;
}
defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, i32, v2f64, X86cvts2si,
                               sdmem, sse_load_f64, "cvtsd2si", WriteCvtSD2I,
                               SSEPackedDouble>, XD;
defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, i64, v2f64, X86cvts2si,
                                 sdmem, sse_load_f64, "cvtsd2si", WriteCvtSD2I,
                                 SSEPackedDouble>, XD, REX_W;
}

let Predicates = [UseAVX] in {
defm VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
          i32mem, "cvtsi2ss", "l", WriteCvtI2SS, SSEPackedSingle, 0>,
          XS, VEX_4V, VEX_LIG, SIMD_EXC;
defm VCVTSI642SS : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
          i64mem, "cvtsi2ss", "q", WriteCvtI2SS, SSEPackedSingle, 0>,
          XS, VEX_4V, VEX_LIG, VEX_W, SIMD_EXC;
defm VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
          i32mem, "cvtsi2sd", "l", WriteCvtI2SD, SSEPackedDouble, 0>,
          XD, VEX_4V, VEX_LIG;
defm VCVTSI642SD : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
          i64mem, "cvtsi2sd", "q", WriteCvtI2SD, SSEPackedDouble, 0>,
          XD, VEX_4V, VEX_LIG, VEX_W, SIMD_EXC;
}
let Constraints = "$src1 = $dst" in {
  defm CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        i32mem, "cvtsi2ss", "l", WriteCvtI2SS, SSEPackedSingle>,
                        XS, SIMD_EXC;
  defm CVTSI642SS : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                        i64mem, "cvtsi2ss", "q", WriteCvtI2SS, SSEPackedSingle>,
                        XS, REX_W, SIMD_EXC;
  defm CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        i32mem, "cvtsi2sd", "l", WriteCvtI2SD, SSEPackedDouble>,
                        XD;
  defm CVTSI642SD : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                        i64mem, "cvtsi2sd", "q", WriteCvtI2SD, SSEPackedDouble>,
                        XD, REX_W, SIMD_EXC;
}

def : InstAlias<"vcvtsi2ss{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                (VCVTSI2SSrr_Int VR128:$dst, VR128:$src1, GR32:$src2), 0, "att">;
def : InstAlias<"vcvtsi2ss{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                (VCVTSI642SSrr_Int VR128:$dst, VR128:$src1, GR64:$src2), 0, "att">;
def : InstAlias<"vcvtsi2sd{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                (VCVTSI2SDrr_Int VR128:$dst, VR128:$src1, GR32:$src2), 0, "att">;
def : InstAlias<"vcvtsi2sd{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                (VCVTSI642SDrr_Int VR128:$dst, VR128:$src1, GR64:$src2), 0, "att">;

def : InstAlias<"vcvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
                (VCVTSI2SSrm_Int VR128:$dst, VR128:$src1, i32mem:$src), 0, "att">;
def : InstAlias<"vcvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}",
                (VCVTSI2SDrm_Int VR128:$dst, VR128:$src1, i32mem:$src), 0, "att">;

def : InstAlias<"cvtsi2ss{l}\t{$src, $dst|$dst, $src}",
                (CVTSI2SSrr_Int VR128:$dst, GR32:$src), 0, "att">;
def : InstAlias<"cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
                (CVTSI642SSrr_Int VR128:$dst, GR64:$src), 0, "att">;
def : InstAlias<"cvtsi2sd{l}\t{$src, $dst|$dst, $src}",
                (CVTSI2SDrr_Int VR128:$dst, GR32:$src), 0, "att">;
def : InstAlias<"cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
                (CVTSI642SDrr_Int VR128:$dst, GR64:$src), 0, "att">;

def : InstAlias<"cvtsi2ss\t{$src, $dst|$dst, $src}",
                (CVTSI2SSrm_Int VR128:$dst, i32mem:$src), 0, "att">;
def : InstAlias<"cvtsi2sd\t{$src, $dst|$dst, $src}",
                (CVTSI2SDrm_Int VR128:$dst, i32mem:$src), 0, "att">;
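
// In AT&T syntax a memory operand doesn't imply a size, so the suffix-less
// aliases above default the memory form to the 32-bit ("l") variant, e.g.:
//   cvtsi2ss (%rax), %xmm0    # treated as cvtsi2ssl
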
// Aliases for intrinsics
let Predicates = [UseAVX], Uses = [MXCSR], mayRaiseFPException = 1 in {
defm VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, i32, v4f32, X86cvtts2Int,
                                 ssmem, sse_load_f32, "cvttss2si",
                                 WriteCvtSS2I, SSEPackedSingle>, XS, VEX, VEX_LIG;
defm VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64, i64, v4f32,
                                   X86cvtts2Int, ssmem, sse_load_f32,
                                   "cvttss2si", WriteCvtSS2I, SSEPackedSingle>,
                                   XS, VEX, VEX_LIG, VEX_W;
defm VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, i32, v2f64, X86cvtts2Int,
                                 sdmem, sse_load_f64, "cvttsd2si",
                                 WriteCvtSS2I, SSEPackedDouble>, XD, VEX, VEX_LIG;
defm VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64, i64, v2f64,
                                   X86cvtts2Int, sdmem, sse_load_f64,
                                   "cvttsd2si", WriteCvtSS2I, SSEPackedDouble>,
                                   XD, VEX, VEX_LIG, VEX_W;
}
let Uses = [MXCSR], mayRaiseFPException = 1 in {
defm CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, i32, v4f32, X86cvtts2Int,
                                ssmem, sse_load_f32, "cvttss2si",
                                WriteCvtSS2I, SSEPackedSingle>, XS;
defm CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64, i64, v4f32,
                                  X86cvtts2Int, ssmem, sse_load_f32,
                                  "cvttss2si", WriteCvtSS2I, SSEPackedSingle>,
                                  XS, REX_W;
defm CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, i32, v2f64, X86cvtts2Int,
                                sdmem, sse_load_f64, "cvttsd2si",
                                WriteCvtSD2I, SSEPackedDouble>, XD;
defm CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64, i64, v2f64,
                                  X86cvtts2Int, sdmem, sse_load_f64,
                                  "cvttsd2si", WriteCvtSD2I, SSEPackedDouble>,
                                  XD, REX_W;
}

def : InstAlias<"vcvttss2si{l}\t{$src, $dst|$dst, $src}",
                (VCVTTSS2SIrr_Int GR32:$dst, VR128:$src), 0, "att">;
def : InstAlias<"vcvttss2si{l}\t{$src, $dst|$dst, $src}",
                (VCVTTSS2SIrm_Int GR32:$dst, f32mem:$src), 0, "att">;
def : InstAlias<"vcvttsd2si{l}\t{$src, $dst|$dst, $src}",
                (VCVTTSD2SIrr_Int GR32:$dst, VR128:$src), 0, "att">;
def : InstAlias<"vcvttsd2si{l}\t{$src, $dst|$dst, $src}",
                (VCVTTSD2SIrm_Int GR32:$dst, f64mem:$src), 0, "att">;
def : InstAlias<"vcvttss2si{q}\t{$src, $dst|$dst, $src}",
                (VCVTTSS2SI64rr_Int GR64:$dst, VR128:$src), 0, "att">;
def : InstAlias<"vcvttss2si{q}\t{$src, $dst|$dst, $src}",
                (VCVTTSS2SI64rm_Int GR64:$dst, f32mem:$src), 0, "att">;
def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
                (VCVTTSD2SI64rr_Int GR64:$dst, VR128:$src), 0, "att">;
def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
                (VCVTTSD2SI64rm_Int GR64:$dst, f64mem:$src), 0, "att">;

def : InstAlias<"cvttss2si{l}\t{$src, $dst|$dst, $src}",
                (CVTTSS2SIrr_Int GR32:$dst, VR128:$src), 0, "att">;
def : InstAlias<"cvttss2si{l}\t{$src, $dst|$dst, $src}",
                (CVTTSS2SIrm_Int GR32:$dst, f32mem:$src), 0, "att">;
def : InstAlias<"cvttsd2si{l}\t{$src, $dst|$dst, $src}",
                (CVTTSD2SIrr_Int GR32:$dst, VR128:$src), 0, "att">;
def : InstAlias<"cvttsd2si{l}\t{$src, $dst|$dst, $src}",
                (CVTTSD2SIrm_Int GR32:$dst, f64mem:$src), 0, "att">;
def : InstAlias<"cvttss2si{q}\t{$src, $dst|$dst, $src}",
                (CVTTSS2SI64rr_Int GR64:$dst, VR128:$src), 0, "att">;
def : InstAlias<"cvttss2si{q}\t{$src, $dst|$dst, $src}",
                (CVTTSS2SI64rm_Int GR64:$dst, f32mem:$src), 0, "att">;
def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                (CVTTSD2SI64rr_Int GR64:$dst, VR128:$src), 0, "att">;
def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                (CVTTSD2SI64rm_Int GR64:$dst, f64mem:$src), 0, "att">;

let Predicates = [UseAVX], Uses = [MXCSR], mayRaiseFPException = 1 in {
defm VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, i32, v4f32, X86cvts2si,
                                ssmem, sse_load_f32, "cvtss2si",
                                WriteCvtSS2I, SSEPackedSingle>, XS, VEX, VEX_LIG;
defm VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, i64, v4f32, X86cvts2si,
                                  ssmem, sse_load_f32, "cvtss2si",
                                  WriteCvtSS2I, SSEPackedSingle>, XS, VEX, VEX_W, VEX_LIG;
}
let Uses = [MXCSR], mayRaiseFPException = 1 in {
defm CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, i32, v4f32, X86cvts2si,
                               ssmem, sse_load_f32, "cvtss2si",
                               WriteCvtSS2I, SSEPackedSingle>, XS;
defm CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, i64, v4f32, X86cvts2si,
                                 ssmem, sse_load_f32, "cvtss2si",
                                 WriteCvtSS2I, SSEPackedSingle>, XS, REX_W;
}

defm VCVTDQ2PS  : sse12_cvt_p<0x5B, VR128, i128mem, v4f32, v4i32, load,
                              "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                              SSEPackedSingle, WriteCvtI2PS>,
                              PS, VEX, Requires<[HasAVX, NoVLX]>, VEX_WIG;
defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, i256mem, v8f32, v8i32, load,
                              "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                              SSEPackedSingle, WriteCvtI2PSY>,
                              PS, VEX, VEX_L, Requires<[HasAVX, NoVLX]>, VEX_WIG;

defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, i128mem, v4f32, v4i32, memop,
                            "cvtdq2ps\t{$src, $dst|$dst, $src}",
                            SSEPackedSingle, WriteCvtI2PS>,
                            PS, Requires<[UseSSE2]>;

// AVX aliases
def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
                (VCVTSS2SIrr_Int GR32:$dst, VR128:$src), 0, "att">;
def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
                (VCVTSS2SIrm_Int GR32:$dst, ssmem:$src), 0, "att">;
def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
                (VCVTSD2SIrr_Int GR32:$dst, VR128:$src), 0, "att">;
def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
                (VCVTSD2SIrm_Int GR32:$dst, sdmem:$src), 0, "att">;
def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
                (VCVTSS2SI64rr_Int GR64:$dst, VR128:$src), 0, "att">;
def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
                (VCVTSS2SI64rm_Int GR64:$dst, ssmem:$src), 0, "att">;
def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
                (VCVTSD2SI64rr_Int GR64:$dst, VR128:$src), 0, "att">;
def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
                (VCVTSD2SI64rm_Int GR64:$dst, sdmem:$src), 0, "att">;

// SSE aliases
def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
                (CVTSS2SIrr_Int GR32:$dst, VR128:$src), 0, "att">;
def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
                (CVTSS2SIrm_Int GR32:$dst, ssmem:$src), 0, "att">;
def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
                (CVTSD2SIrr_Int GR32:$dst, VR128:$src), 0, "att">;
def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
                (CVTSD2SIrm_Int GR32:$dst, sdmem:$src), 0, "att">;
def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
                (CVTSS2SI64rr_Int GR64:$dst, VR128:$src), 0, "att">;
def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
                (CVTSS2SI64rm_Int GR64:$dst, ssmem:$src), 0, "att">;
def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                (CVTSD2SI64rr_Int GR64:$dst, VR128:$src), 0, "att">;
def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                (CVTSD2SI64rm_Int GR64:$dst, sdmem:$src), 0, "att">;

// Convert scalar double to scalar single
let isCodeGenOnly = 1, hasSideEffects = 0, Predicates = [UseAVX],
    ExeDomain = SSEPackedSingle in {
def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
                       (ins FR32:$src1, FR64:$src2),
                       "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                       VEX_4V, VEX_LIG, VEX_WIG,
                       Sched<[WriteCvtSD2SS]>, SIMD_EXC;
let mayLoad = 1 in
def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
                    (ins FR32:$src1, f64mem:$src2),
                    "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                    XD, VEX_4V, VEX_LIG, VEX_WIG,
                    Sched<[WriteCvtSD2SS.Folded, WriteCvtSD2SS.ReadAfterFold]>, SIMD_EXC;
}

def : Pat<(f32 (any_fpround FR64:$src)),
          (VCVTSD2SSrr (f32 (IMPLICIT_DEF)), FR64:$src)>,
          Requires<[UseAVX]>;

let isCodeGenOnly = 1, ExeDomain = SSEPackedSingle in {
def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
                     "cvtsd2ss\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (any_fpround FR64:$src))]>,
                     Sched<[WriteCvtSD2SS]>, SIMD_EXC;
def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
                   "cvtsd2ss\t{$src, $dst|$dst, $src}",
                   [(set FR32:$dst, (any_fpround (loadf64 addr:$src)))]>,
                   XD, Requires<[UseSSE2, OptForSize]>,
                   Sched<[WriteCvtSD2SS.Folded]>, SIMD_EXC;
}

let Uses = [MXCSR], mayRaiseFPException = 1, ExeDomain = SSEPackedSingle in {
def VCVTSD2SSrr_Int: I<0x5A, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                       "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       [(set VR128:$dst,
                         (v4f32 (X86frounds VR128:$src1, (v2f64 VR128:$src2))))]>,
                       XD, VEX_4V, VEX_LIG, VEX_WIG, Requires<[UseAVX]>,
                       Sched<[WriteCvtSD2SS]>;
def VCVTSD2SSrm_Int: I<0x5A, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
                       "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       [(set VR128:$dst,
                         (v4f32 (X86frounds VR128:$src1, (sse_load_f64 addr:$src2))))]>,
                       XD, VEX_4V, VEX_LIG, VEX_WIG, Requires<[UseAVX]>,
                       Sched<[WriteCvtSD2SS.Folded, WriteCvtSD2SS.ReadAfterFold]>;
let Constraints = "$src1 = $dst" in {
def CVTSD2SSrr_Int: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (X86frounds VR128:$src1, (v2f64 VR128:$src2))))]>,
                      XD, Requires<[UseSSE2]>, Sched<[WriteCvtSD2SS]>;
def CVTSD2SSrm_Int: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
                      "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (X86frounds VR128:$src1, (sse_load_f64 addr:$src2))))]>,
                      XD, Requires<[UseSSE2]>,
                      Sched<[WriteCvtSD2SS.Folded, WriteCvtSD2SS.ReadAfterFold]>;
}
}

// Convert scalar single to scalar double
// SSE2 instructions with XS prefix
let isCodeGenOnly = 1, hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
                    (ins FR64:$src1, FR32:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                    XS, VEX_4V, VEX_LIG, VEX_WIG,
                    Sched<[WriteCvtSS2SD]>, Requires<[UseAVX]>, SIMD_EXC;
let mayLoad = 1 in
def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
                    (ins FR64:$src1, f32mem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                    XS, VEX_4V, VEX_LIG, VEX_WIG,
                    Sched<[WriteCvtSS2SD.Folded, WriteCvtSS2SD.ReadAfterFold]>,
                    Requires<[UseAVX, OptForSize]>, SIMD_EXC;
} // isCodeGenOnly = 1, hasSideEffects = 0

def : Pat<(f64 (any_fpextend FR32:$src)),
          (VCVTSS2SDrr (f64 (IMPLICIT_DEF)), FR32:$src)>, Requires<[UseAVX]>;
def : Pat<(any_fpextend (loadf32 addr:$src)),
          (VCVTSS2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>, Requires<[UseAVX, OptForSize]>;

let isCodeGenOnly = 1, ExeDomain = SSEPackedSingle in {
def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (any_fpextend FR32:$src))]>,
                   XS, Requires<[UseSSE2]>, Sched<[WriteCvtSS2SD]>, SIMD_EXC;
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (any_fpextend (loadf32 addr:$src)))]>,
                   XS, Requires<[UseSSE2, OptForSize]>,
                   Sched<[WriteCvtSS2SD.Folded]>, SIMD_EXC;
} // isCodeGenOnly = 1

let hasSideEffects = 0, Uses = [MXCSR], mayRaiseFPException = 1,
    ExeDomain = SSEPackedSingle in {
def VCVTSS2SDrr_Int: I<0x5A, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                       "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       []>, XS, VEX_4V, VEX_LIG, VEX_WIG,
                       Requires<[HasAVX]>, Sched<[WriteCvtSS2SD]>;
let mayLoad = 1 in
def VCVTSS2SDrm_Int: I<0x5A, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                       "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       []>, XS, VEX_4V, VEX_LIG, VEX_WIG, Requires<[HasAVX]>,
                       Sched<[WriteCvtSS2SD.Folded, WriteCvtSS2SD.ReadAfterFold]>;
let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
def CVTSS2SDrr_Int: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                      []>, XS, Requires<[UseSSE2]>,
                      Sched<[WriteCvtSS2SD]>;
let mayLoad = 1 in
def CVTSS2SDrm_Int: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                      "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                      []>, XS, Requires<[UseSSE2]>,
                      Sched<[WriteCvtSS2SD.Folded, WriteCvtSS2SD.ReadAfterFold]>;
} // Constraints = "$src1 = $dst"
} // hasSideEffects = 0


// Patterns used for matching (v)cvtsi2ss, (v)cvtsi2sd, (v)cvtsd2ss and
// (v)cvtss2sd intrinsic sequences from clang which produce unnecessary
// vmovs{s,d} instructions
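//
// A sketch of the kind of sequence being matched (illustrative, not the
// exact IR): _mm_cvtsd_ss(a, b) is lowered by clang to an extract of b[0],
// an fpround, and a movss-style blend back into a. Without the patterns
// below, that would select a convert into a temporary plus a vmovss; with
// them, the whole sequence becomes a single
//   vcvtsd2ss %xmm1, %xmm0, %xmm0
// (the register assignment shown here is arbitrary).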
let Predicates = [UseAVX] in {
def : Pat<(v4f32 (X86Movss
                   (v4f32 VR128:$dst),
                   (v4f32 (scalar_to_vector
                     (f32 (any_fpround (f64 (extractelt VR128:$src, (iPTR 0))))))))),
          (VCVTSD2SSrr_Int VR128:$dst, VR128:$src)>;

def : Pat<(v2f64 (X86Movsd
                   (v2f64 VR128:$dst),
                   (v2f64 (scalar_to_vector
                     (f64 (any_fpextend (f32 (extractelt VR128:$src, (iPTR 0))))))))),
          (VCVTSS2SDrr_Int VR128:$dst, VR128:$src)>;

def : Pat<(v4f32 (X86Movss
                   (v4f32 VR128:$dst),
                   (v4f32 (scalar_to_vector (f32 (any_sint_to_fp GR64:$src)))))),
          (VCVTSI642SSrr_Int VR128:$dst, GR64:$src)>;

def : Pat<(v4f32 (X86Movss
                   (v4f32 VR128:$dst),
                   (v4f32 (scalar_to_vector (f32 (any_sint_to_fp (loadi64 addr:$src))))))),
          (VCVTSI642SSrm_Int VR128:$dst, addr:$src)>;

def : Pat<(v4f32 (X86Movss
                   (v4f32 VR128:$dst),
                   (v4f32 (scalar_to_vector (f32 (any_sint_to_fp GR32:$src)))))),
          (VCVTSI2SSrr_Int VR128:$dst, GR32:$src)>;

def : Pat<(v4f32 (X86Movss
                   (v4f32 VR128:$dst),
                   (v4f32 (scalar_to_vector (f32 (any_sint_to_fp (loadi32 addr:$src))))))),
          (VCVTSI2SSrm_Int VR128:$dst, addr:$src)>;

def : Pat<(v2f64 (X86Movsd
                   (v2f64 VR128:$dst),
                   (v2f64 (scalar_to_vector (f64 (any_sint_to_fp GR64:$src)))))),
          (VCVTSI642SDrr_Int VR128:$dst, GR64:$src)>;

def : Pat<(v2f64 (X86Movsd
                   (v2f64 VR128:$dst),
                   (v2f64 (scalar_to_vector (f64 (any_sint_to_fp (loadi64 addr:$src))))))),
          (VCVTSI642SDrm_Int VR128:$dst, addr:$src)>;

def : Pat<(v2f64 (X86Movsd
                   (v2f64 VR128:$dst),
                   (v2f64 (scalar_to_vector (f64 (any_sint_to_fp GR32:$src)))))),
          (VCVTSI2SDrr_Int VR128:$dst, GR32:$src)>;

def : Pat<(v2f64 (X86Movsd
                   (v2f64 VR128:$dst),
                   (v2f64 (scalar_to_vector (f64 (any_sint_to_fp (loadi32 addr:$src))))))),
          (VCVTSI2SDrm_Int VR128:$dst, addr:$src)>;
} // Predicates = [UseAVX]

let Predicates = [UseSSE2] in {
def : Pat<(v4f32 (X86Movss
                   (v4f32 VR128:$dst),
                   (v4f32 (scalar_to_vector
                     (f32 (any_fpround (f64 (extractelt VR128:$src, (iPTR 0))))))))),
          (CVTSD2SSrr_Int VR128:$dst, VR128:$src)>;

def : Pat<(v2f64 (X86Movsd
                   (v2f64 VR128:$dst),
                   (v2f64 (scalar_to_vector
                     (f64 (any_fpextend (f32 (extractelt VR128:$src, (iPTR 0))))))))),
          (CVTSS2SDrr_Int VR128:$dst, VR128:$src)>;

def : Pat<(v2f64 (X86Movsd
                   (v2f64 VR128:$dst),
                   (v2f64 (scalar_to_vector (f64 (any_sint_to_fp GR64:$src)))))),
          (CVTSI642SDrr_Int VR128:$dst, GR64:$src)>;

def : Pat<(v2f64 (X86Movsd
                   (v2f64 VR128:$dst),
                   (v2f64 (scalar_to_vector (f64 (any_sint_to_fp (loadi64 addr:$src))))))),
          (CVTSI642SDrm_Int VR128:$dst, addr:$src)>;

def : Pat<(v2f64 (X86Movsd
                   (v2f64 VR128:$dst),
                   (v2f64 (scalar_to_vector (f64 (any_sint_to_fp GR32:$src)))))),
          (CVTSI2SDrr_Int VR128:$dst, GR32:$src)>;

def : Pat<(v2f64 (X86Movsd
                   (v2f64 VR128:$dst),
                   (v2f64 (scalar_to_vector (f64 (any_sint_to_fp (loadi32 addr:$src))))))),
          (CVTSI2SDrm_Int VR128:$dst, addr:$src)>;
} // Predicates = [UseSSE2]

let Predicates = [UseSSE1] in {
def : Pat<(v4f32 (X86Movss
                   (v4f32 VR128:$dst),
                   (v4f32 (scalar_to_vector (f32 (any_sint_to_fp GR64:$src)))))),
          (CVTSI642SSrr_Int VR128:$dst, GR64:$src)>;

def : Pat<(v4f32 (X86Movss
                   (v4f32 VR128:$dst),
                   (v4f32 (scalar_to_vector (f32 (any_sint_to_fp (loadi64 addr:$src))))))),
          (CVTSI642SSrm_Int VR128:$dst, addr:$src)>;

def : Pat<(v4f32 (X86Movss
                   (v4f32 VR128:$dst),
                   (v4f32 (scalar_to_vector (f32 (any_sint_to_fp GR32:$src)))))),
          (CVTSI2SSrr_Int VR128:$dst, GR32:$src)>;

def : Pat<(v4f32 (X86Movss
                   (v4f32 VR128:$dst),
                   (v4f32 (scalar_to_vector (f32 (any_sint_to_fp (loadi32 addr:$src))))))),
          (CVTSI2SSrm_Int VR128:$dst, addr:$src)>;
} // Predicates = [UseSSE1]

let Predicates = [HasAVX, NoVLX] in {
// Convert packed single/double fp to doubleword
def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (v4i32 (X86cvtp2Int (v4f32 VR128:$src))))]>,
                       VEX, Sched<[WriteCvtPS2I]>, VEX_WIG, SIMD_EXC;
def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtps2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v4i32 (X86cvtp2Int (loadv4f32 addr:$src))))]>,
                       VEX, Sched<[WriteCvtPS2ILd]>, VEX_WIG, SIMD_EXC;
def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR256:$dst,
                          (v8i32 (X86cvtp2Int (v8f32 VR256:$src))))]>,
                        VEX, VEX_L, Sched<[WriteCvtPS2IY]>, VEX_WIG, SIMD_EXC;
def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR256:$dst,
                          (v8i32 (X86cvtp2Int (loadv8f32 addr:$src))))]>,
                        VEX, VEX_L, Sched<[WriteCvtPS2IYLd]>, VEX_WIG, SIMD_EXC;
}
def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (v4i32 (X86cvtp2Int (v4f32 VR128:$src))))]>,
                     Sched<[WriteCvtPS2I]>, SIMD_EXC;
def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v4i32 (X86cvtp2Int (memopv4f32 addr:$src))))]>,
                     Sched<[WriteCvtPS2ILd]>, SIMD_EXC;

// Convert Packed Double FP to Packed DW Integers
let Predicates = [HasAVX, NoVLX], Uses = [MXCSR], mayRaiseFPException = 1 in {
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
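//
// For example, in AT&T syntax "vcvtpd2dq (%rax), %xmm0" is ambiguous: the
// memory source could be 128 or 256 bits wide. The "x"/"y" suffixed
// mnemonics below (vcvtpd2dqx, vcvtpd2dqy) state the source width
// explicitly, and the InstAliases make the suffixed spellings available
// for the register forms as well.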
def VCVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (X86cvtp2Int (v2f64 VR128:$src))))]>,
                      VEX, Sched<[WriteCvtPD2I]>, VEX_WIG;

// XMM only
def VCVTPD2DQrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "vcvtpd2dq{x}\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (X86cvtp2Int (loadv2f64 addr:$src))))]>, VEX,
                      Sched<[WriteCvtPD2ILd]>, VEX_WIG;

// YMM only
def VCVTPD2DQYrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v4i32 (X86cvtp2Int (v4f64 VR256:$src))))]>,
                       VEX, VEX_L, Sched<[WriteCvtPD2IY]>, VEX_WIG;
def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                       "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v4i32 (X86cvtp2Int (loadv4f64 addr:$src))))]>,
                       VEX, VEX_L, Sched<[WriteCvtPD2IYLd]>, VEX_WIG;
}

def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
                (VCVTPD2DQrr VR128:$dst, VR128:$src), 0, "att">;
def : InstAlias<"vcvtpd2dqy\t{$src, $dst|$dst, $src}",
                (VCVTPD2DQYrr VR128:$dst, VR256:$src), 0, "att">;

def CVTPD2DQrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtpd2dq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v4i32 (X86cvtp2Int (memopv2f64 addr:$src))))]>,
                     Sched<[WriteCvtPD2ILd]>, SIMD_EXC;
def CVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtpd2dq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v4i32 (X86cvtp2Int (v2f64 VR128:$src))))]>,
                     Sched<[WriteCvtPD2I]>, SIMD_EXC;

// Convert with truncation packed single/double fp to doubleword
// SSE2 packed instructions with XS prefix
let Uses = [MXCSR], mayRaiseFPException = 1 in {
let Predicates = [HasAVX, NoVLX] in {
def VCVTTPS2DQrr : VS2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvttps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                           (v4i32 (X86any_cvttp2si (v4f32 VR128:$src))))]>,
                         VEX, Sched<[WriteCvtPS2I]>, VEX_WIG;
def VCVTTPS2DQrm : VS2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvttps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                           (v4i32 (X86any_cvttp2si (loadv4f32 addr:$src))))]>,
                         VEX, Sched<[WriteCvtPS2ILd]>, VEX_WIG;
def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                          "cvttps2dq\t{$src, $dst|$dst, $src}",
                          [(set VR256:$dst,
                            (v8i32 (X86any_cvttp2si (v8f32 VR256:$src))))]>,
                          VEX, VEX_L, Sched<[WriteCvtPS2IY]>, VEX_WIG;
def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                          "cvttps2dq\t{$src, $dst|$dst, $src}",
                          [(set VR256:$dst,
                            (v8i32 (X86any_cvttp2si (loadv8f32 addr:$src))))]>,
                          VEX, VEX_L,
                          Sched<[WriteCvtPS2IYLd]>, VEX_WIG;
}

def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvttps2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v4i32 (X86any_cvttp2si (v4f32 VR128:$src))))]>,
                       Sched<[WriteCvtPS2I]>;
def CVTTPS2DQrm : S2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvttps2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v4i32 (X86any_cvttp2si (memopv4f32 addr:$src))))]>,
                       Sched<[WriteCvtPS2ILd]>;
}

// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
let Predicates = [HasAVX, NoVLX], Uses = [MXCSR], mayRaiseFPException = 1 in {
// XMM only
def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvttpd2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v4i32 (X86any_cvttp2si (v2f64 VR128:$src))))]>,
                        VEX, Sched<[WriteCvtPD2I]>, VEX_WIG;
def VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvttpd2dq{x}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v4i32 (X86any_cvttp2si (loadv2f64 addr:$src))))]>,
                        VEX, Sched<[WriteCvtPD2ILd]>, VEX_WIG;

// YMM only
def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                         "cvttpd2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                           (v4i32 (X86any_cvttp2si (v4f64 VR256:$src))))]>,
                         VEX, VEX_L, Sched<[WriteCvtPD2IY]>, VEX_WIG;
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                         "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                           (v4i32 (X86any_cvttp2si (loadv4f64 addr:$src))))]>,
                         VEX, VEX_L, Sched<[WriteCvtPD2IYLd]>, VEX_WIG;
} // Predicates = [HasAVX, NoVLX]

def : InstAlias<"vcvttpd2dqx\t{$src, $dst|$dst, $src}",
                (VCVTTPD2DQrr VR128:$dst, VR128:$src), 0, "att">;
def : InstAlias<"vcvttpd2dqy\t{$src, $dst|$dst, $src}",
                (VCVTTPD2DQYrr VR128:$dst, VR256:$src), 0, "att">;

let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(v4i32 (any_fp_to_sint (v4f64 VR256:$src))),
            (VCVTTPD2DQYrr VR256:$src)>;
  def : Pat<(v4i32 (any_fp_to_sint (loadv4f64 addr:$src))),
            (VCVTTPD2DQYrm addr:$src)>;
}

def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (X86any_cvttp2si (v2f64 VR128:$src))))]>,
                      Sched<[WriteCvtPD2I]>, SIMD_EXC;
def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (X86any_cvttp2si (memopv2f64 addr:$src))))]>,
                      Sched<[WriteCvtPD2ILd]>, SIMD_EXC;

// Convert packed single to packed double
let Predicates = [HasAVX, NoVLX], Uses = [MXCSR], mayRaiseFPException = 1 in {
// SSE2 instructions without OpSize prefix
def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    "vcvtps2pd\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (v2f64 (X86any_vfpext (v4f32 VR128:$src))))]>,
                    PS, VEX, Sched<[WriteCvtPS2PD]>, VEX_WIG;
def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                    "vcvtps2pd\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))]>,
                    PS, VEX, Sched<[WriteCvtPS2PD.Folded]>, VEX_WIG;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}",
                     [(set VR256:$dst, (v4f64 (any_fpextend (v4f32 VR128:$src))))]>,
                     PS, VEX, VEX_L, Sched<[WriteCvtPS2PDY]>, VEX_WIG;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}",
                     [(set VR256:$dst, (v4f64 (extloadv4f32 addr:$src)))]>,
                     PS, VEX, VEX_L, Sched<[WriteCvtPS2PDY.Folded]>, VEX_WIG;
}

let Predicates = [UseSSE2], Uses = [MXCSR], mayRaiseFPException = 1 in {
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (v2f64 (X86any_vfpext (v4f32 VR128:$src))))]>,
                   PS, Sched<[WriteCvtPS2PD]>;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))]>,
                   PS, Sched<[WriteCvtPS2PD.Folded]>;
}

// Convert Packed DW Integers to Packed Double FP
let Predicates = [HasAVX, NoVLX] in {
let hasSideEffects = 0, mayLoad = 1 in
def VCVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v2f64 (X86any_VSintToFP
                                 (bc_v4i32
                                  (v2i64 (scalar_to_vector
                                          (loadi64 addr:$src)))))))]>,
                       VEX, Sched<[WriteCvtI2PDLd]>, VEX_WIG;
def VCVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v2f64 (X86any_VSintToFP (v4i32 VR128:$src))))]>,
                       VEX, Sched<[WriteCvtI2PD]>, VEX_WIG;
def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
                        "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                        [(set VR256:$dst,
                          (v4f64 (any_sint_to_fp (loadv4i32 addr:$src))))]>,
                        VEX, VEX_L, Sched<[WriteCvtI2PDYLd]>,
                        VEX_WIG;
def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                        "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                        [(set VR256:$dst,
                          (v4f64 (any_sint_to_fp (v4i32 VR128:$src))))]>,
                        VEX, VEX_L, Sched<[WriteCvtI2PDY]>, VEX_WIG;
}

let hasSideEffects = 0, mayLoad = 1 in
def CVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                      "cvtdq2pd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v2f64 (X86any_VSintToFP
                                (bc_v4i32
                                 (v2i64 (scalar_to_vector
                                         (loadi64 addr:$src)))))))]>,
                      Sched<[WriteCvtI2PDLd]>;
def CVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvtdq2pd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v2f64 (X86any_VSintToFP (v4i32 VR128:$src))))]>,
                      Sched<[WriteCvtI2PD]>;

// AVX register conversion intrinsics
let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(v2f64 (X86any_VSintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src))))),
            (VCVTDQ2PDrm addr:$src)>;
} // Predicates = [HasAVX, NoVLX]

// SSE2 register conversion intrinsics
let Predicates = [UseSSE2] in {
  def : Pat<(v2f64 (X86any_VSintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src))))),
            (CVTDQ2PDrm addr:$src)>;
} // Predicates = [UseSSE2]

// Convert packed double to packed single
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
let Predicates = [HasAVX, NoVLX], Uses = [MXCSR], mayRaiseFPException = 1 in {
// XMM only
def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (v4f32 (X86any_vfpround (v2f64 VR128:$src))))]>,
                       VEX, Sched<[WriteCvtPD2PS]>, VEX_WIG;
def VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtpd2ps{x}\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (v4f32 (X86any_vfpround (loadv2f64 addr:$src))))]>,
                       VEX, Sched<[WriteCvtPD2PS.Folded]>, VEX_WIG;
// YMM only
def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                        "cvtpd2ps\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (v4f32 (X86any_vfpround (v4f64 VR256:$src))))]>,
                        VEX, VEX_L, Sched<[WriteCvtPD2PSY]>, VEX_WIG;
def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                        "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (v4f32 (X86any_vfpround (loadv4f64 addr:$src))))]>,
                        VEX, VEX_L, Sched<[WriteCvtPD2PSY.Folded]>, VEX_WIG;
} // Predicates = [HasAVX, NoVLX]

def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
                (VCVTPD2PSrr VR128:$dst, VR128:$src), 0, "att">;
def : InstAlias<"vcvtpd2psy\t{$src, $dst|$dst, $src}",
                (VCVTPD2PSYrr VR128:$dst, VR256:$src), 0, "att">;

def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (v4f32 (X86any_vfpround (v2f64 VR128:$src))))]>,
                     Sched<[WriteCvtPD2PS]>, SIMD_EXC;
def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (v4f32 (X86any_vfpround (memopv2f64 addr:$src))))]>,
                     Sched<[WriteCvtPD2PS.Folded]>, SIMD_EXC;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Compare Instructions
//===----------------------------------------------------------------------===//

// sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
                            Operand memop, SDNode OpNode, ValueType VT,
                            PatFrag ld_frag, string asm,
                            X86FoldableSchedWrite sched,
                            PatFrags mem_frags> {
  def rr_Int : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2, u8imm:$cc), asm,
                    [(set VR128:$dst, (OpNode (VT VR128:$src1),
                                              VR128:$src2, timm:$cc))]>,
               Sched<[sched]>, SIMD_EXC;

  def rm_Int : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, memop:$src2, u8imm:$cc), asm,
                    [(set VR128:$dst, (OpNode (VT VR128:$src1),
                                              (mem_frags addr:$src2), timm:$cc))]>,
               Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;

  let isCodeGenOnly = 1 in {
    let isCommutable = 1 in
    def rr : SIi8<0xC2, MRMSrcReg,
                  (outs RC:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc), asm,
                  [(set RC:$dst, (OpNode RC:$src1, RC:$src2, timm:$cc))]>,
             Sched<[sched]>, SIMD_EXC;
    def rm : SIi8<0xC2, MRMSrcMem,
                  (outs RC:$dst), (ins RC:$src1, x86memop:$src2, u8imm:$cc), asm,
                  [(set RC:$dst, (OpNode RC:$src1,
                                         (ld_frag addr:$src2), timm:$cc))]>,
             Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
  }
}

let ExeDomain = SSEPackedSingle in
defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, ssmem, X86cmps, v4f32, loadf32,
                 "cmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
                 SchedWriteFCmpSizes.PS.Scl, sse_load_f32>,
                 XS, VEX_4V, VEX_LIG, VEX_WIG;
let ExeDomain = SSEPackedDouble in
defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, sdmem, X86cmps, v2f64, loadf64,
                 "cmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
                 SchedWriteFCmpSizes.PD.Scl, sse_load_f64>,
                 XD, VEX_4V, VEX_LIG, VEX_WIG;

let Constraints = "$src1 = $dst" in {
  let ExeDomain = SSEPackedSingle in
  defm CMPSS : sse12_cmp_scalar<FR32, f32mem, ssmem, X86cmps, v4f32, loadf32,
                  "cmpss\t{$cc, $src2, $dst|$dst, $src2, $cc}",
                  SchedWriteFCmpSizes.PS.Scl, sse_load_f32>, XS;
  let ExeDomain = SSEPackedDouble in
  defm CMPSD : sse12_cmp_scalar<FR64, f64mem, sdmem, X86cmps, v2f64, loadf64,
                  "cmpsd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
                  SchedWriteFCmpSizes.PD.Scl, sse_load_f64>, XD;
}

// sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
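//
// (u)comis* compare the low scalar element and write the result straight to
// ZF/PF/CF (an unordered result sets all three). The comis* forms raise an
// invalid-operation exception on quiet as well as signaling NaNs; ucomis*
// does so only for signaling NaNs. For example:
//   ucomiss %xmm1, %xmm0    # ZF=PF=CF=1 if either operand is NaN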
multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDPatternOperator OpNode,
                         ValueType vt, X86MemOperand x86memop,
                         PatFrag ld_frag, string OpcodeStr, Domain d,
                         X86FoldableSchedWrite sched = WriteFComX> {
  let ExeDomain = d in {
  def rr: SI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
             [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))]>,
          Sched<[sched]>, SIMD_EXC;

  def rm: SI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
             [(set EFLAGS, (OpNode (vt RC:$src1),
                                   (ld_frag addr:$src2)))]>,
          Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
  }
}

// sse12_ord_cmp_int - Intrinsic version of sse12_ord_cmp
multiclass sse12_ord_cmp_int<bits<8> opc, RegisterClass RC, SDNode OpNode,
                             ValueType vt, Operand memop,
                             PatFrags mem_frags, string OpcodeStr,
                             Domain d,
                             X86FoldableSchedWrite sched = WriteFComX> {
  let ExeDomain = d in {
  def rr_Int: SI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
                 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
                 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))]>,
              Sched<[sched]>, SIMD_EXC;

  def rm_Int: SI<opc, MRMSrcMem, (outs), (ins RC:$src1, memop:$src2),
                 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
                 [(set EFLAGS, (OpNode (vt RC:$src1),
                                       (mem_frags addr:$src2)))]>,
              Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
  }
}

let Defs = [EFLAGS] in {
  defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86any_fcmp, f32, f32mem, loadf32,
                                "ucomiss", SSEPackedSingle>, PS, VEX, VEX_LIG, VEX_WIG;
  defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86any_fcmp, f64, f64mem, loadf64,
                                "ucomisd", SSEPackedDouble>, PD, VEX, VEX_LIG, VEX_WIG;
  defm VCOMISS : sse12_ord_cmp<0x2F, FR32, X86strict_fcmps, f32, f32mem, loadf32,
                               "comiss", SSEPackedSingle>, PS, VEX, VEX_LIG, VEX_WIG;
  defm VCOMISD : sse12_ord_cmp<0x2F, FR64, X86strict_fcmps, f64, f64mem, loadf64,
                               "comisd", SSEPackedDouble>, PD, VEX, VEX_LIG, VEX_WIG;

  let isCodeGenOnly = 1 in {
    defm VUCOMISS : sse12_ord_cmp_int<0x2E, VR128, X86ucomi, v4f32, ssmem,
                      sse_load_f32, "ucomiss", SSEPackedSingle>, PS, VEX, VEX_LIG, VEX_WIG;
    defm VUCOMISD : sse12_ord_cmp_int<0x2E, VR128, X86ucomi, v2f64, sdmem,
                      sse_load_f64, "ucomisd", SSEPackedDouble>, PD, VEX, VEX_LIG, VEX_WIG;

    defm VCOMISS : sse12_ord_cmp_int<0x2F, VR128, X86comi, v4f32, ssmem,
                     sse_load_f32, "comiss", SSEPackedSingle>, PS, VEX, VEX_LIG, VEX_WIG;
    defm VCOMISD : sse12_ord_cmp_int<0x2F, VR128, X86comi, v2f64, sdmem,
                     sse_load_f64, "comisd", SSEPackedDouble>, PD, VEX, VEX_LIG, VEX_WIG;
  }
  defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86any_fcmp, f32, f32mem, loadf32,
                               "ucomiss", SSEPackedSingle>, PS;
  defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86any_fcmp, f64, f64mem, loadf64,
                               "ucomisd", SSEPackedDouble>, PD;
  defm COMISS : sse12_ord_cmp<0x2F, FR32, X86strict_fcmps, f32, f32mem, loadf32,
                              "comiss", SSEPackedSingle>, PS;
  defm COMISD : sse12_ord_cmp<0x2F, FR64, X86strict_fcmps, f64, f64mem, loadf64,
                              "comisd", SSEPackedDouble>, PD;

  let isCodeGenOnly = 1 in {
    defm UCOMISS : sse12_ord_cmp_int<0x2E, VR128, X86ucomi, v4f32, ssmem,
                     sse_load_f32, "ucomiss", SSEPackedSingle>, PS;
    defm UCOMISD : sse12_ord_cmp_int<0x2E, VR128, X86ucomi, v2f64, sdmem,
                     sse_load_f64, "ucomisd", SSEPackedDouble>, PD;

    defm COMISS : sse12_ord_cmp_int<0x2F, VR128, X86comi, v4f32, ssmem,
                    sse_load_f32, "comiss", SSEPackedSingle>, PS;
    defm COMISD : sse12_ord_cmp_int<0x2F, VR128, X86comi, v2f64, sdmem,
                    sse_load_f64, "comisd", SSEPackedDouble>, PD;
  }
} // Defs = [EFLAGS]

// sse12_cmp_packed - sse 1 & 2 compare packed instructions
multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
                            ValueType VT, string asm,
                            X86FoldableSchedWrite sched,
                            Domain d, PatFrag ld_frag> {
  let isCommutable = 1 in
  def rri : PIi8<0xC2, MRMSrcReg,
             (outs RC:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc), asm,
             [(set RC:$dst, (VT (X86any_cmpp RC:$src1, RC:$src2, timm:$cc)))], d>,
            Sched<[sched]>, SIMD_EXC;
  def rmi : PIi8<0xC2, MRMSrcMem,
             (outs RC:$dst), (ins RC:$src1, x86memop:$src2, u8imm:$cc), asm,
             [(set RC:$dst,
               (VT (X86any_cmpp RC:$src1, (ld_frag addr:$src2), timm:$cc)))], d>,
            Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
}

defm VCMPPS : sse12_cmp_packed<VR128, f128mem, v4f32,
                "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
                SchedWriteFCmpSizes.PS.XMM, SSEPackedSingle, loadv4f32>, PS, VEX_4V, VEX_WIG;
defm VCMPPD : sse12_cmp_packed<VR128, f128mem, v2f64,
                "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
                SchedWriteFCmpSizes.PD.XMM, SSEPackedDouble, loadv2f64>, PD, VEX_4V, VEX_WIG;
defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, v8f32,
                "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
                SchedWriteFCmpSizes.PS.YMM, SSEPackedSingle, loadv8f32>, PS, VEX_4V, VEX_L, VEX_WIG;
defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, v4f64,
                "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
                SchedWriteFCmpSizes.PD.YMM, SSEPackedDouble, loadv4f64>, PD, VEX_4V, VEX_L, VEX_WIG;
let Constraints = "$src1 = $dst" in {
  defm CMPPS : sse12_cmp_packed<VR128, f128mem, v4f32,
                 "cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
                 SchedWriteFCmpSizes.PS.XMM, SSEPackedSingle, memopv4f32>, PS;
  defm CMPPD : sse12_cmp_packed<VR128, f128mem, v2f64,
                 "cmppd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
                 SchedWriteFCmpSizes.PD.XMM, SSEPackedDouble, memopv2f64>, PD;
}

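// Only compare predicates that are symmetric in their operands may have the
// operands swapped: 0x00 (EQ), 0x03 (UNORD), 0x04 (NEQ) and 0x07 (ORD) test
// (in)equality or (un)orderedness, so cmp(a, b) == cmp(b, a) for them.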
def CommutableCMPCC : PatLeaf<(timm), [{
  uint64_t Imm = N->getZExtValue() & 0x7;
  return (Imm == 0x00 || Imm == 0x03 || Imm == 0x04 || Imm == 0x07);
}]>;

// Patterns to select compares with loads in first operand.
let Predicates = [HasAVX] in {
  def : Pat<(v4f64 (X86any_cmpp (loadv4f64 addr:$src2), VR256:$src1,
                                CommutableCMPCC:$cc)),
            (VCMPPDYrmi VR256:$src1, addr:$src2, timm:$cc)>;

  def : Pat<(v8f32 (X86any_cmpp (loadv8f32 addr:$src2), VR256:$src1,
                                CommutableCMPCC:$cc)),
            (VCMPPSYrmi VR256:$src1, addr:$src2, timm:$cc)>;

  def : Pat<(v2f64 (X86any_cmpp (loadv2f64 addr:$src2), VR128:$src1,
                                CommutableCMPCC:$cc)),
            (VCMPPDrmi VR128:$src1, addr:$src2, timm:$cc)>;

  def : Pat<(v4f32 (X86any_cmpp (loadv4f32 addr:$src2), VR128:$src1,
                                CommutableCMPCC:$cc)),
            (VCMPPSrmi VR128:$src1, addr:$src2, timm:$cc)>;

  def : Pat<(f64 (X86cmps (loadf64 addr:$src2), FR64:$src1,
                          CommutableCMPCC:$cc)),
            (VCMPSDrm FR64:$src1, addr:$src2, timm:$cc)>;

  def : Pat<(f32 (X86cmps (loadf32 addr:$src2), FR32:$src1,
                          CommutableCMPCC:$cc)),
            (VCMPSSrm FR32:$src1, addr:$src2, timm:$cc)>;
}

let Predicates = [UseSSE2] in {
  def : Pat<(v2f64 (X86any_cmpp (memopv2f64 addr:$src2), VR128:$src1,
                                CommutableCMPCC:$cc)),
            (CMPPDrmi VR128:$src1, addr:$src2, timm:$cc)>;

  def : Pat<(f64 (X86cmps (loadf64 addr:$src2), FR64:$src1,
                          CommutableCMPCC:$cc)),
            (CMPSDrm FR64:$src1, addr:$src2, timm:$cc)>;
}

let Predicates = [UseSSE1] in {
  def : Pat<(v4f32 (X86any_cmpp (memopv4f32 addr:$src2), VR128:$src1,
                                CommutableCMPCC:$cc)),
            (CMPPSrmi VR128:$src1, addr:$src2, timm:$cc)>;

  def : Pat<(f32 (X86cmps (loadf32 addr:$src2), FR32:$src1,
                          CommutableCMPCC:$cc)),
            (CMPSSrm FR32:$src1, addr:$src2, timm:$cc)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Shuffle Instructions
//===----------------------------------------------------------------------===//

/// sse12_shuffle - sse 1 & 2 fp shuffle instructions
multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
                         ValueType vt, string asm, PatFrag mem_frag,
                         X86FoldableSchedWrite sched, Domain d,
                         bit IsCommutable = 0> {
  def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
                 (ins RC:$src1, x86memop:$src2, u8imm:$src3), asm,
                 [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
                                              (i8 timm:$src3))))], d>,
            Sched<[sched.Folded, sched.ReadAfterFold]>;
  let isCommutable = IsCommutable in
  def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
                 (ins RC:$src1, RC:$src2, u8imm:$src3), asm,
                 [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
                                              (i8 timm:$src3))))], d>,
            Sched<[sched]>;
}

let Predicates = [HasAVX, NoVLX] in {
  defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
         "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
         loadv4f32, SchedWriteFShuffle.XMM, SSEPackedSingle>,
         PS, VEX_4V, VEX_WIG;
  defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
         "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
         loadv8f32, SchedWriteFShuffle.YMM, SSEPackedSingle>,
         PS, VEX_4V, VEX_L, VEX_WIG;
  defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
         "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
         loadv2f64, SchedWriteFShuffle.XMM, SSEPackedDouble>,
         PD, VEX_4V, VEX_WIG;
  defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
         "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
         loadv4f64, SchedWriteFShuffle.YMM, SSEPackedDouble>,
         PD, VEX_4V, VEX_L, VEX_WIG;
}
let Constraints = "$src1 = $dst" in {
  defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
                  "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                  memopv4f32, SchedWriteFShuffle.XMM, SSEPackedSingle>, PS;
  defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
                  "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                  memopv2f64, SchedWriteFShuffle.XMM, SSEPackedDouble, 1>, PD;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Unpack FP Instructions
//===----------------------------------------------------------------------===//

/// sse12_unpack_interleave - sse 1 & 2 fp unpack and interleave
multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,
                                   PatFrag mem_frag, RegisterClass RC,
                                   X86MemOperand x86memop, string asm,
                                   X86FoldableSchedWrite sched, Domain d,
                                   bit IsCommutable = 0> {
    let isCommutable = IsCommutable in
    def rr : PI<opc, MRMSrcReg,
                (outs RC:$dst), (ins RC:$src1, RC:$src2),
                asm, [(set RC:$dst,
                           (vt (OpNode RC:$src1, RC:$src2)))], d>,
             Sched<[sched]>;
    def rm : PI<opc, MRMSrcMem,
                (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
                asm, [(set RC:$dst,
                           (vt (OpNode RC:$src1,
                                       (mem_frag addr:$src2))))], d>,
             Sched<[sched.Folded, sched.ReadAfterFold]>;
}

let Predicates = [HasAVX, NoVLX] in {
defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, load,
      VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SchedWriteFShuffle.XMM, SSEPackedSingle>, PS, VEX_4V, VEX_WIG;
defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, load,
      VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SchedWriteFShuffle.XMM, SSEPackedDouble, 1>, PD, VEX_4V, VEX_WIG;
defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, load,
      VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SchedWriteFShuffle.XMM, SSEPackedSingle>, PS, VEX_4V, VEX_WIG;
defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, load,
      VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SchedWriteFShuffle.XMM, SSEPackedDouble>, PD, VEX_4V, VEX_WIG;

defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, load,
      VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SchedWriteFShuffle.YMM, SSEPackedSingle>, PS, VEX_4V, VEX_L, VEX_WIG;
defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, load,
      VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SchedWriteFShuffle.YMM, SSEPackedDouble>, PD, VEX_4V, VEX_L, VEX_WIG;
defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, load,
      VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SchedWriteFShuffle.YMM, SSEPackedSingle>, PS, VEX_4V, VEX_L, VEX_WIG;
defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, load,
      VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SchedWriteFShuffle.YMM, SSEPackedDouble>, PD, VEX_4V, VEX_L, VEX_WIG;
}// Predicates = [HasAVX, NoVLX]

let Constraints = "$src1 = $dst" in {
  defm UNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memop,
        VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
        SchedWriteFShuffle.XMM, SSEPackedSingle>, PS;
  defm UNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memop,
        VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
        SchedWriteFShuffle.XMM, SSEPackedDouble, 1>, PD;
  defm UNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memop,
        VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
        SchedWriteFShuffle.XMM, SSEPackedSingle>, PS;
  defm UNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memop,
        VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
        SchedWriteFShuffle.XMM, SSEPackedDouble>, PD;
} // Constraints = "$src1 = $dst"

let Predicates = [HasAVX1Only] in {
  def : Pat<(v8i32 (X86Unpckl VR256:$src1, (loadv8i32 addr:$src2))),
            (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v8i32 (X86Unpckl VR256:$src1, VR256:$src2)),
            (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v8i32 (X86Unpckh VR256:$src1, (loadv8i32 addr:$src2))),
            (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v8i32 (X86Unpckh VR256:$src1, VR256:$src2)),
            (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;

  def : Pat<(v4i64 (X86Unpckl VR256:$src1, (loadv4i64 addr:$src2))),
            (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v4i64 (X86Unpckl VR256:$src1, VR256:$src2)),
            (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v4i64 (X86Unpckh VR256:$src1, (loadv4i64 addr:$src2))),
            (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v4i64 (X86Unpckh VR256:$src1, VR256:$src2)),
            (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
}

let Predicates = [UseSSE2] in {
  // Use MOVHPD if the load isn't aligned enough for UNPCKLPD.
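  // (UNPCKLPD only reads the low half of its memory operand, and the SSE
  // register-memory form requires a 16-byte aligned memop; MOVHPD performs
  // the equivalent 8-byte load with no alignment requirement.)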
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                              (v2f64 (simple_load addr:$src2)))),
            (MOVHPDrm VR128:$src1, addr:$src2)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Extract Floating-Point Sign mask
//===----------------------------------------------------------------------===//

/// sse12_extr_sign_mask - sse 1 & 2 sign-mask extraction
multiclass sse12_extr_sign_mask<RegisterClass RC, ValueType vt,
                                string asm, Domain d> {
  def rr : PI<0x50, MRMSrcReg, (outs GR32orGR64:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set GR32orGR64:$dst, (X86movmsk (vt RC:$src)))], d>,
           Sched<[WriteFMOVMSK]>;
}

let Predicates = [HasAVX] in {
  defm VMOVMSKPS : sse12_extr_sign_mask<VR128, v4f32, "movmskps",
                                        SSEPackedSingle>, PS, VEX, VEX_WIG;
  defm VMOVMSKPD : sse12_extr_sign_mask<VR128, v2f64, "movmskpd",
                                        SSEPackedDouble>, PD, VEX, VEX_WIG;
  defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, v8f32, "movmskps",
                                         SSEPackedSingle>, PS, VEX, VEX_L, VEX_WIG;
  defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, v4f64, "movmskpd",
                                         SSEPackedDouble>, PD, VEX, VEX_L, VEX_WIG;

  // Also support integer VTs to avoid an int->fp bitcast in the DAG.
  def : Pat<(X86movmsk (v4i32 VR128:$src)),
            (VMOVMSKPSrr VR128:$src)>;
  def : Pat<(X86movmsk (v2i64 VR128:$src)),
            (VMOVMSKPDrr VR128:$src)>;
  def : Pat<(X86movmsk (v8i32 VR256:$src)),
            (VMOVMSKPSYrr VR256:$src)>;
  def : Pat<(X86movmsk (v4i64 VR256:$src)),
            (VMOVMSKPDYrr VR256:$src)>;
}

defm MOVMSKPS : sse12_extr_sign_mask<VR128, v4f32, "movmskps",
                                     SSEPackedSingle>, PS;
defm MOVMSKPD : sse12_extr_sign_mask<VR128, v2f64, "movmskpd",
                                     SSEPackedDouble>, PD;

let Predicates = [UseSSE2] in {
  // Also support integer VTs to avoid an int->fp bitcast in the DAG.
  def : Pat<(X86movmsk (v4i32 VR128:$src)),
            (MOVMSKPSrr VR128:$src)>;
  def : Pat<(X86movmsk (v2i64 VR128:$src)),
            (MOVMSKPDrr VR128:$src)>;
}

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Logical Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in { // SSE integer instructions

/// PDI_binop_rm - Simple SSE2 binary operator.
multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                        ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                        X86MemOperand x86memop, X86FoldableSchedWrite sched,
                        bit IsCommutable, bit Is2Addr> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>,
       Sched<[sched]>;
  def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1, (memop_frag addr:$src2))))]>,
       Sched<[sched.Folded, sched.ReadAfterFold]>;
}
} // ExeDomain = SSEPackedInt

multiclass PDI_binop_all<bits<8> opc, string OpcodeStr, SDNode Opcode,
                         ValueType OpVT128, ValueType OpVT256,
                         X86SchedWriteWidths sched, bit IsCommutable,
                         Predicate prd> {
let Predicates = [HasAVX, prd] in
  defm V#NAME : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode, OpVT128,
                             VR128, load, i128mem, sched.XMM,
                             IsCommutable, 0>, VEX_4V, VEX_WIG;

let Constraints = "$src1 = $dst" in
  defm NAME : PDI_binop_rm<opc, OpcodeStr, Opcode, OpVT128, VR128,
                           memop, i128mem, sched.XMM, IsCommutable, 1>;

let Predicates = [HasAVX2, prd] in
  defm V#NAME#Y : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode,
                               OpVT256, VR256, load, i256mem, sched.YMM,
                               IsCommutable, 0>, VEX_4V, VEX_L, VEX_WIG;
}

// These are ordered here for pattern ordering requirements with the fp versions

defm PAND  : PDI_binop_all<0xDB, "pand", and, v2i64, v4i64,
                           SchedWriteVecLogic, 1, NoVLX>;
defm POR   : PDI_binop_all<0xEB, "por", or, v2i64, v4i64,
                           SchedWriteVecLogic, 1, NoVLX>;
defm PXOR  : PDI_binop_all<0xEF, "pxor", xor, v2i64, v4i64,
                           SchedWriteVecLogic, 1, NoVLX>;
defm PANDN : PDI_binop_all<0xDF, "pandn", X86andnp, v2i64, v4i64,
                           SchedWriteVecLogic, 0, NoVLX>;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Logical Instructions
//===----------------------------------------------------------------------===//

/// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
///
/// There are no patterns here because isel prefers integer versions for SSE2
/// and later. There are SSE1 v4f32 patterns later.
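///
/// (For instance, an fp 'and' on an SSE2 target is selected through the
/// integer PAND patterns above after a bitcast; only the SSE1-only
/// X86fand/X86for/X86fxor/X86fandn patterns at the end of this section
/// select ANDPS and friends directly.)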
multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
                                   SDNode OpNode, X86SchedWriteWidths sched> {
  let Predicates = [HasAVX, NoVLX] in {
  defm V#NAME#PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
        !strconcat(OpcodeStr, "ps"), f256mem, sched.YMM,
        [], [], 0>, PS, VEX_4V, VEX_L, VEX_WIG;

  defm V#NAME#PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
        !strconcat(OpcodeStr, "pd"), f256mem, sched.YMM,
        [], [], 0>, PD, VEX_4V, VEX_L, VEX_WIG;

  defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
       !strconcat(OpcodeStr, "ps"), f128mem, sched.XMM,
       [], [], 0>, PS, VEX_4V, VEX_WIG;

  defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
       !strconcat(OpcodeStr, "pd"), f128mem, sched.XMM,
       [], [], 0>, PD, VEX_4V, VEX_WIG;
  }

  let Constraints = "$src1 = $dst" in {
    defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
              !strconcat(OpcodeStr, "ps"), f128mem, sched.XMM,
              [], []>, PS;

    defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
              !strconcat(OpcodeStr, "pd"), f128mem, sched.XMM,
              [], []>, PD;
  }
}

defm AND  : sse12_fp_packed_logical<0x54, "and", and, SchedWriteFLogic>;
defm OR   : sse12_fp_packed_logical<0x56, "or", or, SchedWriteFLogic>;
defm XOR  : sse12_fp_packed_logical<0x57, "xor", xor, SchedWriteFLogic>;
let isCommutable = 0 in
  defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp, SchedWriteFLogic>;

let Predicates = [HasAVX2, NoVLX] in {
  def : Pat<(v32i8 (and VR256:$src1, VR256:$src2)),
            (VPANDYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v16i16 (and VR256:$src1, VR256:$src2)),
            (VPANDYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v8i32 (and VR256:$src1, VR256:$src2)),
            (VPANDYrr VR256:$src1, VR256:$src2)>;

  def : Pat<(v32i8 (or VR256:$src1, VR256:$src2)),
            (VPORYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v16i16 (or VR256:$src1, VR256:$src2)),
            (VPORYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v8i32 (or VR256:$src1, VR256:$src2)),
            (VPORYrr VR256:$src1, VR256:$src2)>;

  def : Pat<(v32i8 (xor VR256:$src1, VR256:$src2)),
            (VPXORYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v16i16 (xor VR256:$src1, VR256:$src2)),
            (VPXORYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v8i32 (xor VR256:$src1, VR256:$src2)),
            (VPXORYrr VR256:$src1, VR256:$src2)>;

  def : Pat<(v32i8 (X86andnp VR256:$src1, VR256:$src2)),
            (VPANDNYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v16i16 (X86andnp VR256:$src1, VR256:$src2)),
            (VPANDNYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v8i32 (X86andnp VR256:$src1, VR256:$src2)),
            (VPANDNYrr VR256:$src1, VR256:$src2)>;

  def : Pat<(and VR256:$src1, (loadv32i8 addr:$src2)),
            (VPANDYrm VR256:$src1, addr:$src2)>;
  def : Pat<(and VR256:$src1, (loadv16i16 addr:$src2)),
            (VPANDYrm VR256:$src1, addr:$src2)>;
  def : Pat<(and VR256:$src1, (loadv8i32 addr:$src2)),
            (VPANDYrm VR256:$src1, addr:$src2)>;

  def : Pat<(or VR256:$src1, (loadv32i8 addr:$src2)),
            (VPORYrm VR256:$src1, addr:$src2)>;
  def : Pat<(or VR256:$src1, (loadv16i16 addr:$src2)),
            (VPORYrm VR256:$src1, addr:$src2)>;
  def : Pat<(or VR256:$src1, (loadv8i32 addr:$src2)),
            (VPORYrm VR256:$src1, addr:$src2)>;

  def : Pat<(xor VR256:$src1, (loadv32i8 addr:$src2)),
            (VPXORYrm VR256:$src1, addr:$src2)>;
  def : Pat<(xor VR256:$src1, (loadv16i16 addr:$src2)),
            (VPXORYrm VR256:$src1, addr:$src2)>;
  def : Pat<(xor VR256:$src1, (loadv8i32 addr:$src2)),
            (VPXORYrm VR256:$src1, addr:$src2)>;

  def : Pat<(X86andnp VR256:$src1, (loadv32i8 addr:$src2)),
            (VPANDNYrm VR256:$src1, addr:$src2)>;
  def : Pat<(X86andnp VR256:$src1, (loadv16i16 addr:$src2)),
            (VPANDNYrm VR256:$src1, addr:$src2)>;
  def : Pat<(X86andnp VR256:$src1, (loadv8i32 addr:$src2)),
            (VPANDNYrm VR256:$src1, addr:$src2)>;
}

// If only AVX1 is supported, we need to handle integer operations with
// floating point instructions since the integer versions aren't available.
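//
// For example, a v4i64 'and' of two 256-bit values is matched to VANDPSYrr
// below; the bitwise result is identical, at the cost of a possible
// FP/integer domain-crossing penalty on some microarchitectures.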
let Predicates = [HasAVX1Only] in {
  def : Pat<(v32i8 (and VR256:$src1, VR256:$src2)),
            (VANDPSYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v16i16 (and VR256:$src1, VR256:$src2)),
            (VANDPSYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v8i32 (and VR256:$src1, VR256:$src2)),
            (VANDPSYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v4i64 (and VR256:$src1, VR256:$src2)),
            (VANDPSYrr VR256:$src1, VR256:$src2)>;

  def : Pat<(v32i8 (or VR256:$src1, VR256:$src2)),
            (VORPSYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v16i16 (or VR256:$src1, VR256:$src2)),
            (VORPSYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v8i32 (or VR256:$src1, VR256:$src2)),
            (VORPSYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v4i64 (or VR256:$src1, VR256:$src2)),
            (VORPSYrr VR256:$src1, VR256:$src2)>;

  def : Pat<(v32i8 (xor VR256:$src1, VR256:$src2)),
            (VXORPSYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v16i16 (xor VR256:$src1, VR256:$src2)),
            (VXORPSYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v8i32 (xor VR256:$src1, VR256:$src2)),
            (VXORPSYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v4i64 (xor VR256:$src1, VR256:$src2)),
            (VXORPSYrr VR256:$src1, VR256:$src2)>;

  def : Pat<(v32i8 (X86andnp VR256:$src1, VR256:$src2)),
            (VANDNPSYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v16i16 (X86andnp VR256:$src1, VR256:$src2)),
            (VANDNPSYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v8i32 (X86andnp VR256:$src1, VR256:$src2)),
            (VANDNPSYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v4i64 (X86andnp VR256:$src1, VR256:$src2)),
            (VANDNPSYrr VR256:$src1, VR256:$src2)>;

  def : Pat<(and VR256:$src1, (loadv32i8 addr:$src2)),
            (VANDPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(and VR256:$src1, (loadv16i16 addr:$src2)),
            (VANDPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(and VR256:$src1, (loadv8i32 addr:$src2)),
            (VANDPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(and VR256:$src1, (loadv4i64 addr:$src2)),
            (VANDPSYrm VR256:$src1, addr:$src2)>;

  def : Pat<(or VR256:$src1, (loadv32i8 addr:$src2)),
            (VORPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(or VR256:$src1, (loadv16i16 addr:$src2)),
            (VORPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(or VR256:$src1, (loadv8i32 addr:$src2)),
            (VORPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(or VR256:$src1, (loadv4i64 addr:$src2)),
            (VORPSYrm VR256:$src1, addr:$src2)>;

  def : Pat<(xor VR256:$src1, (loadv32i8 addr:$src2)),
            (VXORPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(xor VR256:$src1, (loadv16i16 addr:$src2)),
            (VXORPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(xor VR256:$src1, (loadv8i32 addr:$src2)),
            (VXORPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(xor VR256:$src1, (loadv4i64 addr:$src2)),
            (VXORPSYrm VR256:$src1, addr:$src2)>;

  def : Pat<(X86andnp VR256:$src1, (loadv32i8 addr:$src2)),
            (VANDNPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(X86andnp VR256:$src1, (loadv16i16 addr:$src2)),
            (VANDNPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(X86andnp VR256:$src1, (loadv8i32 addr:$src2)),
            (VANDNPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(X86andnp VR256:$src1, (loadv4i64 addr:$src2)),
            (VANDNPSYrm VR256:$src1, addr:$src2)>;
}

let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(v16i8 (and VR128:$src1, VR128:$src2)),
            (VPANDrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v8i16 (and VR128:$src1, VR128:$src2)),
            (VPANDrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (and VR128:$src1, VR128:$src2)),
            (VPANDrr VR128:$src1, VR128:$src2)>;

  def : Pat<(v16i8 (or VR128:$src1, VR128:$src2)),
            (VPORrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v8i16 (or VR128:$src1, VR128:$src2)),
            (VPORrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (or VR128:$src1, VR128:$src2)),
            (VPORrr VR128:$src1, VR128:$src2)>;

  def : Pat<(v16i8 (xor VR128:$src1, VR128:$src2)),
            (VPXORrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v8i16 (xor VR128:$src1, VR128:$src2)),
            (VPXORrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (xor VR128:$src1, VR128:$src2)),
            (VPXORrr VR128:$src1, VR128:$src2)>;

  def : Pat<(v16i8 (X86andnp VR128:$src1, VR128:$src2)),
            (VPANDNrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v8i16 (X86andnp VR128:$src1, VR128:$src2)),
            (VPANDNrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (X86andnp VR128:$src1, VR128:$src2)),
            (VPANDNrr VR128:$src1, VR128:$src2)>;

  def : Pat<(and VR128:$src1, (loadv16i8 addr:$src2)),
            (VPANDrm VR128:$src1, addr:$src2)>;
  def : Pat<(and VR128:$src1, (loadv8i16 addr:$src2)),
            (VPANDrm VR128:$src1, addr:$src2)>;
  def : Pat<(and VR128:$src1, (loadv4i32 addr:$src2)),
            (VPANDrm VR128:$src1, addr:$src2)>;

  def : Pat<(or VR128:$src1, (loadv16i8 addr:$src2)),
            (VPORrm VR128:$src1, addr:$src2)>;
  def : Pat<(or VR128:$src1, (loadv8i16 addr:$src2)),
            (VPORrm VR128:$src1, addr:$src2)>;
  def : Pat<(or VR128:$src1, (loadv4i32 addr:$src2)),
            (VPORrm VR128:$src1, addr:$src2)>;

  def : Pat<(xor VR128:$src1, (loadv16i8 addr:$src2)),
            (VPXORrm VR128:$src1, addr:$src2)>;
  def : Pat<(xor VR128:$src1, (loadv8i16 addr:$src2)),
            (VPXORrm VR128:$src1, addr:$src2)>;
  def : Pat<(xor VR128:$src1, (loadv4i32 addr:$src2)),
            (VPXORrm VR128:$src1, addr:$src2)>;

  def : Pat<(X86andnp VR128:$src1, (loadv16i8 addr:$src2)),
            (VPANDNrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86andnp VR128:$src1, (loadv8i16 addr:$src2)),
            (VPANDNrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86andnp VR128:$src1, (loadv4i32 addr:$src2)),
            (VPANDNrm VR128:$src1, addr:$src2)>;
}

let Predicates = [UseSSE2] in {
  def : Pat<(v16i8 (and VR128:$src1, VR128:$src2)),
            (PANDrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v8i16 (and VR128:$src1, VR128:$src2)),
            (PANDrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (and VR128:$src1, VR128:$src2)),
            (PANDrr VR128:$src1, VR128:$src2)>;

  def : Pat<(v16i8 (or VR128:$src1, VR128:$src2)),
            (PORrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v8i16 (or VR128:$src1, VR128:$src2)),
            (PORrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (or VR128:$src1, VR128:$src2)),
            (PORrr VR128:$src1, VR128:$src2)>;

  def : Pat<(v16i8 (xor VR128:$src1, VR128:$src2)),
            (PXORrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v8i16 (xor VR128:$src1, VR128:$src2)),
            (PXORrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (xor VR128:$src1, VR128:$src2)),
            (PXORrr VR128:$src1, VR128:$src2)>;

  def : Pat<(v16i8 (X86andnp VR128:$src1, VR128:$src2)),
            (PANDNrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v8i16 (X86andnp VR128:$src1, VR128:$src2)),
            (PANDNrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (X86andnp VR128:$src1, VR128:$src2)),
            (PANDNrr VR128:$src1, VR128:$src2)>;

  def : Pat<(and VR128:$src1, (memopv16i8 addr:$src2)),
            (PANDrm VR128:$src1, addr:$src2)>;
  def : Pat<(and VR128:$src1, (memopv8i16 addr:$src2)),
            (PANDrm VR128:$src1, addr:$src2)>;
  def : Pat<(and VR128:$src1, (memopv4i32 addr:$src2)),
            (PANDrm VR128:$src1, addr:$src2)>;

  def : Pat<(or VR128:$src1, (memopv16i8 addr:$src2)),
            (PORrm VR128:$src1, addr:$src2)>;
  def : Pat<(or VR128:$src1, (memopv8i16 addr:$src2)),
            (PORrm VR128:$src1, addr:$src2)>;
  def : Pat<(or VR128:$src1, (memopv4i32 addr:$src2)),
            (PORrm VR128:$src1, addr:$src2)>;

  def : Pat<(xor VR128:$src1, (memopv16i8 addr:$src2)),
            (PXORrm VR128:$src1, addr:$src2)>;
  def : Pat<(xor VR128:$src1, (memopv8i16 addr:$src2)),
            (PXORrm VR128:$src1, addr:$src2)>;
  def : Pat<(xor VR128:$src1, (memopv4i32 addr:$src2)),
            (PXORrm VR128:$src1, addr:$src2)>;

  def : Pat<(X86andnp VR128:$src1, (memopv16i8 addr:$src2)),
            (PANDNrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86andnp VR128:$src1, (memopv8i16 addr:$src2)),
            (PANDNrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86andnp VR128:$src1, (memopv4i32 addr:$src2)),
            (PANDNrm VR128:$src1, addr:$src2)>;
}
let Predicates = [UseSSE1] in {
  // Patterns for packed operations when we don't have integer type available.
  def : Pat<(v4f32 (X86fand VR128:$src1, VR128:$src2)),
            (ANDPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4f32 (X86for VR128:$src1, VR128:$src2)),
            (ORPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4f32 (X86fxor VR128:$src1, VR128:$src2)),
            (XORPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4f32 (X86fandn VR128:$src1, VR128:$src2)),
            (ANDNPSrr VR128:$src1, VR128:$src2)>;

  def : Pat<(X86fand VR128:$src1, (memopv4f32 addr:$src2)),
            (ANDPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86for VR128:$src1, (memopv4f32 addr:$src2)),
            (ORPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86fxor VR128:$src1, (memopv4f32 addr:$src2)),
            (XORPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86fandn VR128:$src1, (memopv4f32 addr:$src2)),
            (ANDNPSrm VR128:$src1, addr:$src2)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Arithmetic Instructions
//===----------------------------------------------------------------------===//

/// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
/// vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements unmodified (therefore these cannot be commuted).
///
/// These three forms can each be reg+reg or reg+mem.
///

/// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
/// classes below
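
// As an illustration of the intrinsic (vector) form: ADDSSrr_Int takes two
// v4f32 operands and computes
//   dst[0] = src1[0] + src2[0];  dst[1..3] = src1[1..3];
// matching the semantics of e.g. _mm_add_ss, whereas the plain scalar form
// operates on FR32/FR64 registers only.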
multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr,
                                  SDPatternOperator OpNode, X86SchedWriteSizes sched> {
let Uses = [MXCSR], mayRaiseFPException = 1 in {
  let Predicates = [HasAVX, NoVLX] in {
  defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
                               VR128, v4f32, f128mem, loadv4f32,
                               SSEPackedSingle, sched.PS.XMM, 0>, PS, VEX_4V, VEX_WIG;
  defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
                               VR128, v2f64, f128mem, loadv2f64,
                               SSEPackedDouble, sched.PD.XMM, 0>, PD, VEX_4V, VEX_WIG;

  defm V#NAME#PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"),
                        OpNode, VR256, v8f32, f256mem, loadv8f32,
                        SSEPackedSingle, sched.PS.YMM, 0>, PS, VEX_4V, VEX_L, VEX_WIG;
  defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"),
                        OpNode, VR256, v4f64, f256mem, loadv4f64,
                        SSEPackedDouble, sched.PD.YMM, 0>, PD, VEX_4V, VEX_L, VEX_WIG;
  }

  let Constraints = "$src1 = $dst" in {
    defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
                              v4f32, f128mem, memopv4f32, SSEPackedSingle,
                              sched.PS.XMM>, PS;
    defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
                              v2f64, f128mem, memopv2f64, SSEPackedDouble,
                              sched.PD.XMM>, PD;
  }
}
}

multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,
                                  X86SchedWriteSizes sched> {
let Uses = [MXCSR], mayRaiseFPException = 1 in {
  defm V#NAME#SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
                         OpNode, FR32, f32mem, SSEPackedSingle, sched.PS.Scl, 0>,
                         XS, VEX_4V, VEX_LIG, VEX_WIG;
  defm V#NAME#SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
                         OpNode, FR64, f64mem, SSEPackedDouble, sched.PD.Scl, 0>,
                         XD, VEX_4V, VEX_LIG, VEX_WIG;

  let Constraints = "$src1 = $dst" in {
    defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
                              OpNode, FR32, f32mem, SSEPackedSingle,
                              sched.PS.Scl>, XS;
    defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
                              OpNode, FR64, f64mem, SSEPackedDouble,
                              sched.PD.Scl>, XD;
  }
}
}

multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
                                      SDPatternOperator OpNode,
                                      X86SchedWriteSizes sched> {
let Uses = [MXCSR], mayRaiseFPException = 1 in {
  defm V#NAME#SS : sse12_fp_scalar_int<opc, OpcodeStr, OpNode, VR128, v4f32,
                   !strconcat(OpcodeStr, "ss"), ssmem, sse_load_f32,
                   SSEPackedSingle, sched.PS.Scl, 0>, XS, VEX_4V, VEX_LIG, VEX_WIG;
  defm V#NAME#SD : sse12_fp_scalar_int<opc, OpcodeStr, OpNode, VR128, v2f64,
                   !strconcat(OpcodeStr, "sd"), sdmem, sse_load_f64,
                   SSEPackedDouble, sched.PD.Scl, 0>, XD, VEX_4V, VEX_LIG, VEX_WIG;

  let Constraints = "$src1 = $dst" in {
    defm SS : sse12_fp_scalar_int<opc, OpcodeStr, OpNode, VR128, v4f32,
                   !strconcat(OpcodeStr, "ss"), ssmem, sse_load_f32,
                   SSEPackedSingle, sched.PS.Scl>, XS;
    defm SD : sse12_fp_scalar_int<opc, OpcodeStr, OpNode, VR128, v2f64,
                   !strconcat(OpcodeStr, "sd"), sdmem, sse_load_f64,
                   SSEPackedDouble, sched.PD.Scl>, XD;
  }
}
}
2664 // Binary Arithmetic instructions
2665 defm ADD : basic_sse12_fp_binop_p<0x58, "add", any_fadd, SchedWriteFAddSizes>,
2666 basic_sse12_fp_binop_s<0x58, "add", any_fadd, SchedWriteFAddSizes>,
2667 basic_sse12_fp_binop_s_int<0x58, "add", null_frag, SchedWriteFAddSizes>;
2668 defm MUL : basic_sse12_fp_binop_p<0x59, "mul", any_fmul, SchedWriteFMulSizes>,
2669 basic_sse12_fp_binop_s<0x59, "mul", any_fmul, SchedWriteFMulSizes>,
2670 basic_sse12_fp_binop_s_int<0x59, "mul", null_frag, SchedWriteFMulSizes>;
2671 let isCommutable = 0 in {
2672 defm SUB : basic_sse12_fp_binop_p<0x5C, "sub", any_fsub, SchedWriteFAddSizes>,
2673 basic_sse12_fp_binop_s<0x5C, "sub", any_fsub, SchedWriteFAddSizes>,
2674 basic_sse12_fp_binop_s_int<0x5C, "sub", null_frag, SchedWriteFAddSizes>;
2675 defm DIV : basic_sse12_fp_binop_p<0x5E, "div", any_fdiv, SchedWriteFDivSizes>,
2676 basic_sse12_fp_binop_s<0x5E, "div", any_fdiv, SchedWriteFDivSizes>,
2677 basic_sse12_fp_binop_s_int<0x5E, "div", null_frag, SchedWriteFDivSizes>;
2678 defm MAX : basic_sse12_fp_binop_p<0x5F, "max", X86fmax, SchedWriteFCmpSizes>,
2679 basic_sse12_fp_binop_s<0x5F, "max", X86fmax, SchedWriteFCmpSizes>,
2680 basic_sse12_fp_binop_s_int<0x5F, "max", X86fmaxs, SchedWriteFCmpSizes>;
2681 defm MIN : basic_sse12_fp_binop_p<0x5D, "min", X86fmin, SchedWriteFCmpSizes>,
2682 basic_sse12_fp_binop_s<0x5D, "min", X86fmin, SchedWriteFCmpSizes>,
2683 basic_sse12_fp_binop_s_int<0x5D, "min", X86fmins, SchedWriteFCmpSizes>;
2684 }
2686 let isCodeGenOnly = 1 in {
2687 defm MAXC: basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SchedWriteFCmpSizes>,
2688 basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SchedWriteFCmpSizes>;
2689 defm MINC: basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SchedWriteFCmpSizes>,
2690 basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SchedWriteFCmpSizes>;
2691 }
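// For reference (assuming the standard TableGen name concatenation), each
// defm above roughly expands into the rr/rm (and _Int) flavors declared by
// the multiclasses: e.g. "defm ADD" yields ADDSSrr/ADDSSrm, ADDPSrr/ADDPSrm,
// ADDSDrr/ADDSDrm and ADDPDrr/ADDPDrm, plus the VEX-encoded V* XMM/YMM forms
// from the HasAVX branches.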
2693 // Patterns used to select SSE scalar fp arithmetic instructions from
2694 // either:
2695 //
2696 // (1) a scalar fp operation followed by a blend
2697 //
2698 // The effect is that the backend no longer emits unnecessary vector
2699 // insert instructions immediately after SSE scalar fp instructions
2700 // like addss or mulss.
2701 //
2702 // For example, given the following code:
2703 //   __m128 foo(__m128 A, __m128 B) {
2704 //     A[0] += B[0];
2705 //     return A;
2706 //   }
2707 //
2708 // Previously we generated:
2709 //   addss %xmm0, %xmm1
2710 //   movss %xmm1, %xmm0
2711 //
2712 // We now generate:
2713 //   addss %xmm1, %xmm0
2714 //
2715 // (2) a vector packed single/double fp operation followed by a vector insert
2716 //
2717 // The effect is that the backend converts the packed fp instruction
2718 // followed by a vector insert into a single SSE scalar fp instruction.
2719 //
2720 // For example, given the following code:
2721 //   __m128 foo(__m128 A, __m128 B) {
2722 //     __m128 C = A + B;
2723 //     return (__m128) {C[0], A[1], A[2], A[3]};
2724 //   }
2725 //
2726 // Previously we generated:
2727 //   addps %xmm0, %xmm1
2728 //   movss %xmm1, %xmm0
2729 //
2730 // We now generate:
2731 //   addss %xmm1, %xmm0
2732 //
2733 // TODO: Some canonicalization in lowering would simplify the number of
2734 // patterns we have to try to match.
2735 multiclass scalar_math_patterns<SDPatternOperator Op, string OpcPrefix, SDNode Move,
2736 ValueType VT, ValueType EltTy,
2737 RegisterClass RC, PatFrag ld_frag,
2738 Predicate BasePredicate> {
2739 let Predicates = [BasePredicate] in {
2740 // extracted scalar math op with insert via movss/movsd
2741 def : Pat<(VT (Move (VT VR128:$dst),
2742 (VT (scalar_to_vector
2743 (Op (EltTy (extractelt (VT VR128:$dst), (iPTR 0))),
2744     RC:$src))))),
2745 (!cast<Instruction>(OpcPrefix#rr_Int) VT:$dst,
2746 (VT (COPY_TO_REGCLASS RC:$src, VR128)))>;
2747 def : Pat<(VT (Move (VT VR128:$dst),
2748 (VT (scalar_to_vector
2749 (Op (EltTy (extractelt (VT VR128:$dst), (iPTR 0))),
2750 (ld_frag addr:$src)))))),
2751 (!cast<Instruction>(OpcPrefix#rm_Int) VT:$dst, addr:$src)>;
2752 }
2754 // Repeat for AVX versions of the instructions.
2755 let Predicates = [UseAVX] in {
2756 // extracted scalar math op with insert via movss/movsd
2757 def : Pat<(VT (Move (VT VR128:$dst),
2758 (VT (scalar_to_vector
2759 (Op (EltTy (extractelt (VT VR128:$dst), (iPTR 0))),
2760     RC:$src))))),
2761 (!cast<Instruction>("V"#OpcPrefix#rr_Int) VT:$dst,
2762 (VT (COPY_TO_REGCLASS RC:$src, VR128)))>;
2763 def : Pat<(VT (Move (VT VR128:$dst),
2764 (VT (scalar_to_vector
2765 (Op (EltTy (extractelt (VT VR128:$dst), (iPTR 0))),
2766 (ld_frag addr:$src)))))),
2767 (!cast<Instruction>("V"#OpcPrefix#rm_Int) VT:$dst, addr:$src)>;
2768 }
2769 }
2771 defm : scalar_math_patterns<any_fadd, "ADDSS", X86Movss, v4f32, f32, FR32, loadf32, UseSSE1>;
2772 defm : scalar_math_patterns<any_fsub, "SUBSS", X86Movss, v4f32, f32, FR32, loadf32, UseSSE1>;
2773 defm : scalar_math_patterns<any_fmul, "MULSS", X86Movss, v4f32, f32, FR32, loadf32, UseSSE1>;
2774 defm : scalar_math_patterns<any_fdiv, "DIVSS", X86Movss, v4f32, f32, FR32, loadf32, UseSSE1>;
2776 defm : scalar_math_patterns<any_fadd, "ADDSD", X86Movsd, v2f64, f64, FR64, loadf64, UseSSE2>;
2777 defm : scalar_math_patterns<any_fsub, "SUBSD", X86Movsd, v2f64, f64, FR64, loadf64, UseSSE2>;
2778 defm : scalar_math_patterns<any_fmul, "MULSD", X86Movsd, v2f64, f64, FR64, loadf64, UseSSE2>;
2779 defm : scalar_math_patterns<any_fdiv, "DIVSD", X86Movsd, v2f64, f64, FR64, loadf64, UseSSE2>;
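// For illustration (a sketch of what the ADDSS instantiation above matches):
//   (v4f32 (X86Movss VR128:$dst,
//            (scalar_to_vector
//              (fadd (extractelt (v4f32 VR128:$dst), 0), FR32:$src))))
// is selected to ADDSSrr_Int (VADDSSrr_Int under UseAVX), so no separate
// blend is emitted to reinsert the scalar result into element 0.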
2781 /// Unop Arithmetic
2782 /// In addition, we also have a special variant of the scalar form here to
2783 /// represent the associated intrinsic operation. This form is unlike the
2784 /// plain scalar form, in that it takes an entire vector (instead of a
2785 /// scalar) and leaves the top elements undefined.
2787 /// And, we have a special variant form for a full-vector intrinsic form.
2789 /// sse_fp_unop_s - SSE1 unops in scalar form
2790 /// For the non-AVX defs, we need $src1 to be tied to $dst because
2791 /// the HW instructions are 2 operand / destructive.
2792 multiclass sse_fp_unop_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
2793 ValueType ScalarVT, X86MemOperand x86memop,
2794 Operand intmemop, SDPatternOperator OpNode, Domain d,
2795 X86FoldableSchedWrite sched, Predicate target> {
2796 let isCodeGenOnly = 1, hasSideEffects = 0 in {
2797 def r : I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1),
2798 !strconcat(OpcodeStr, "\t{$src1, $dst|$dst, $src1}"),
2799 [(set RC:$dst, (OpNode RC:$src1))], d>, Sched<[sched]>,
2800 Requires<[target]>;
2802 def m : I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src1),
2803 !strconcat(OpcodeStr, "\t{$src1, $dst|$dst, $src1}"),
2804 [(set RC:$dst, (OpNode (load addr:$src1)))], d>,
2805 Sched<[sched.Folded]>,
2806 Requires<[target, OptForSize]>;
2807 }
2809 let hasSideEffects = 0, Constraints = "$src1 = $dst", ExeDomain = d in {
2810 def r_Int : I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2811 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), []>,
2812 Sched<[sched]>;
2813 let mayLoad = 1 in
2814 def m_Int : I<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, intmemop:$src2),
2815 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), []>,
2816 Sched<[sched.Folded, sched.ReadAfterFold]>;
2817 }
2818 }
2821 multiclass sse_fp_unop_s_intr<RegisterClass RC, ValueType vt,
2822 PatFrags mem_frags, Intrinsic Intr,
2823 Predicate target, string Suffix> {
2824 let Predicates = [target] in {
2825 // These are unary operations, but they are modeled as having 2 source operands
2826 // because the high elements of the destination are unchanged in SSE.
2827 def : Pat<(Intr VR128:$src),
2828 (!cast<Instruction>(NAME#r_Int) VR128:$src, VR128:$src)>;
2829 }
2830 // We don't want to fold scalar loads into these instructions unless
2831 // optimizing for size. This is because the folded instruction will have a
2832 // partial register update, while the unfolded sequence will not, e.g.
2833 // movss mem, %xmm0
2834 // rcpss %xmm0, %xmm0
2835 // which has a clobber before the rcp, vs.
2836 // rcpss mem, %xmm0
2837 let Predicates = [target, OptForSize] in {
2838 def : Pat<(Intr (mem_frags addr:$src2)),
2839 (!cast<Instruction>(NAME#m_Int)
2840 (vt (IMPLICIT_DEF)), addr:$src2)>;
2841 }
2842 }
2844 multiclass avx_fp_unop_s_intr<RegisterClass RC, ValueType vt, PatFrags mem_frags,
2845 Intrinsic Intr, Predicate target> {
2846 let Predicates = [target] in {
2847 def : Pat<(Intr VR128:$src),
2848 (!cast<Instruction>(NAME#r_Int) VR128:$src,
2849     VR128:$src)>;
2850 }
2851 let Predicates = [target, OptForSize] in {
2852 def : Pat<(Intr (mem_frags addr:$src2)),
2853 (!cast<Instruction>(NAME#m_Int)
2854 (vt (IMPLICIT_DEF)), addr:$src2)>;
2855 }
2856 }
2858 multiclass avx_fp_unop_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
2859 ValueType ScalarVT, X86MemOperand x86memop,
2860 Operand intmemop, SDPatternOperator OpNode, Domain d,
2861 X86FoldableSchedWrite sched, Predicate target> {
2862 let isCodeGenOnly = 1, hasSideEffects = 0 in {
2863 def r : I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2864 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2865 [], d>, Sched<[sched]>;
2866 let mayLoad = 1 in
2867 def m : I<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2868 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2869 [], d>, Sched<[sched.Folded, sched.ReadAfterFold]>;
2870 }
2871 let hasSideEffects = 0, ExeDomain = d in {
2872 def r_Int : I<opc, MRMSrcReg, (outs VR128:$dst),
2873 (ins VR128:$src1, VR128:$src2),
2874 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2875 []>, Sched<[sched]>;
2876 let mayLoad = 1 in
2877 def m_Int : I<opc, MRMSrcMem, (outs VR128:$dst),
2878 (ins VR128:$src1, intmemop:$src2),
2879 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2880 []>, Sched<[sched.Folded, sched.ReadAfterFold]>;
2881 }
2883 // We don't want to fold scalar loads into these instructions unless
2884 // optimizing for size. This is because the folded instruction will have a
2885 // partial register update, while the unfolded sequence will not, e.g.
2886 // vmovss mem, %xmm0
2887 // vrcpss %xmm0, %xmm0, %xmm0
2888 // which has a clobber before the rcp, vs.
2889 // vrcpss mem, %xmm0, %xmm0
2890 // TODO: In theory, we could fold the load, and avoid the stall caused by
2891 // the partial register store, either in BreakFalseDeps or with smarter RA.
2892 let Predicates = [target] in {
2893 def : Pat<(OpNode RC:$src), (!cast<Instruction>(NAME#r)
2894 (ScalarVT (IMPLICIT_DEF)), RC:$src)>;
2895 }
2896 let Predicates = [target, OptForSize] in {
2897 def : Pat<(ScalarVT (OpNode (load addr:$src))),
2898 (!cast<Instruction>(NAME#m) (ScalarVT (IMPLICIT_DEF)),
2899     addr:$src)>;
2900 }
2901 }
2903 /// sse1_fp_unop_p - SSE1 unops in packed form.
2904 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,
2905 X86SchedWriteWidths sched, list<Predicate> prds> {
2906 let Predicates = prds in {
2907 def V#NAME#PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2908 !strconcat("v", OpcodeStr,
2909 "ps\t{$src, $dst|$dst, $src}"),
2910 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>,
2911 VEX, Sched<[sched.XMM]>, VEX_WIG;
2912 def V#NAME#PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2913 !strconcat("v", OpcodeStr,
2914 "ps\t{$src, $dst|$dst, $src}"),
2915 [(set VR128:$dst, (OpNode (loadv4f32 addr:$src)))]>,
2916 VEX, Sched<[sched.XMM.Folded]>, VEX_WIG;
2917 def V#NAME#PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2918 !strconcat("v", OpcodeStr,
2919 "ps\t{$src, $dst|$dst, $src}"),
2920 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>,
2921 VEX, VEX_L, Sched<[sched.YMM]>, VEX_WIG;
2922 def V#NAME#PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2923 !strconcat("v", OpcodeStr,
2924 "ps\t{$src, $dst|$dst, $src}"),
2925 [(set VR256:$dst, (OpNode (loadv8f32 addr:$src)))]>,
2926 VEX, VEX_L, Sched<[sched.YMM.Folded]>, VEX_WIG;
2927 }
2929 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2930 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2931 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>,
2932 Sched<[sched.XMM]>;
2933 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2934 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2935 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>,
2936 Sched<[sched.XMM.Folded]>;
2937 }
2939 /// sse2_fp_unop_p - SSE2 unops in vector forms.
2940 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
2941 SDPatternOperator OpNode, X86SchedWriteWidths sched> {
2942 let Predicates = [HasAVX, NoVLX] in {
2943 def V#NAME#PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2944 !strconcat("v", OpcodeStr,
2945 "pd\t{$src, $dst|$dst, $src}"),
2946 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>,
2947 VEX, Sched<[sched.XMM]>, VEX_WIG;
2948 def V#NAME#PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2949 !strconcat("v", OpcodeStr,
2950 "pd\t{$src, $dst|$dst, $src}"),
2951 [(set VR128:$dst, (OpNode (loadv2f64 addr:$src)))]>,
2952 VEX, Sched<[sched.XMM.Folded]>, VEX_WIG;
2953 def V#NAME#PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2954 !strconcat("v", OpcodeStr,
2955 "pd\t{$src, $dst|$dst, $src}"),
2956 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>,
2957 VEX, VEX_L, Sched<[sched.YMM]>, VEX_WIG;
2958 def V#NAME#PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2959 !strconcat("v", OpcodeStr,
2960 "pd\t{$src, $dst|$dst, $src}"),
2961 [(set VR256:$dst, (OpNode (loadv4f64 addr:$src)))]>,
2962 VEX, VEX_L, Sched<[sched.YMM.Folded]>, VEX_WIG;
2963 }
2965 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2966 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2967 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>,
2968 Sched<[sched.XMM]>;
2969 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2970 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2971 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>,
2972 Sched<[sched.XMM.Folded]>;
2973 }
2975 multiclass sse1_fp_unop_s_intr<bits<8> opc, string OpcodeStr, SDNode OpNode,
2976 X86SchedWriteWidths sched, Predicate AVXTarget> {
2977 defm SS : sse_fp_unop_s_intr<FR32, v4f32, sse_load_f32,
2978 !cast<Intrinsic>("int_x86_sse_"#OpcodeStr#_ss),
2979 UseSSE1, "SS">, XS;
2980 defm V#NAME#SS : avx_fp_unop_s_intr<FR32, v4f32, sse_load_f32,
2981 !cast<Intrinsic>("int_x86_sse_"#OpcodeStr#_ss),
2982 AVXTarget>,
2983 XS, VEX_4V, VEX_LIG, VEX_WIG, NotMemoryFoldable;
2984 }
2986 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,
2987 X86SchedWriteWidths sched, Predicate AVXTarget> {
2988 defm SS : sse_fp_unop_s<opc, OpcodeStr#ss, FR32, f32, f32mem,
2989 ssmem, OpNode, SSEPackedSingle, sched.Scl, UseSSE1>, XS;
2990 defm V#NAME#SS : avx_fp_unop_s<opc, "v"#OpcodeStr#ss, FR32, f32,
2991 f32mem, ssmem, OpNode, SSEPackedSingle, sched.Scl, AVXTarget>,
2992 XS, VEX_4V, VEX_LIG, VEX_WIG;
2993 }
2995 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,
2996 X86SchedWriteWidths sched, Predicate AVXTarget> {
2997 defm SD : sse_fp_unop_s<opc, OpcodeStr#sd, FR64, f64, f64mem,
2998 sdmem, OpNode, SSEPackedDouble, sched.Scl, UseSSE2>, XD;
2999 defm V#NAME#SD : avx_fp_unop_s<opc, "v"#OpcodeStr#sd, FR64, f64,
3000 f64mem, sdmem, OpNode, SSEPackedDouble, sched.Scl, AVXTarget>,
3001 XD, VEX_4V, VEX_LIG, VEX_WIG;
3002 }
3005 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", any_fsqrt, SchedWriteFSqrt, UseAVX>,
3006 sse1_fp_unop_p<0x51, "sqrt", any_fsqrt, SchedWriteFSqrt, [HasAVX, NoVLX]>,
3007 sse2_fp_unop_s<0x51, "sqrt", any_fsqrt, SchedWriteFSqrt64, UseAVX>,
3008 sse2_fp_unop_p<0x51, "sqrt", any_fsqrt, SchedWriteFSqrt64>, SIMD_EXC;
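// E.g. a plain "float f(float x) { return sqrtf(x); }" selects SQRTSSr
// (VSQRTSSr with AVX) through the scalar multiclasses above, while
// _mm_sqrt_ps selects the packed SQRTPSr/VSQRTPSr forms.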
3010 // Reciprocal approximations. Note that these typically require refinement
3011 // in order to obtain suitable precision.
3012 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, SchedWriteFRsqrt, HasAVX>,
3013 sse1_fp_unop_s_intr<0x52, "rsqrt", X86frsqrt, SchedWriteFRsqrt, HasAVX>,
3014 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, SchedWriteFRsqrt, [HasAVX]>;
3015 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, SchedWriteFRcp, HasAVX>,
3016 sse1_fp_unop_s_intr<0x53, "rcp", X86frcp, SchedWriteFRcp, HasAVX>,
3017 sse1_fp_unop_p<0x53, "rcp", X86frcp, SchedWriteFRcp, [HasAVX]>;
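// The usual refinement is one Newton-Raphson step: for x ~ 1/a from rcpss,
//   x' = x * (2 - a*x)
// and for x ~ 1/sqrt(a) from rsqrtss,
//   x' = x * (1.5 - 0.5*a*x*x)
// which roughly doubles the ~12 bits of precision the hardware estimate
// provides.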
3019 // There is no f64 version of the reciprocal approximation instructions.
3021 multiclass scalar_unary_math_patterns<SDPatternOperator OpNode, string OpcPrefix, SDNode Move,
3022 ValueType VT, Predicate BasePredicate> {
3023 let Predicates = [BasePredicate] in {
3024 def : Pat<(VT (Move VT:$dst, (scalar_to_vector
3025 (OpNode (extractelt VT:$src, 0))))),
3026 (!cast<Instruction>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
3027 }
3029 // Repeat for AVX versions of the instructions.
3030 let Predicates = [UseAVX] in {
3031 def : Pat<(VT (Move VT:$dst, (scalar_to_vector
3032 (OpNode (extractelt VT:$src, 0))))),
3033 (!cast<Instruction>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;
3034 }
3035 }
3037 defm : scalar_unary_math_patterns<any_fsqrt, "SQRTSS", X86Movss, v4f32, UseSSE1>;
3038 defm : scalar_unary_math_patterns<any_fsqrt, "SQRTSD", X86Movsd, v2f64, UseSSE2>;
3040 multiclass scalar_unary_math_intr_patterns<Intrinsic Intr, string OpcPrefix,
3041 SDNode Move, ValueType VT,
3042 Predicate BasePredicate> {
3043 let Predicates = [BasePredicate] in {
3044 def : Pat<(VT (Move VT:$dst, (Intr VT:$src))),
3045 (!cast<Instruction>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
3046 }
3048 // Repeat for AVX versions of the instructions.
3049 let Predicates = [HasAVX] in {
3050 def : Pat<(VT (Move VT:$dst, (Intr VT:$src))),
3051 (!cast<Instruction>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;
3052 }
3053 }
3055 defm : scalar_unary_math_intr_patterns<int_x86_sse_rcp_ss, "RCPSS", X86Movss,
3056                                        v4f32, UseSSE1>;
3057 defm : scalar_unary_math_intr_patterns<int_x86_sse_rsqrt_ss, "RSQRTSS", X86Movss,
3058                                        v4f32, UseSSE1>;
3061 //===----------------------------------------------------------------------===//
3062 // SSE 1 & 2 - Non-temporal stores
3063 //===----------------------------------------------------------------------===//
3065 let AddedComplexity = 400 in { // Prefer non-temporal versions
3066 let Predicates = [HasAVX, NoVLX] in {
3067 let SchedRW = [SchedWriteFMoveLSNT.XMM.MR] in {
3068 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
3069 (ins f128mem:$dst, VR128:$src),
3070 "movntps\t{$src, $dst|$dst, $src}",
3071 [(alignednontemporalstore (v4f32 VR128:$src),
3072 addr:$dst)]>, VEX, VEX_WIG;
3073 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
3074 (ins f128mem:$dst, VR128:$src),
3075 "movntpd\t{$src, $dst|$dst, $src}",
3076 [(alignednontemporalstore (v2f64 VR128:$src),
3077 addr:$dst)]>, VEX, VEX_WIG;
3078 }
3080 let SchedRW = [SchedWriteFMoveLSNT.YMM.MR] in {
3081 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
3082 (ins f256mem:$dst, VR256:$src),
3083 "movntps\t{$src, $dst|$dst, $src}",
3084 [(alignednontemporalstore (v8f32 VR256:$src),
3085 addr:$dst)]>, VEX, VEX_L, VEX_WIG;
3086 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
3087 (ins f256mem:$dst, VR256:$src),
3088 "movntpd\t{$src, $dst|$dst, $src}",
3089 [(alignednontemporalstore (v4f64 VR256:$src),
3090 addr:$dst)]>, VEX, VEX_L, VEX_WIG;
3091 }
3093 let ExeDomain = SSEPackedInt in {
3094 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
3095 (ins i128mem:$dst, VR128:$src),
3096 "movntdq\t{$src, $dst|$dst, $src}",
3097 [(alignednontemporalstore (v2i64 VR128:$src),
3098 addr:$dst)]>, VEX, VEX_WIG,
3099 Sched<[SchedWriteVecMoveLSNT.XMM.MR]>;
3100 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
3101 (ins i256mem:$dst, VR256:$src),
3102 "movntdq\t{$src, $dst|$dst, $src}",
3103 [(alignednontemporalstore (v4i64 VR256:$src),
3104 addr:$dst)]>, VEX, VEX_L, VEX_WIG,
3105 Sched<[SchedWriteVecMoveLSNT.YMM.MR]>;
3106 }
3107 }
3109 let SchedRW = [SchedWriteFMoveLSNT.XMM.MR] in {
3110 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3111 "movntps\t{$src, $dst|$dst, $src}",
3112 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
3113 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3114 "movntpd\t{$src, $dst|$dst, $src}",
3115 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
3116 }
3118 let ExeDomain = SSEPackedInt, SchedRW = [SchedWriteVecMoveLSNT.XMM.MR] in
3119 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3120 "movntdq\t{$src, $dst|$dst, $src}",
3121 [(alignednontemporalstore (v2i64 VR128:$src), addr:$dst)]>;
3123 let SchedRW = [WriteStoreNT] in {
3124 // There is no AVX form for instructions below this point
3125 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
3126 "movnti{l}\t{$src, $dst|$dst, $src}",
3127 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
3128 PS, Requires<[HasSSE2]>;
3129 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
3130 "movnti{q}\t{$src, $dst|$dst, $src}",
3131 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
3132 PS, Requires<[HasSSE2]>;
3133 } // SchedRW = [WriteStoreNT]
3135 let Predicates = [HasAVX, NoVLX] in {
3136 def : Pat<(alignednontemporalstore (v8i32 VR256:$src), addr:$dst),
3137 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
3138 def : Pat<(alignednontemporalstore (v16i16 VR256:$src), addr:$dst),
3139 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
3140 def : Pat<(alignednontemporalstore (v32i8 VR256:$src), addr:$dst),
3141 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
3143 def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
3144 (VMOVNTDQmr addr:$dst, VR128:$src)>;
3145 def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
3146 (VMOVNTDQmr addr:$dst, VR128:$src)>;
3147 def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
3148 (VMOVNTDQmr addr:$dst, VR128:$src)>;
3149 }
3151 let Predicates = [UseSSE2] in {
3152 def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
3153 (MOVNTDQmr addr:$dst, VR128:$src)>;
3154 def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
3155 (MOVNTDQmr addr:$dst, VR128:$src)>;
3156 def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
3157 (MOVNTDQmr addr:$dst, VR128:$src)>;
3158 }
3160 } // AddedComplexity
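// Usage sketch: these are what the streaming-store intrinsics lower to, e.g.
//   _mm_stream_ps(p, v)    -> MOVNTPSmr / VMOVNTPSmr
//   _mm_stream_si128(p, v) -> MOVNTDQmr / VMOVNTDQmr
//   _mm_stream_si32(p, i)  -> MOVNTImr
// The vector forms require naturally aligned (16/32-byte) addresses.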
3162 //===----------------------------------------------------------------------===//
3163 // SSE 1 & 2 - Prefetch and memory fence
3164 //===----------------------------------------------------------------------===//
3166 // Prefetch intrinsic.
3167 let Predicates = [HasSSEPrefetch], SchedRW = [WriteLoad] in {
3168 def PREFETCHT0 : I<0x18, MRM1m, (outs), (ins i8mem:$src),
3169 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))]>, TB;
3170 def PREFETCHT1 : I<0x18, MRM2m, (outs), (ins i8mem:$src),
3171 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))]>, TB;
3172 def PREFETCHT2 : I<0x18, MRM3m, (outs), (ins i8mem:$src),
3173 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))]>, TB;
3174 def PREFETCHNTA : I<0x18, MRM0m, (outs), (ins i8mem:$src),
3175 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))]>, TB;
3178 // FIXME: How should flush instruction be modeled?
3179 let SchedRW = [WriteLoad] in {
3181 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3182 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3183 PS, Requires<[HasSSE2]>;
3184 }
3186 let SchedRW = [WriteNop] in {
3187 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3188 // was introduced with SSE2, it's backward compatible.
3189 def PAUSE : I<0x90, RawFrm, (outs), (ins),
3190 "pause", [(int_x86_sse2_pause)]>, OBXS;
3191 }
3193 let SchedRW = [WriteFence] in {
3194 // Load, store, and memory fence
3195 // TODO: As with mfence, we may want to ease the availability of sfence/lfence
3196 // to include any 64-bit target.
3197 def SFENCE : I<0xAE, MRM7X, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
3198 PS, Requires<[HasSSE1]>;
3199 def LFENCE : I<0xAE, MRM5X, (outs), (ins), "lfence", [(int_x86_sse2_lfence)]>,
3200 PS, Requires<[HasSSE2]>;
3201 def MFENCE : I<0xAE, MRM6X, (outs), (ins), "mfence", [(int_x86_sse2_mfence)]>,
3202 PS, Requires<[HasMFence]>;
3203 }
3205 def : Pat<(X86MFence), (MFENCE)>;
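// These implement _mm_sfence/_mm_lfence/_mm_mfence; sfence in particular is
// what orders the weakly-ordered non-temporal stores above against later
// stores.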
3207 //===----------------------------------------------------------------------===//
3208 // SSE 1 & 2 - Load/Store MXCSR register
3209 //===----------------------------------------------------------------------===//
3211 let mayLoad=1, hasSideEffects=1 in
3212 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
3213 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>,
3214 VEX, Sched<[WriteLDMXCSR]>, VEX_WIG;
3215 let mayStore=1, hasSideEffects=1 in
3216 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
3217 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>,
3218 VEX, Sched<[WriteSTMXCSR]>, VEX_WIG;
3220 let mayLoad=1, hasSideEffects=1 in
3221 def LDMXCSR : I<0xAE, MRM2m, (outs), (ins i32mem:$src),
3222 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>,
3223 PS, Sched<[WriteLDMXCSR]>;
3224 let mayStore=1, hasSideEffects=1 in
3225 def STMXCSR : I<0xAE, MRM3m, (outs), (ins i32mem:$dst),
3226 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>,
3227 PS, Sched<[WriteSTMXCSR]>;
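// Example use: rounding-mode changes are a STMXCSR/modify/LDMXCSR round
// trip, e.g. OR-ing 0x6000 into bits 14:13 of the stored MXCSR image
// selects round-toward-zero before the reload.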
3229 //===---------------------------------------------------------------------===//
3230 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
3231 //===---------------------------------------------------------------------===//
3233 let ExeDomain = SSEPackedInt in { // SSE integer instructions
3235 let hasSideEffects = 0 in {
3236 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3237 "movdqa\t{$src, $dst|$dst, $src}", []>,
3238 Sched<[SchedWriteVecMoveLS.XMM.RR]>, VEX, VEX_WIG;
3239 def VMOVDQUrr : VSSI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3240 "movdqu\t{$src, $dst|$dst, $src}", []>,
3241 Sched<[SchedWriteVecMoveLS.XMM.RR]>, VEX, VEX_WIG;
3242 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3243 "movdqa\t{$src, $dst|$dst, $src}", []>,
3244 Sched<[SchedWriteVecMoveLS.YMM.RR]>, VEX, VEX_L, VEX_WIG;
3245 def VMOVDQUYrr : VSSI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3246 "movdqu\t{$src, $dst|$dst, $src}", []>,
3247 Sched<[SchedWriteVecMoveLS.YMM.RR]>, VEX, VEX_L, VEX_WIG;
3248 }
3250 // For Disassembler
3251 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
3252 def VMOVDQArr_REV : VPDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3253 "movdqa\t{$src, $dst|$dst, $src}", []>,
3254 Sched<[SchedWriteVecMoveLS.XMM.RR]>,
3255 VEX, VEX_WIG, FoldGenData<"VMOVDQArr">;
3256 def VMOVDQAYrr_REV : VPDI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
3257 "movdqa\t{$src, $dst|$dst, $src}", []>,
3258 Sched<[SchedWriteVecMoveLS.YMM.RR]>,
3259 VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVDQAYrr">;
3260 def VMOVDQUrr_REV : VSSI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3261 "movdqu\t{$src, $dst|$dst, $src}", []>,
3262 Sched<[SchedWriteVecMoveLS.XMM.RR]>,
3263 VEX, VEX_WIG, FoldGenData<"VMOVDQUrr">;
3264 def VMOVDQUYrr_REV : VSSI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
3265 "movdqu\t{$src, $dst|$dst, $src}", []>,
3266 Sched<[SchedWriteVecMoveLS.YMM.RR]>,
3267 VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVDQUYrr">;
3268 }
3270 let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
3271 hasSideEffects = 0, Predicates = [HasAVX,NoVLX] in {
3272 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3273 "movdqa\t{$src, $dst|$dst, $src}",
3274 [(set VR128:$dst, (alignedloadv2i64 addr:$src))]>,
3275 Sched<[SchedWriteVecMoveLS.XMM.RM]>, VEX, VEX_WIG;
3276 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3277 "movdqa\t{$src, $dst|$dst, $src}", []>,
3278 Sched<[SchedWriteVecMoveLS.YMM.RM]>,
3279 VEX, VEX_L, VEX_WIG;
3280 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3281 "vmovdqu\t{$src, $dst|$dst, $src}",
3282 [(set VR128:$dst, (loadv2i64 addr:$src))]>,
3283 Sched<[SchedWriteVecMoveLS.XMM.RM]>,
3284 XS, VEX, VEX_WIG;
3285 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3286 "vmovdqu\t{$src, $dst|$dst, $src}", []>,
3287 Sched<[SchedWriteVecMoveLS.YMM.RM]>,
3288 XS, VEX, VEX_L, VEX_WIG;
3289 }
3291 let mayStore = 1, hasSideEffects = 0, Predicates = [HasAVX,NoVLX] in {
3292 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
3293 (ins i128mem:$dst, VR128:$src),
3294 "movdqa\t{$src, $dst|$dst, $src}",
3295 [(alignedstore (v2i64 VR128:$src), addr:$dst)]>,
3296 Sched<[SchedWriteVecMoveLS.XMM.MR]>, VEX, VEX_WIG;
3297 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
3298 (ins i256mem:$dst, VR256:$src),
3299 "movdqa\t{$src, $dst|$dst, $src}", []>,
3300 Sched<[SchedWriteVecMoveLS.YMM.MR]>, VEX, VEX_L, VEX_WIG;
3301 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3302 "vmovdqu\t{$src, $dst|$dst, $src}",
3303 [(store (v2i64 VR128:$src), addr:$dst)]>,
3304 Sched<[SchedWriteVecMoveLS.XMM.MR]>, XS, VEX, VEX_WIG;
3305 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
3306 "vmovdqu\t{$src, $dst|$dst, $src}",[]>,
3307 Sched<[SchedWriteVecMoveLS.YMM.MR]>, XS, VEX, VEX_L, VEX_WIG;
3310 let SchedRW = [SchedWriteVecMoveLS.XMM.RR] in {
3311 let hasSideEffects = 0 in {
3312 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3313 "movdqa\t{$src, $dst|$dst, $src}", []>;
3315 def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3316 "movdqu\t{$src, $dst|$dst, $src}", []>,
3317 XS, Requires<[UseSSE2]>;
3318 }
3320 // For Disassembler
3321 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
3322 def MOVDQArr_REV : PDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3323 "movdqa\t{$src, $dst|$dst, $src}", []>,
3324 FoldGenData<"MOVDQArr">;
3326 def MOVDQUrr_REV : I<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3327 "movdqu\t{$src, $dst|$dst, $src}", []>,
3328 XS, Requires<[UseSSE2]>, FoldGenData<"MOVDQUrr">;
3329 }
3330 } // SchedRW
3332 let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
3333 hasSideEffects = 0, SchedRW = [SchedWriteVecMoveLS.XMM.RM] in {
3334 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3335 "movdqa\t{$src, $dst|$dst, $src}",
3336 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
3337 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3338 "movdqu\t{$src, $dst|$dst, $src}",
3339 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
3340 XS, Requires<[UseSSE2]>;
3341 }
3343 let mayStore = 1, hasSideEffects = 0,
3344 SchedRW = [SchedWriteVecMoveLS.XMM.MR] in {
3345 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3346 "movdqa\t{$src, $dst|$dst, $src}",
3347 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
3348 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3349 "movdqu\t{$src, $dst|$dst, $src}",
3350 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
3351 XS, Requires<[UseSSE2]>;
3352 }
3354 } // ExeDomain = SSEPackedInt
3356 // Reversed version with ".s" suffix for GAS compatibility.
3357 def : InstAlias<"vmovdqa.s\t{$src, $dst|$dst, $src}",
3358 (VMOVDQArr_REV VR128:$dst, VR128:$src), 0>;
3359 def : InstAlias<"vmovdqa.s\t{$src, $dst|$dst, $src}",
3360 (VMOVDQAYrr_REV VR256:$dst, VR256:$src), 0>;
3361 def : InstAlias<"vmovdqu.s\t{$src, $dst|$dst, $src}",
3362 (VMOVDQUrr_REV VR128:$dst, VR128:$src), 0>;
3363 def : InstAlias<"vmovdqu.s\t{$src, $dst|$dst, $src}",
3364 (VMOVDQUYrr_REV VR256:$dst, VR256:$src), 0>;
3366 // Reversed version with ".s" suffix for GAS compatibility.
3367 def : InstAlias<"movdqa.s\t{$src, $dst|$dst, $src}",
3368 (MOVDQArr_REV VR128:$dst, VR128:$src), 0>;
3369 def : InstAlias<"movdqu.s\t{$src, $dst|$dst, $src}",
3370 (MOVDQUrr_REV VR128:$dst, VR128:$src), 0>;
3372 let Predicates = [HasAVX, NoVLX] in {
3373 // Additional patterns for other integer sizes.
3374 def : Pat<(alignedloadv4i32 addr:$src),
3375 (VMOVDQArm addr:$src)>;
3376 def : Pat<(alignedloadv8i16 addr:$src),
3377 (VMOVDQArm addr:$src)>;
3378 def : Pat<(alignedloadv16i8 addr:$src),
3379 (VMOVDQArm addr:$src)>;
3380 def : Pat<(loadv4i32 addr:$src),
3381 (VMOVDQUrm addr:$src)>;
3382 def : Pat<(loadv8i16 addr:$src),
3383 (VMOVDQUrm addr:$src)>;
3384 def : Pat<(loadv16i8 addr:$src),
3385 (VMOVDQUrm addr:$src)>;
3387 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3388 (VMOVDQAmr addr:$dst, VR128:$src)>;
3389 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3390 (VMOVDQAmr addr:$dst, VR128:$src)>;
3391 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3392 (VMOVDQAmr addr:$dst, VR128:$src)>;
3393 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3394 (VMOVDQUmr addr:$dst, VR128:$src)>;
3395 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3396 (VMOVDQUmr addr:$dst, VR128:$src)>;
3397 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3398 (VMOVDQUmr addr:$dst, VR128:$src)>;
3399 }
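// Note the split above: alignedload/alignedstore select the MOVDQA forms,
// which fault on a misaligned address, while plain load/store select MOVDQU.
// So e.g. _mm_load_si128 becomes (V)MOVDQArm but _mm_loadu_si128 becomes
// (V)MOVDQUrm.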
3401 //===---------------------------------------------------------------------===//
3402 // SSE2 - Packed Integer Arithmetic Instructions
3403 //===---------------------------------------------------------------------===//
3405 let ExeDomain = SSEPackedInt in { // SSE integer instructions
3407 /// PDI_binop_rm2 - Simple SSE2 binary operator with different src and dst types
3408 multiclass PDI_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
3409 ValueType DstVT, ValueType SrcVT, RegisterClass RC,
3410 PatFrag memop_frag, X86MemOperand x86memop,
3411 X86FoldableSchedWrite sched, bit Is2Addr = 1> {
3412 let isCommutable = 1 in
3413 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
3414 (ins RC:$src1, RC:$src2),
3415 !if(Is2Addr,
3416     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3417     !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3418 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1), RC:$src2)))]>,
3419 Sched<[sched]>;
3420 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
3421 (ins RC:$src1, x86memop:$src2),
3422 !if(Is2Addr,
3423     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3424     !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3425 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1),
3426 (memop_frag addr:$src2))))]>,
3427 Sched<[sched.Folded, sched.ReadAfterFold]>;
3428 }
3429 } // ExeDomain = SSEPackedInt
3431 defm PADDB : PDI_binop_all<0xFC, "paddb", add, v16i8, v32i8,
3432 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3433 defm PADDW : PDI_binop_all<0xFD, "paddw", add, v8i16, v16i16,
3434 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3435 defm PADDD : PDI_binop_all<0xFE, "paddd", add, v4i32, v8i32,
3436 SchedWriteVecALU, 1, NoVLX>;
3437 defm PADDQ : PDI_binop_all<0xD4, "paddq", add, v2i64, v4i64,
3438 SchedWriteVecALU, 1, NoVLX>;
3439 defm PADDSB : PDI_binop_all<0xEC, "paddsb", saddsat, v16i8, v32i8,
3440 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3441 defm PADDSW : PDI_binop_all<0xED, "paddsw", saddsat, v8i16, v16i16,
3442 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3443 defm PADDUSB : PDI_binop_all<0xDC, "paddusb", uaddsat, v16i8, v32i8,
3444 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3445 defm PADDUSW : PDI_binop_all<0xDD, "paddusw", uaddsat, v8i16, v16i16,
3446 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3447 defm PMULLW : PDI_binop_all<0xD5, "pmullw", mul, v8i16, v16i16,
3448 SchedWriteVecIMul, 1, NoVLX_Or_NoBWI>;
3449 defm PMULHUW : PDI_binop_all<0xE4, "pmulhuw", mulhu, v8i16, v16i16,
3450 SchedWriteVecIMul, 1, NoVLX_Or_NoBWI>;
3451 defm PMULHW : PDI_binop_all<0xE5, "pmulhw", mulhs, v8i16, v16i16,
3452 SchedWriteVecIMul, 1, NoVLX_Or_NoBWI>;
3453 defm PSUBB : PDI_binop_all<0xF8, "psubb", sub, v16i8, v32i8,
3454 SchedWriteVecALU, 0, NoVLX_Or_NoBWI>;
3455 defm PSUBW : PDI_binop_all<0xF9, "psubw", sub, v8i16, v16i16,
3456 SchedWriteVecALU, 0, NoVLX_Or_NoBWI>;
3457 defm PSUBD : PDI_binop_all<0xFA, "psubd", sub, v4i32, v8i32,
3458 SchedWriteVecALU, 0, NoVLX>;
3459 defm PSUBQ : PDI_binop_all<0xFB, "psubq", sub, v2i64, v4i64,
3460 SchedWriteVecALU, 0, NoVLX>;
3461 defm PSUBSB : PDI_binop_all<0xE8, "psubsb", ssubsat, v16i8, v32i8,
3462 SchedWriteVecALU, 0, NoVLX_Or_NoBWI>;
3463 defm PSUBSW : PDI_binop_all<0xE9, "psubsw", ssubsat, v8i16, v16i16,
3464 SchedWriteVecALU, 0, NoVLX_Or_NoBWI>;
3465 defm PSUBUSB : PDI_binop_all<0xD8, "psubusb", usubsat, v16i8, v32i8,
3466 SchedWriteVecALU, 0, NoVLX_Or_NoBWI>;
3467 defm PSUBUSW : PDI_binop_all<0xD9, "psubusw", usubsat, v8i16, v16i16,
3468 SchedWriteVecALU, 0, NoVLX_Or_NoBWI>;
3469 defm PMINUB : PDI_binop_all<0xDA, "pminub", umin, v16i8, v32i8,
3470 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3471 defm PMINSW : PDI_binop_all<0xEA, "pminsw", smin, v8i16, v16i16,
3472 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3473 defm PMAXUB : PDI_binop_all<0xDE, "pmaxub", umax, v16i8, v32i8,
3474 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3475 defm PMAXSW : PDI_binop_all<0xEE, "pmaxsw", smax, v8i16, v16i16,
3476 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3477 defm PAVGB : PDI_binop_all<0xE0, "pavgb", X86avg, v16i8, v32i8,
3478 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3479 defm PAVGW : PDI_binop_all<0xE3, "pavgw", X86avg, v8i16, v16i16,
3480 SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
3481 defm PMULUDQ : PDI_binop_all<0xF4, "pmuludq", X86pmuludq, v2i64, v4i64,
3482 SchedWriteVecIMul, 1, NoVLX>;
3484 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in
3485 defm VPMADDWD : PDI_binop_rm2<0xF5, "vpmaddwd", X86vpmaddwd, v4i32, v8i16, VR128,
3486 load, i128mem, SchedWriteVecIMul.XMM, 0>,
3487 VEX_4V, VEX_WIG;
3489 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in
3490 defm VPMADDWDY : PDI_binop_rm2<0xF5, "vpmaddwd", X86vpmaddwd, v8i32, v16i16,
3491 VR256, load, i256mem, SchedWriteVecIMul.YMM,
3492 0>, VEX_4V, VEX_L, VEX_WIG;
3493 let Constraints = "$src1 = $dst" in
3494 defm PMADDWD : PDI_binop_rm2<0xF5, "pmaddwd", X86vpmaddwd, v4i32, v8i16, VR128,
3495 memop, i128mem, SchedWriteVecIMul.XMM>;
3497 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in
3498 defm VPSADBW : PDI_binop_rm2<0xF6, "vpsadbw", X86psadbw, v2i64, v16i8, VR128,
3499 load, i128mem, SchedWritePSADBW.XMM, 0>,
3500 VEX_4V, VEX_WIG;
3501 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in
3502 defm VPSADBWY : PDI_binop_rm2<0xF6, "vpsadbw", X86psadbw, v4i64, v32i8, VR256,
3503 load, i256mem, SchedWritePSADBW.YMM, 0>,
3504 VEX_4V, VEX_L, VEX_WIG;
3505 let Constraints = "$src1 = $dst" in
3506 defm PSADBW : PDI_binop_rm2<0xF6, "psadbw", X86psadbw, v2i64, v16i8, VR128,
3507 memop, i128mem, SchedWritePSADBW.XMM>;
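// PSADBW sums |a[i] - b[i]| over the 8 byte pairs of each 64-bit half into
// the low 16 bits of that half, e.g.
//   psadbw %xmm1, %xmm0    ; bits [15:0] and [79:64] hold the two sums
// which is why the result type above is v2i64 while the sources are v16i8.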
3509 //===---------------------------------------------------------------------===//
3510 // SSE2 - Packed Integer Logical Instructions
3511 //===---------------------------------------------------------------------===//
3513 multiclass PDI_binop_rmi<bits<8> opc, bits<8> opc2, Format ImmForm,
3514 string OpcodeStr, SDNode OpNode,
3515 SDNode OpNode2, RegisterClass RC,
3516 X86FoldableSchedWrite sched,
3517 X86FoldableSchedWrite schedImm,
3518 ValueType DstVT, ValueType SrcVT,
3519 PatFrag ld_frag, bit Is2Addr = 1> {
3520 // src2 is always 128-bit
3521 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
3522 (ins RC:$src1, VR128:$src2),
3523 !if(Is2Addr,
3524     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3525     !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3526 [(set RC:$dst, (DstVT (OpNode RC:$src1, (SrcVT VR128:$src2))))]>,
3527 Sched<[sched]>;
3528 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
3529 (ins RC:$src1, i128mem:$src2),
3530 !if(Is2Addr,
3531     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3532 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3533 [(set RC:$dst, (DstVT (OpNode RC:$src1,
3534 (SrcVT (ld_frag addr:$src2)))))]>,
3535 Sched<[sched.Folded, sched.ReadAfterFold]>;
3536 def ri : PDIi8<opc2, ImmForm, (outs RC:$dst),
3537 (ins RC:$src1, u8imm:$src2),
3538 !if(Is2Addr,
3539     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3540     !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3541 [(set RC:$dst, (DstVT (OpNode2 RC:$src1, (i8 timm:$src2))))]>,
3542 Sched<[schedImm]>;
3543 }
3545 multiclass PDI_binop_rmi_all<bits<8> opc, bits<8> opc2, Format ImmForm,
3546 string OpcodeStr, SDNode OpNode,
3547 SDNode OpNode2, ValueType DstVT128,
3548 ValueType DstVT256, ValueType SrcVT,
3549 X86SchedWriteWidths sched,
3550 X86SchedWriteWidths schedImm, Predicate prd> {
3551 let Predicates = [HasAVX, prd] in
3552 defm V#NAME : PDI_binop_rmi<opc, opc2, ImmForm, !strconcat("v", OpcodeStr),
3553 OpNode, OpNode2, VR128, sched.XMM, schedImm.XMM,
3554 DstVT128, SrcVT, load, 0>, VEX_4V, VEX_WIG;
3555 let Predicates = [HasAVX2, prd] in
3556 defm V#NAME#Y : PDI_binop_rmi<opc, opc2, ImmForm, !strconcat("v", OpcodeStr),
3557 OpNode, OpNode2, VR256, sched.YMM, schedImm.YMM,
3558 DstVT256, SrcVT, load, 0>, VEX_4V, VEX_L,
3559 VEX_WIG;
3560 let Constraints = "$src1 = $dst" in
3561 defm NAME : PDI_binop_rmi<opc, opc2, ImmForm, OpcodeStr, OpNode, OpNode2,
3562 VR128, sched.XMM, schedImm.XMM, DstVT128, SrcVT,
3563 memop>;
3564 }
3566 multiclass PDI_binop_ri<bits<8> opc, Format ImmForm, string OpcodeStr,
3567 SDNode OpNode, RegisterClass RC, ValueType VT,
3568 X86FoldableSchedWrite sched, bit Is2Addr = 1> {
3569 def ri : PDIi8<opc, ImmForm, (outs RC:$dst), (ins RC:$src1, u8imm:$src2),
3570 !if(Is2Addr,
3571     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3572     !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3573 [(set RC:$dst, (VT (OpNode RC:$src1, (i8 timm:$src2))))]>,
3574 Sched<[sched]>;
3575 }
3577 multiclass PDI_binop_ri_all<bits<8> opc, Format ImmForm, string OpcodeStr,
3578 SDNode OpNode, X86SchedWriteWidths sched> {
3579 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in
3580 defm V#NAME : PDI_binop_ri<opc, ImmForm, !strconcat("v", OpcodeStr), OpNode,
3581 VR128, v16i8, sched.XMM, 0>, VEX_4V, VEX_WIG;
3582 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in
3583 defm V#NAME#Y : PDI_binop_ri<opc, ImmForm, !strconcat("v", OpcodeStr), OpNode,
3584 VR256, v32i8, sched.YMM, 0>,
3585 VEX_4V, VEX_L, VEX_WIG;
3586 let Constraints = "$src1 = $dst" in
3587 defm NAME : PDI_binop_ri<opc, ImmForm, OpcodeStr, OpNode, VR128, v16i8,
3588 sched.XMM>;
3589 }
3591 let ExeDomain = SSEPackedInt in {
3592 defm PSLLW : PDI_binop_rmi_all<0xF1, 0x71, MRM6r, "psllw", X86vshl, X86vshli,
3593 v8i16, v16i16, v8i16, SchedWriteVecShift,
3594 SchedWriteVecShiftImm, NoVLX_Or_NoBWI>;
3595 defm PSLLD : PDI_binop_rmi_all<0xF2, 0x72, MRM6r, "pslld", X86vshl, X86vshli,
3596 v4i32, v8i32, v4i32, SchedWriteVecShift,
3597 SchedWriteVecShiftImm, NoVLX>;
3598 defm PSLLQ : PDI_binop_rmi_all<0xF3, 0x73, MRM6r, "psllq", X86vshl, X86vshli,
3599 v2i64, v4i64, v2i64, SchedWriteVecShift,
3600 SchedWriteVecShiftImm, NoVLX>;
3602 defm PSRLW : PDI_binop_rmi_all<0xD1, 0x71, MRM2r, "psrlw", X86vsrl, X86vsrli,
3603 v8i16, v16i16, v8i16, SchedWriteVecShift,
3604 SchedWriteVecShiftImm, NoVLX_Or_NoBWI>;
3605 defm PSRLD : PDI_binop_rmi_all<0xD2, 0x72, MRM2r, "psrld", X86vsrl, X86vsrli,
3606 v4i32, v8i32, v4i32, SchedWriteVecShift,
3607 SchedWriteVecShiftImm, NoVLX>;
3608 defm PSRLQ : PDI_binop_rmi_all<0xD3, 0x73, MRM2r, "psrlq", X86vsrl, X86vsrli,
3609 v2i64, v4i64, v2i64, SchedWriteVecShift,
3610 SchedWriteVecShiftImm, NoVLX>;
3612 defm PSRAW : PDI_binop_rmi_all<0xE1, 0x71, MRM4r, "psraw", X86vsra, X86vsrai,
3613 v8i16, v16i16, v8i16, SchedWriteVecShift,
3614 SchedWriteVecShiftImm, NoVLX_Or_NoBWI>;
3615 defm PSRAD : PDI_binop_rmi_all<0xE2, 0x72, MRM4r, "psrad", X86vsra, X86vsrai,
3616 v4i32, v8i32, v4i32, SchedWriteVecShift,
3617 SchedWriteVecShiftImm, NoVLX>;
3619 defm PSLLDQ : PDI_binop_ri_all<0x73, MRM7r, "pslldq", X86vshldq,
3620                                SchedWriteShuffle>;
3621 defm PSRLDQ : PDI_binop_ri_all<0x73, MRM3r, "psrldq", X86vshrdq,
3622                                SchedWriteShuffle>;
3623 } // ExeDomain = SSEPackedInt
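// Two count forms are modeled above: the rr/rm forms read the shift count
// from the low 64 bits of an XMM register (counts >= the element width zero
// the result, or sign-fill for psraw/psrad), the ri forms take an immediate:
//   pslld %xmm1, %xmm0     ; PSLLDrr, count in %xmm1
//   psllw $3, %xmm0        ; PSLLWri
// PSLLDQ/PSRLDQ are the byte-granularity whole-register shifts.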
3625 //===---------------------------------------------------------------------===//
3626 // SSE2 - Packed Integer Comparison Instructions
3627 //===---------------------------------------------------------------------===//
3629 defm PCMPEQB : PDI_binop_all<0x74, "pcmpeqb", X86pcmpeq, v16i8, v32i8,
3630 SchedWriteVecALU, 1, TruePredicate>;
3631 defm PCMPEQW : PDI_binop_all<0x75, "pcmpeqw", X86pcmpeq, v8i16, v16i16,
3632 SchedWriteVecALU, 1, TruePredicate>;
3633 defm PCMPEQD : PDI_binop_all<0x76, "pcmpeqd", X86pcmpeq, v4i32, v8i32,
3634 SchedWriteVecALU, 1, TruePredicate>;
3635 defm PCMPGTB : PDI_binop_all<0x64, "pcmpgtb", X86pcmpgt, v16i8, v32i8,
3636 SchedWriteVecALU, 0, TruePredicate>;
3637 defm PCMPGTW : PDI_binop_all<0x65, "pcmpgtw", X86pcmpgt, v8i16, v16i16,
3638 SchedWriteVecALU, 0, TruePredicate>;
3639 defm PCMPGTD : PDI_binop_all<0x66, "pcmpgtd", X86pcmpgt, v4i32, v8i32,
3640 SchedWriteVecALU, 0, TruePredicate>;
3642 //===---------------------------------------------------------------------===//
3643 // SSE2 - Packed Integer Shuffle Instructions
3644 //===---------------------------------------------------------------------===//
3646 let ExeDomain = SSEPackedInt in {
3647 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt128, ValueType vt256,
3648 SDNode OpNode, X86SchedWriteWidths sched,
3649 Predicate prd> {
3650 let Predicates = [HasAVX, prd] in {
3651 def V#NAME#ri : Ii8<0x70, MRMSrcReg, (outs VR128:$dst),
3652 (ins VR128:$src1, u8imm:$src2),
3653 !strconcat("v", OpcodeStr,
3654 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3655 [(set VR128:$dst,
3656     (vt128 (OpNode VR128:$src1, (i8 timm:$src2))))]>,
3657 VEX, Sched<[sched.XMM]>, VEX_WIG;
3658 def V#NAME#mi : Ii8<0x70, MRMSrcMem, (outs VR128:$dst),
3659 (ins i128mem:$src1, u8imm:$src2),
3660 !strconcat("v", OpcodeStr,
3661 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3662 [(set VR128:$dst,
3663     (vt128 (OpNode (load addr:$src1),
3664     (i8 timm:$src2))))]>, VEX,
3665 Sched<[sched.XMM.Folded]>, VEX_WIG;
3666 }
3668 let Predicates = [HasAVX2, prd] in {
3669 def V#NAME#Yri : Ii8<0x70, MRMSrcReg, (outs VR256:$dst),
3670 (ins VR256:$src1, u8imm:$src2),
3671 !strconcat("v", OpcodeStr,
3672 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3673 [(set VR256:$dst,
3674     (vt256 (OpNode VR256:$src1, (i8 timm:$src2))))]>,
3675 VEX, VEX_L, Sched<[sched.YMM]>, VEX_WIG;
3676 def V#NAME#Ymi : Ii8<0x70, MRMSrcMem, (outs VR256:$dst),
3677 (ins i256mem:$src1, u8imm:$src2),
3678 !strconcat("v", OpcodeStr,
3679 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3680 [(set VR256:$dst,
3681     (vt256 (OpNode (load addr:$src1),
3682     (i8 timm:$src2))))]>, VEX, VEX_L,
3683 Sched<[sched.YMM.Folded]>, VEX_WIG;
3684 }
3686 let Predicates = [UseSSE2] in {
3687 def ri : Ii8<0x70, MRMSrcReg,
3688 (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
3689 !strconcat(OpcodeStr,
3690 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3691 [(set VR128:$dst,
3692     (vt128 (OpNode VR128:$src1, (i8 timm:$src2))))]>,
3693 Sched<[sched.XMM]>;
3694 def mi : Ii8<0x70, MRMSrcMem,
3695 (outs VR128:$dst), (ins i128mem:$src1, u8imm:$src2),
3696 !strconcat(OpcodeStr,
3697 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3698 [(set VR128:$dst,
3699     (vt128 (OpNode (memop addr:$src1),
3700     (i8 timm:$src2))))]>,
3701 Sched<[sched.XMM.Folded]>;
3702 }
3703 }
3704 } // ExeDomain = SSEPackedInt
3706 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, v8i32, X86PShufd,
3707 SchedWriteShuffle, NoVLX>, PD;
3708 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, v16i16, X86PShufhw,
3709 SchedWriteShuffle, NoVLX_Or_NoBWI>, XS;
3710 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, v16i16, X86PShuflw,
3711 SchedWriteShuffle, NoVLX_Or_NoBWI>, XD;
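// The immediate is four 2-bit source selectors, lowest field picking result
// element 0; e.g.
//   pshufd $0x1B, %xmm0, %xmm1   ; 0x1B = 0b00011011 reverses the 4 dwords
// PSHUFLW/PSHUFHW apply the same encoding to only the low/high four words.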
3713 //===---------------------------------------------------------------------===//
3714 // Packed Integer Pack Instructions (SSE & AVX)
3715 //===---------------------------------------------------------------------===//
3717 let ExeDomain = SSEPackedInt in {
3718 multiclass sse2_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
3719 ValueType ArgVT, SDNode OpNode, RegisterClass RC,
3720 X86MemOperand x86memop, X86FoldableSchedWrite sched,
3721 PatFrag ld_frag, bit Is2Addr = 1> {
3722 def rr : PDI<opc, MRMSrcReg,
3723 (outs RC:$dst), (ins RC:$src1, RC:$src2),
3724 !if(Is2Addr,
3725     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3726     !strconcat(OpcodeStr,
3727 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3728 [(set RC:$dst,
3729     (OutVT (OpNode (ArgVT RC:$src1), RC:$src2)))]>,
3730 Sched<[sched]>;
3731 def rm : PDI<opc, MRMSrcMem,
3732 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3733 !if(Is2Addr,
3734     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3735     !strconcat(OpcodeStr,
3736 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3737 [(set RC:$dst,
3738     (OutVT (OpNode (ArgVT RC:$src1),
3739     (ld_frag addr:$src2))))]>,
3740 Sched<[sched.Folded, sched.ReadAfterFold]>;
3741 }
3743 multiclass sse4_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
3744 ValueType ArgVT, SDNode OpNode, RegisterClass RC,
3745 X86MemOperand x86memop, X86FoldableSchedWrite sched,
3746 PatFrag ld_frag, bit Is2Addr = 1> {
3747 def rr : SS48I<opc, MRMSrcReg,
3748 (outs RC:$dst), (ins RC:$src1, RC:$src2),
3749 !if(Is2Addr,
3750     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3751     !strconcat(OpcodeStr,
3752 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3753 [(set RC:$dst,
3754     (OutVT (OpNode (ArgVT RC:$src1), RC:$src2)))]>,
3755 Sched<[sched]>;
3756 def rm : SS48I<opc, MRMSrcMem,
3757 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3758 !if(Is2Addr,
3759     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3760     !strconcat(OpcodeStr,
3761 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3762 [(set RC:$dst,
3763     (OutVT (OpNode (ArgVT RC:$src1),
3764     (ld_frag addr:$src2))))]>,
3765 Sched<[sched.Folded, sched.ReadAfterFold]>;
3766 }
3768 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
3769 defm VPACKSSWB : sse2_pack<0x63, "vpacksswb", v16i8, v8i16, X86Packss, VR128,
3770 i128mem, SchedWriteShuffle.XMM, load, 0>,
3771 VEX_4V, VEX_WIG;
3772 defm VPACKSSDW : sse2_pack<0x6B, "vpackssdw", v8i16, v4i32, X86Packss, VR128,
3773 i128mem, SchedWriteShuffle.XMM, load, 0>,
3774 VEX_4V, VEX_WIG;
3776 defm VPACKUSWB : sse2_pack<0x67, "vpackuswb", v16i8, v8i16, X86Packus, VR128,
3777 i128mem, SchedWriteShuffle.XMM, load, 0>,
3778 VEX_4V, VEX_WIG;
3779 defm VPACKUSDW : sse4_pack<0x2B, "vpackusdw", v8i16, v4i32, X86Packus, VR128,
3780 i128mem, SchedWriteShuffle.XMM, load, 0>,
3781 VEX_4V, VEX_WIG;
3782 }
3784 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
3785 defm VPACKSSWBY : sse2_pack<0x63, "vpacksswb", v32i8, v16i16, X86Packss, VR256,
3786 i256mem, SchedWriteShuffle.YMM, load, 0>,
3787 VEX_4V, VEX_L, VEX_WIG;
3788 defm VPACKSSDWY : sse2_pack<0x6B, "vpackssdw", v16i16, v8i32, X86Packss, VR256,
3789 i256mem, SchedWriteShuffle.YMM, load, 0>,
3790 VEX_4V, VEX_L, VEX_WIG;
3792 defm VPACKUSWBY : sse2_pack<0x67, "vpackuswb", v32i8, v16i16, X86Packus, VR256,
3793 i256mem, SchedWriteShuffle.YMM, load, 0>,
3794 VEX_4V, VEX_L, VEX_WIG;
3795 defm VPACKUSDWY : sse4_pack<0x2B, "vpackusdw", v16i16, v8i32, X86Packus, VR256,
3796 i256mem, SchedWriteShuffle.YMM, load, 0>,
3797 VEX_4V, VEX_L, VEX_WIG;
3798 }
3800 let Constraints = "$src1 = $dst" in {
3801 defm PACKSSWB : sse2_pack<0x63, "packsswb", v16i8, v8i16, X86Packss, VR128,
3802 i128mem, SchedWriteShuffle.XMM, memop>;
3803 defm PACKSSDW : sse2_pack<0x6B, "packssdw", v8i16, v4i32, X86Packss, VR128,
3804 i128mem, SchedWriteShuffle.XMM, memop>;
3806 defm PACKUSWB : sse2_pack<0x67, "packuswb", v16i8, v8i16, X86Packus, VR128,
3807 i128mem, SchedWriteShuffle.XMM, memop>;
3809 defm PACKUSDW : sse4_pack<0x2B, "packusdw", v8i16, v4i32, X86Packus, VR128,
3810 i128mem, SchedWriteShuffle.XMM, memop>;
3811 }
3812 } // ExeDomain = SSEPackedInt
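// The packs narrow with saturation: PACKSSWB clamps each word to [-128,127],
// PACKUSWB to [0,255], PACKSSDW each dword to [-32768,32767], and PACKUSDW
// to [0,65535], with src1's results placed below src2's.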
3814 //===---------------------------------------------------------------------===//
3815 // SSE2 - Packed Integer Unpack Instructions
3816 //===---------------------------------------------------------------------===//
3818 let ExeDomain = SSEPackedInt in {
3819 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
3820 SDNode OpNode, RegisterClass RC, X86MemOperand x86memop,
3821 X86FoldableSchedWrite sched, PatFrag ld_frag,
3822 bit Is2Addr = 1> {
3823 def rr : PDI<opc, MRMSrcReg,
3824 (outs RC:$dst), (ins RC:$src1, RC:$src2),
3825 !if(Is2Addr,
3826     !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
3827     !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3828 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))]>,
3829 Sched<[sched]>;
3830 def rm : PDI<opc, MRMSrcMem,
3831 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3832 !if(Is2Addr,
3833     !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
3834     !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3835 [(set RC:$dst, (vt (OpNode RC:$src1, (ld_frag addr:$src2))))]>,
3836 Sched<[sched.Folded, sched.ReadAfterFold]>;
3837 }
3839 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
3840 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Unpckl, VR128,
3841 i128mem, SchedWriteShuffle.XMM, load, 0>,
3842 VEX_4V, VEX_WIG;
3843 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Unpckl, VR128,
3844 i128mem, SchedWriteShuffle.XMM, load, 0>,
3845 VEX_4V, VEX_WIG;
3846 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Unpckh, VR128,
3847 i128mem, SchedWriteShuffle.XMM, load, 0>,
3848 VEX_4V, VEX_WIG;
3849 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Unpckh, VR128,
3850 i128mem, SchedWriteShuffle.XMM, load, 0>,
3851 VEX_4V, VEX_WIG;
3852 }
3854 let Predicates = [HasAVX, NoVLX] in {
3855 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Unpckl, VR128,
3856 i128mem, SchedWriteShuffle.XMM, load, 0>,
3857 VEX_4V, VEX_WIG;
3858 defm VPUNPCKLQDQ : sse2_unpack<0x6C, "vpunpcklqdq", v2i64, X86Unpckl, VR128,
3859 i128mem, SchedWriteShuffle.XMM, load, 0>,
3860 VEX_4V, VEX_WIG;
3861 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Unpckh, VR128,
3862 i128mem, SchedWriteShuffle.XMM, load, 0>,
3863 VEX_4V, VEX_WIG;
3864 defm VPUNPCKHQDQ : sse2_unpack<0x6D, "vpunpckhqdq", v2i64, X86Unpckh, VR128,
3865 i128mem, SchedWriteShuffle.XMM, load, 0>,
3866 VEX_4V, VEX_WIG;
3867 }
3869 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
3870 defm VPUNPCKLBWY : sse2_unpack<0x60, "vpunpcklbw", v32i8, X86Unpckl, VR256,
3871 i256mem, SchedWriteShuffle.YMM, load, 0>,
3872 VEX_4V, VEX_L, VEX_WIG;
3873 defm VPUNPCKLWDY : sse2_unpack<0x61, "vpunpcklwd", v16i16, X86Unpckl, VR256,
3874 i256mem, SchedWriteShuffle.YMM, load, 0>,
3875 VEX_4V, VEX_L, VEX_WIG;
3876 defm VPUNPCKHBWY : sse2_unpack<0x68, "vpunpckhbw", v32i8, X86Unpckh, VR256,
3877 i256mem, SchedWriteShuffle.YMM, load, 0>,
3878 VEX_4V, VEX_L, VEX_WIG;
3879 defm VPUNPCKHWDY : sse2_unpack<0x69, "vpunpckhwd", v16i16, X86Unpckh, VR256,
3880 i256mem, SchedWriteShuffle.YMM, load, 0>,
3881 VEX_4V, VEX_L, VEX_WIG;
3882 }
3884 let Predicates = [HasAVX2, NoVLX] in {
3885 defm VPUNPCKLDQY : sse2_unpack<0x62, "vpunpckldq", v8i32, X86Unpckl, VR256,
3886 i256mem, SchedWriteShuffle.YMM, load, 0>,
3887 VEX_4V, VEX_L, VEX_WIG;
3888 defm VPUNPCKLQDQY : sse2_unpack<0x6C, "vpunpcklqdq", v4i64, X86Unpckl, VR256,
3889 i256mem, SchedWriteShuffle.YMM, load, 0>,
3890 VEX_4V, VEX_L, VEX_WIG;
3891 defm VPUNPCKHDQY : sse2_unpack<0x6A, "vpunpckhdq", v8i32, X86Unpckh, VR256,
3892 i256mem, SchedWriteShuffle.YMM, load, 0>,
3893 VEX_4V, VEX_L, VEX_WIG;
3894 defm VPUNPCKHQDQY : sse2_unpack<0x6D, "vpunpckhqdq", v4i64, X86Unpckh, VR256,
3895 i256mem, SchedWriteShuffle.YMM, load, 0>,
3896 VEX_4V, VEX_L, VEX_WIG;
3897 }
3899 let Constraints = "$src1 = $dst" in {
3900 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Unpckl, VR128,
3901 i128mem, SchedWriteShuffle.XMM, memop>;
3902 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Unpckl, VR128,
3903 i128mem, SchedWriteShuffle.XMM, memop>;
3904 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Unpckl, VR128,
3905 i128mem, SchedWriteShuffle.XMM, memop>;
3906 defm PUNPCKLQDQ : sse2_unpack<0x6C, "punpcklqdq", v2i64, X86Unpckl, VR128,
3907 i128mem, SchedWriteShuffle.XMM, memop>;
3909 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Unpckh, VR128,
3910 i128mem, SchedWriteShuffle.XMM, memop>;
3911 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Unpckh, VR128,
3912 i128mem, SchedWriteShuffle.XMM, memop>;
3913 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Unpckh, VR128,
3914 i128mem, SchedWriteShuffle.XMM, memop>;
3915 defm PUNPCKHQDQ : sse2_unpack<0x6D, "punpckhqdq", v2i64, X86Unpckh, VR128,
3916 i128mem, SchedWriteShuffle.XMM, memop>;
3917 }
3918 } // ExeDomain = SSEPackedInt
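// Unpacks interleave their sources, e.g. punpcklbw on the low halves gives
//   dst = { a0, b0, a1, b1, ..., a7, b7 }
// so "punpcklbw %xmm0, %xmm0" followed by psraw/psrlw $8 is a common idiom
// for widening bytes to words.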
3920 //===---------------------------------------------------------------------===//
3921 // SSE2 - Packed Integer Extract and Insert
3922 //===---------------------------------------------------------------------===//
3924 let ExeDomain = SSEPackedInt in {
3925 multiclass sse2_pinsrw<bit Is2Addr = 1> {
3926 def rr : Ii8<0xC4, MRMSrcReg,
3927 (outs VR128:$dst), (ins VR128:$src1,
3928 GR32orGR64:$src2, u8imm:$src3),
3930 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
3931 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
3933 (X86pinsrw VR128:$src1, GR32orGR64:$src2, timm:$src3))]>,
3934 Sched<[WriteVecInsert, ReadDefault, ReadInt2Fpu]>;
3935 def rm : Ii8<0xC4, MRMSrcMem,
3936 (outs VR128:$dst), (ins VR128:$src1,
3937 i16mem:$src2, u8imm:$src3),
3939 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
3940 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
3942 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
3944 Sched<[WriteVecInsert.Folded, WriteVecInsert.ReadAfterFold]>;
3948 let Predicates = [HasAVX, NoBWI] in
3949 def VPEXTRWrr : Ii8<0xC5, MRMSrcReg,
3950 (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
3951 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3952 [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
3953     timm:$src2))]>,
3954 PD, VEX, VEX_WIG, Sched<[WriteVecExtract]>;
3955 def PEXTRWrr : PDIi8<0xC5, MRMSrcReg,
3956 (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
3957 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3958 [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
3959     timm:$src2))]>,
3960 Sched<[WriteVecExtract]>;
3963 let Predicates = [HasAVX, NoBWI] in
3964 defm VPINSRW : sse2_pinsrw<0>, PD, VEX_4V, VEX_WIG;
3966 let Predicates = [UseSSE2], Constraints = "$src1 = $dst" in
3967 defm PINSRW : sse2_pinsrw, PD;
3969 } // ExeDomain = SSEPackedInt
3971 //===---------------------------------------------------------------------===//
3972 // SSE2 - Packed Mask Creation
3973 //===---------------------------------------------------------------------===//
3975 let ExeDomain = SSEPackedInt in {
3977 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
3978 (ins VR128:$src),
3979 "pmovmskb\t{$src, $dst|$dst, $src}",
3980 [(set GR32orGR64:$dst, (X86movmsk (v16i8 VR128:$src)))]>,
3981 Sched<[WriteVecMOVMSK]>, VEX, VEX_WIG;
3983 let Predicates = [HasAVX2] in {
3984 def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
3985 (ins VR256:$src),
3986 "pmovmskb\t{$src, $dst|$dst, $src}",
3987 [(set GR32orGR64:$dst, (X86movmsk (v32i8 VR256:$src)))]>,
3988 Sched<[WriteVecMOVMSKY]>, VEX, VEX_L, VEX_WIG;
3989 }
3991 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst), (ins VR128:$src),
3992 "pmovmskb\t{$src, $dst|$dst, $src}",
3993 [(set GR32orGR64:$dst, (X86movmsk (v16i8 VR128:$src)))]>,
3994 Sched<[WriteVecMOVMSK]>;
3996 } // ExeDomain = SSEPackedInt
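// PMOVMSKB packs the sign bit of each byte into the low bits of a GPR; a
// classic use is a vectorized byte scan:
//   pcmpeqb %xmm1, %xmm0
//   pmovmskb %xmm0, %eax    ; nonzero -> match, bsf %eax finds the index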
//===---------------------------------------------------------------------===//
// SSE2 - Conditional Store
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt, SchedRW = [SchedWriteVecMoveLS.XMM.MR] in {
let Uses = [EDI], Predicates = [HasAVX,Not64BitMode] in
def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
                       (ins VR128:$src, VR128:$mask),
                       "maskmovdqu\t{$mask, $src|$src, $mask}",
                       [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>,
                       VEX, VEX_WIG;
let Uses = [RDI], Predicates = [HasAVX,In64BitMode] in
def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
                         (ins VR128:$src, VR128:$mask),
                         "maskmovdqu\t{$mask, $src|$src, $mask}",
                         [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>,
                         VEX, VEX_WIG, AdSize64;
let Uses = [EDI], Predicates = [HasAVX,In64BitMode] in
def VMASKMOVDQUX32 : VPDI<0xF7, MRMSrcReg, (outs),
                          (ins VR128:$src, VR128:$mask), "",
                          [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>,
                          VEX, VEX_WIG, AdSize32 {
  let AsmString = "addr32 vmaskmovdqu\t{$mask, $src|$src, $mask}";
  let AsmVariantName = "NonParsable";
}

let Uses = [EDI], Predicates = [UseSSE2,Not64BitMode] in
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
                     "maskmovdqu\t{$mask, $src|$src, $mask}",
                     [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
let Uses = [RDI], Predicates = [UseSSE2,In64BitMode] in
def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
                       "maskmovdqu\t{$mask, $src|$src, $mask}",
                       [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>,
                       AdSize64;
let Uses = [EDI], Predicates = [UseSSE2,In64BitMode] in
def MASKMOVDQUX32 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
                        "addr32 maskmovdqu\t{$mask, $src|$src, $mask}",
                        [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>,
                        AdSize32 {
  let AsmVariantName = "NonParsable";
}

} // ExeDomain = SSEPackedInt
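// maskmovdqu stores only the byte lanes of $src whose corresponding $mask
// byte has its top bit set, to the address held implicitly in EDI/RDI. The
// AdSize32 variants force the 0x67 address-size prefix so 64-bit code can use
// a 32-bit EDI pointer.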
//===---------------------------------------------------------------------===//
// SSE2 - Move Doubleword/Quadword
//===---------------------------------------------------------------------===//

//===---------------------------------------------------------------------===//
// Move Int Doubleword to Packed Double Int
//===---------------------------------------------------------------------===//
let ExeDomain = SSEPackedInt in {
def VMOVDI2PDIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v4i32 (scalar_to_vector GR32:$src)))]>,
                        VEX, Sched<[WriteVecMoveFromGpr]>;
def VMOVDI2PDIrm : VS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
                        VEX, Sched<[WriteVecLoad]>;
def VMOV64toPQIrr : VRS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                          "movq\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst,
                            (v2i64 (scalar_to_vector GR64:$src)))]>,
                          VEX, Sched<[WriteVecMoveFromGpr]>;
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
def VMOV64toPQIrm : VRS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                          "movq\t{$src, $dst|$dst, $src}", []>,
                          VEX, Sched<[WriteVecLoad]>;
let isCodeGenOnly = 1 in
def VMOV64toSDrr : VRS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                         "movq\t{$src, $dst|$dst, $src}",
                         [(set FR64:$dst, (bitconvert GR64:$src))]>,
                         VEX, Sched<[WriteVecMoveFromGpr]>;

def MOVDI2PDIrr : S2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))]>,
                      Sched<[WriteVecMoveFromGpr]>;
def MOVDI2PDIrm : S2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
                      Sched<[WriteVecLoad]>;
def MOV64toPQIrr : RS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>,
                        Sched<[WriteVecMoveFromGpr]>;
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
def MOV64toPQIrm : RS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                        "movq\t{$src, $dst|$dst, $src}", []>,
                        Sched<[WriteVecLoad]>;
let isCodeGenOnly = 1 in
def MOV64toSDrr : RS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>,
                       Sched<[WriteVecMoveFromGpr]>;
} // ExeDomain = SSEPackedInt

//===---------------------------------------------------------------------===//
// Move Int Doubleword to Single Scalar
//===---------------------------------------------------------------------===//
let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {
def VMOVDI2SSrr : VS2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (bitconvert GR32:$src))]>,
                       VEX, Sched<[WriteVecMoveFromGpr]>;

def MOVDI2SSrr : S2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (bitconvert GR32:$src))]>,
                     Sched<[WriteVecMoveFromGpr]>;

} // ExeDomain = SSEPackedInt, isCodeGenOnly = 1

//===---------------------------------------------------------------------===//
// Move Packed Doubleword Int to Packed Double Int
//===---------------------------------------------------------------------===//
let ExeDomain = SSEPackedInt in {
def VMOVPDI2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(set GR32:$dst, (extractelt (v4i32 VR128:$src),
                                                     (iPTR 0)))]>, VEX,
                        Sched<[WriteVecMoveToGpr]>;
def VMOVPDI2DImr : VS2I<0x7E, MRMDestMem, (outs),
                        (ins i32mem:$dst, VR128:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(store (i32 (extractelt (v4i32 VR128:$src),
                                                 (iPTR 0))), addr:$dst)]>,
                        VEX, Sched<[WriteVecStore]>;
def MOVPDI2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (extractelt (v4i32 VR128:$src),
                                                   (iPTR 0)))]>,
                      Sched<[WriteVecMoveToGpr]>;
def MOVPDI2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(store (i32 (extractelt (v4i32 VR128:$src),
                                               (iPTR 0))), addr:$dst)]>,
                      Sched<[WriteVecStore]>;
} // ExeDomain = SSEPackedInt
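// The 0x7E MRMDestReg/MRMDestMem forms above are the xmm-to-GPR/memory
// direction of movd; they mirror the 0x6E GPR/memory-to-xmm forms defined
// earlier in this section.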
//===---------------------------------------------------------------------===//
// Move Packed Doubleword Int first element to Doubleword Int
//===---------------------------------------------------------------------===//
let ExeDomain = SSEPackedInt in {
let SchedRW = [WriteVecMoveToGpr] in {
def VMOVPQIto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                          "movq\t{$src, $dst|$dst, $src}",
                          [(set GR64:$dst, (extractelt (v2i64 VR128:$src),
                                                       (iPTR 0)))]>,
                          VEX;

def MOVPQIto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (extractelt (v2i64 VR128:$src),
                                                     (iPTR 0)))]>;
} // SchedRW

let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
def VMOVPQIto64mr : VRS2I<0x7E, MRMDestMem, (outs),
                          (ins i64mem:$dst, VR128:$src),
                          "movq\t{$src, $dst|$dst, $src}", []>,
                          VEX, Sched<[WriteVecStore]>;
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
def MOVPQIto64mr : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                        "movq\t{$src, $dst|$dst, $src}", []>,
                        Sched<[WriteVecStore]>;
} // ExeDomain = SSEPackedInt

//===---------------------------------------------------------------------===//
// Bitcast FR64 <-> GR64
//===---------------------------------------------------------------------===//
let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {
def VMOVSDto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                         "movq\t{$src, $dst|$dst, $src}",
                         [(set GR64:$dst, (bitconvert FR64:$src))]>,
                         VEX, Sched<[WriteVecMoveToGpr]>;

def MOVSDto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (bitconvert FR64:$src))]>,
                       Sched<[WriteVecMoveToGpr]>;
} // ExeDomain = SSEPackedInt, isCodeGenOnly = 1

//===---------------------------------------------------------------------===//
// Move Scalar Single to Double Int
//===---------------------------------------------------------------------===//
let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {
def VMOVSS2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set GR32:$dst, (bitconvert FR32:$src))]>,
                       VEX, Sched<[WriteVecMoveToGpr]>;
def MOVSS2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (bitconvert FR32:$src))]>,
                     Sched<[WriteVecMoveToGpr]>;
} // ExeDomain = SSEPackedInt, isCodeGenOnly = 1

let Predicates = [UseAVX] in {
  def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
            (VMOVDI2PDIrr GR32:$src)>;

  def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
            (VMOV64toPQIrr GR64:$src)>;

  // AVX 128-bit movd/movq instructions write zeros in the high 128-bit part.
  // These instructions also write zeros in the high part of a 256-bit register.
  def : Pat<(v4i32 (X86vzload32 addr:$src)),
            (VMOVDI2PDIrm addr:$src)>;
  def : Pat<(v8i32 (X86vzload32 addr:$src)),
            (SUBREG_TO_REG (i64 0), (v4i32 (VMOVDI2PDIrm addr:$src)), sub_xmm)>;
}

let Predicates = [UseSSE2] in {
  def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
            (MOVDI2PDIrr GR32:$src)>;

  def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
            (MOV64toPQIrr GR64:$src)>;
  def : Pat<(v4i32 (X86vzload32 addr:$src)),
            (MOVDI2PDIrm addr:$src)>;
}

// Before the MC layer of LLVM existed, clang emitted "movd" assembly instead of
// "movq" due to a MacOS parsing limitation. In order to parse old assembly, we
// add compatibility aliases.
def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
                (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
                (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
// Allow "vmovd" but print "vmovq" since we don't need compatibility for AVX.
def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
                (VMOV64toPQIrr VR128:$dst, GR64:$src), 0>;
def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
                (VMOVPQIto64rr GR64:$dst, VR128:$src), 0>;

//===---------------------------------------------------------------------===//
// SSE2 - Move Quadword
//===---------------------------------------------------------------------===//

//===---------------------------------------------------------------------===//
// Move Quadword Int to Packed Quadword Int
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt, SchedRW = [WriteVecLoad] in {
def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "vmovq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
                     VEX, Requires<[UseAVX]>, VEX_WIG;
def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                    "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst,
                      (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>,
                    XS, Requires<[UseSSE2]>; // SSE2 instruction with XS Prefix
} // ExeDomain, SchedRW
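// Both movq loads zero the upper 64 bits of the destination register, which
// is why the X86vzload64 patterns further below can select a plain
// (V)MOVQI2PQIrm with no extra zeroing.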
//===---------------------------------------------------------------------===//
// Move Packed Quadword Int to Quadword Int
//===---------------------------------------------------------------------===//
let ExeDomain = SSEPackedInt, SchedRW = [WriteVecStore] in {
def VMOVPQI2QImr : VS2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(store (i64 (extractelt (v2i64 VR128:$src),
                                                 (iPTR 0))), addr:$dst)]>,
                        VEX, VEX_WIG;
def MOVPQI2QImr : S2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}",
                      [(store (i64 (extractelt (v2i64 VR128:$src),
                                               (iPTR 0))), addr:$dst)]>;
} // ExeDomain, SchedRW

// For disassembler only
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
    SchedRW = [SchedWriteVecLogic.XMM] in {
def VMOVPQI2QIrr : VS2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                        "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_WIG;
def MOVPQI2QIrr : S2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}", []>;
}

def : InstAlias<"vmovq.s\t{$src, $dst|$dst, $src}",
                (VMOVPQI2QIrr VR128:$dst, VR128:$src), 0>;
def : InstAlias<"movq.s\t{$src, $dst|$dst, $src}",
                (MOVPQI2QIrr VR128:$dst, VR128:$src), 0>;

let Predicates = [UseAVX] in {
  def : Pat<(v2i64 (X86vzload64 addr:$src)),
            (VMOVQI2PQIrm addr:$src)>;
  def : Pat<(v4i64 (X86vzload64 addr:$src)),
            (SUBREG_TO_REG (i64 0), (v2i64 (VMOVQI2PQIrm addr:$src)), sub_xmm)>;

  def : Pat<(X86vextractstore64 (v2i64 VR128:$src), addr:$dst),
            (VMOVPQI2QImr addr:$dst, VR128:$src)>;
}

let Predicates = [UseSSE2] in {
  def : Pat<(v2i64 (X86vzload64 addr:$src)), (MOVQI2PQIrm addr:$src)>;

  def : Pat<(X86vextractstore64 (v2i64 VR128:$src), addr:$dst),
            (MOVPQI2QImr addr:$dst, VR128:$src)>;
}

//===---------------------------------------------------------------------===//
// Moving from XMM to XMM and clear upper 64 bits. Note, there is a bug in the
// IA32 document: movq xmm1, xmm2 does clear the high bits.
//===---------------------------------------------------------------------===//
let ExeDomain = SSEPackedInt, SchedRW = [SchedWriteVecLogic.XMM] in {
def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "vmovq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
                         XS, VEX, Requires<[UseAVX]>, VEX_WIG;
def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
                        XS, Requires<[UseSSE2]>;
} // ExeDomain, SchedRW

let Predicates = [UseAVX] in {
  def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
            (VMOVZPQILo2PQIrr VR128:$src)>;
}
let Predicates = [UseSSE2] in {
  def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
            (MOVZPQILo2PQIrr VR128:$src)>;
}

let Predicates = [UseAVX] in {
  def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
            (SUBREG_TO_REG (i32 0),
             (v2f64 (VMOVZPQILo2PQIrr
                     (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm)))),
             sub_xmm)>;
  def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),
            (SUBREG_TO_REG (i32 0),
             (v2i64 (VMOVZPQILo2PQIrr
                     (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm)))),
             sub_xmm)>;
}
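// X86vzmovl keeps element 0 and zeroes the remaining lanes. The 256-bit
// patterns above therefore extract the low xmm half, run VMOVZPQILo2PQIrr on
// it, and re-insert the result; SUBREG_TO_REG models the implicitly zeroed
// upper 128 bits.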
//===---------------------------------------------------------------------===//
// SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP
//===---------------------------------------------------------------------===//

multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
                              ValueType vt, RegisterClass RC, PatFrag mem_frag,
                              X86MemOperand x86memop, X86FoldableSchedWrite sched> {
def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (vt (OpNode RC:$src)))]>,
              Sched<[sched]>;
def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>,
              Sched<[sched.Folded]>;
}

let Predicates = [HasAVX, NoVLX] in {
  defm VMOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
                                      v4f32, VR128, loadv4f32, f128mem,
                                      SchedWriteFShuffle.XMM>, VEX, VEX_WIG;
  defm VMOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
                                      v4f32, VR128, loadv4f32, f128mem,
                                      SchedWriteFShuffle.XMM>, VEX, VEX_WIG;
  defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
                                       v8f32, VR256, loadv8f32, f256mem,
                                       SchedWriteFShuffle.YMM>, VEX, VEX_L, VEX_WIG;
  defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
                                       v8f32, VR256, loadv8f32, f256mem,
                                       SchedWriteFShuffle.YMM>, VEX, VEX_L, VEX_WIG;
}
defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
                                   memopv4f32, f128mem, SchedWriteFShuffle.XMM>;
defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
                                   memopv4f32, f128mem, SchedWriteFShuffle.XMM>;

let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(v4i32 (X86Movshdup VR128:$src)),
            (VMOVSHDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movshdup (load addr:$src))),
            (VMOVSHDUPrm addr:$src)>;
  def : Pat<(v4i32 (X86Movsldup VR128:$src)),
            (VMOVSLDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movsldup (load addr:$src))),
            (VMOVSLDUPrm addr:$src)>;
  def : Pat<(v8i32 (X86Movshdup VR256:$src)),
            (VMOVSHDUPYrr VR256:$src)>;
  def : Pat<(v8i32 (X86Movshdup (load addr:$src))),
            (VMOVSHDUPYrm addr:$src)>;
  def : Pat<(v8i32 (X86Movsldup VR256:$src)),
            (VMOVSLDUPYrr VR256:$src)>;
  def : Pat<(v8i32 (X86Movsldup (load addr:$src))),
            (VMOVSLDUPYrm addr:$src)>;
}

let Predicates = [UseSSE3] in {
  def : Pat<(v4i32 (X86Movshdup VR128:$src)),
            (MOVSHDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movshdup (memop addr:$src))),
            (MOVSHDUPrm addr:$src)>;
  def : Pat<(v4i32 (X86Movsldup VR128:$src)),
            (MOVSLDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movsldup (memop addr:$src))),
            (MOVSLDUPrm addr:$src)>;
}
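// movshdup duplicates the odd lanes ({s1,s1,s3,s3}) and movsldup the even
// lanes ({s0,s0,s2,s2}). Since the operation is pure lane movement, the
// v4i32/v8i32 patterns above can reuse the FP shuffles for integer vectors.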
//===---------------------------------------------------------------------===//
// SSE3 - Replicate Double FP - MOVDDUP
//===---------------------------------------------------------------------===//

multiclass sse3_replicate_dfp<string OpcodeStr, X86SchedWriteWidths sched> {
def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set VR128:$dst, (v2f64 (X86Movddup VR128:$src)))]>,
              Sched<[sched.XMM]>;
def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set VR128:$dst,
                (v2f64 (X86Movddup
                        (scalar_to_vector (loadf64 addr:$src)))))]>,
              Sched<[sched.XMM.Folded]>;
}

// FIXME: Merge with above classes when there are patterns for the ymm version
multiclass sse3_replicate_dfp_y<string OpcodeStr, X86SchedWriteWidths sched> {
def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set VR256:$dst, (v4f64 (X86Movddup VR256:$src)))]>,
              Sched<[sched.YMM]>;
def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set VR256:$dst,
                (v4f64 (X86Movddup (loadv4f64 addr:$src))))]>,
              Sched<[sched.YMM.Folded]>;
}

let Predicates = [HasAVX, NoVLX] in {
  defm VMOVDDUP : sse3_replicate_dfp<"vmovddup", SchedWriteFShuffle>,
                                     VEX, VEX_WIG;
  defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup", SchedWriteFShuffle>,
                                        VEX, VEX_L, VEX_WIG;
}

defm MOVDDUP : sse3_replicate_dfp<"movddup", SchedWriteFShuffle>;

let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(X86Movddup (v2f64 (X86vzload64 addr:$src))),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
}

let Predicates = [UseSSE3] in {
  def : Pat<(X86Movddup (v2f64 (X86vzload64 addr:$src))),
            (MOVDDUPrm addr:$src)>;
}
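// movddup broadcasts the low double into both lanes, so a 64-bit
// zero-extending load feeding it can be folded into the rm form above: only
// the low qword is read either way.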
//===---------------------------------------------------------------------===//
// SSE3 - Move Unaligned Integer
//===---------------------------------------------------------------------===//

let Predicates = [HasAVX] in {
def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                    "vlddqu\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>,
                    Sched<[SchedWriteVecMoveLS.XMM.RM]>, VEX, VEX_WIG;
def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                     "vlddqu\t{$src, $dst|$dst, $src}",
                     [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>,
                     Sched<[SchedWriteVecMoveLS.YMM.RM]>, VEX, VEX_L, VEX_WIG;
}

def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "lddqu\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>,
                   Sched<[SchedWriteVecMoveLS.XMM.RM]>;

//===---------------------------------------------------------------------===//
// SSE3 - Arithmetic
//===---------------------------------------------------------------------===//

multiclass sse3_addsub<string OpcodeStr, ValueType vt, RegisterClass RC,
                       X86MemOperand x86memop, X86FoldableSchedWrite sched,
                       PatFrag ld_frag, bit Is2Addr = 1> {
let Uses = [MXCSR], mayRaiseFPException = 1 in {
  def rr : I<0xD0, MRMSrcReg,
             (outs RC:$dst), (ins RC:$src1, RC:$src2),
             !if(Is2Addr,
                 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
             [(set RC:$dst, (vt (X86Addsub RC:$src1, RC:$src2)))]>,
             Sched<[sched]>;
  def rm : I<0xD0, MRMSrcMem,
             (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
             !if(Is2Addr,
                 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
             [(set RC:$dst, (vt (X86Addsub RC:$src1, (ld_frag addr:$src2))))]>,
             Sched<[sched.Folded, sched.ReadAfterFold]>;
}
}

let Predicates = [HasAVX] in {
  let ExeDomain = SSEPackedSingle in {
    defm VADDSUBPS : sse3_addsub<"vaddsubps", v4f32, VR128, f128mem,
                                 SchedWriteFAddSizes.PS.XMM, loadv4f32, 0>,
                                 XD, VEX_4V, VEX_WIG;
    defm VADDSUBPSY : sse3_addsub<"vaddsubps", v8f32, VR256, f256mem,
                                  SchedWriteFAddSizes.PS.YMM, loadv8f32, 0>,
                                  XD, VEX_4V, VEX_L, VEX_WIG;
  }
  let ExeDomain = SSEPackedDouble in {
    defm VADDSUBPD : sse3_addsub<"vaddsubpd", v2f64, VR128, f128mem,
                                 SchedWriteFAddSizes.PD.XMM, loadv2f64, 0>,
                                 PD, VEX_4V, VEX_WIG;
    defm VADDSUBPDY : sse3_addsub<"vaddsubpd", v4f64, VR256, f256mem,
                                  SchedWriteFAddSizes.PD.YMM, loadv4f64, 0>,
                                  PD, VEX_4V, VEX_L, VEX_WIG;
  }
}
let Constraints = "$src1 = $dst", Predicates = [UseSSE3] in {
  let ExeDomain = SSEPackedSingle in
  defm ADDSUBPS : sse3_addsub<"addsubps", v4f32, VR128, f128mem,
                              SchedWriteFAddSizes.PS.XMM, memopv4f32>, XD;
  let ExeDomain = SSEPackedDouble in
  defm ADDSUBPD : sse3_addsub<"addsubpd", v2f64, VR128, f128mem,
                              SchedWriteFAddSizes.PD.XMM, memopv2f64>, PD;
}
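// X86Addsub subtracts in even lanes and adds in odd lanes; e.g. addsubps
// computes {a0-b0, a1+b1, a2-b2, a3+b3}. That asymmetry is why none of these
// defs set isCommutable.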
//===---------------------------------------------------------------------===//
// SSE3 Instructions
//===---------------------------------------------------------------------===//

// Horizontal ops
multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
                   X86MemOperand x86memop, SDNode OpNode,
                   X86FoldableSchedWrite sched, PatFrag ld_frag,
                   bit Is2Addr = 1> {
let Uses = [MXCSR], mayRaiseFPException = 1 in {
  def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                !if(Is2Addr,
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))]>,
                Sched<[sched]>;

  def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
                !if(Is2Addr,
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                [(set RC:$dst, (vt (OpNode RC:$src1, (ld_frag addr:$src2))))]>,
                Sched<[sched.Folded, sched.ReadAfterFold]>;
}
}
multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
                  X86MemOperand x86memop, SDNode OpNode,
                  X86FoldableSchedWrite sched, PatFrag ld_frag,
                  bit Is2Addr = 1> {
let Uses = [MXCSR], mayRaiseFPException = 1 in {
  def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
               !if(Is2Addr,
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
               [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))]>,
               Sched<[sched]>;

  def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
               !if(Is2Addr,
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
               [(set RC:$dst, (vt (OpNode RC:$src1, (ld_frag addr:$src2))))]>,
               Sched<[sched.Folded, sched.ReadAfterFold]>;
}
}

let Predicates = [HasAVX] in {
  let ExeDomain = SSEPackedSingle in {
    defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
                           X86fhadd, WriteFHAdd, loadv4f32, 0>, VEX_4V, VEX_WIG;
    defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
                           X86fhsub, WriteFHAdd, loadv4f32, 0>, VEX_4V, VEX_WIG;
    defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
                            X86fhadd, WriteFHAddY, loadv8f32, 0>, VEX_4V, VEX_L, VEX_WIG;
    defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
                            X86fhsub, WriteFHAddY, loadv8f32, 0>, VEX_4V, VEX_L, VEX_WIG;
  }
  let ExeDomain = SSEPackedDouble in {
    defm VHADDPD : S3_Int<0x7C, "vhaddpd", v2f64, VR128, f128mem,
                          X86fhadd, WriteFHAdd, loadv2f64, 0>, VEX_4V, VEX_WIG;
    defm VHSUBPD : S3_Int<0x7D, "vhsubpd", v2f64, VR128, f128mem,
                          X86fhsub, WriteFHAdd, loadv2f64, 0>, VEX_4V, VEX_WIG;
    defm VHADDPDY : S3_Int<0x7C, "vhaddpd", v4f64, VR256, f256mem,
                           X86fhadd, WriteFHAddY, loadv4f64, 0>, VEX_4V, VEX_L, VEX_WIG;
    defm VHSUBPDY : S3_Int<0x7D, "vhsubpd", v4f64, VR256, f256mem,
                           X86fhsub, WriteFHAddY, loadv4f64, 0>, VEX_4V, VEX_L, VEX_WIG;
  }
}

let Constraints = "$src1 = $dst" in {
  let ExeDomain = SSEPackedSingle in {
    defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem, X86fhadd,
                          WriteFHAdd, memopv4f32>;
    defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem, X86fhsub,
                          WriteFHAdd, memopv4f32>;
  }
  let ExeDomain = SSEPackedDouble in {
    defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem, X86fhadd,
                         WriteFHAdd, memopv2f64>;
    defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem, X86fhsub,
                         WriteFHAdd, memopv2f64>;
  }
}
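// haddps packs pairwise sums, {a0+a1, a2+a3, b0+b1, b2+b3}, and hsub forms
// the corresponding pairwise differences; the YMM variants repeat the
// operation independently within each 128-bit lane.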
//===---------------------------------------------------------------------===//
// SSSE3 - Packed Absolute Instructions
//===---------------------------------------------------------------------===//

/// SS3I_unop_rm - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
multiclass SS3I_unop_rm<bits<8> opc, string OpcodeStr, ValueType vt,
                        SDNode OpNode, X86SchedWriteWidths sched, PatFrag ld_frag> {
  def rr : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (vt (OpNode VR128:$src)))]>,
                 Sched<[sched.XMM]>;

  def rm : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                 (ins i128mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst,
                   (vt (OpNode (ld_frag addr:$src))))]>,
                 Sched<[sched.XMM.Folded]>;
}

/// SS3I_unop_rm_y - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
multiclass SS3I_unop_rm_y<bits<8> opc, string OpcodeStr, ValueType vt,
                          SDNode OpNode, X86SchedWriteWidths sched> {
  def Yrr : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
                  (ins VR256:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR256:$dst, (vt (OpNode VR256:$src)))]>,
                  Sched<[sched.YMM]>;

  def Yrm : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
                  (ins i256mem:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR256:$dst,
                    (vt (OpNode (load addr:$src))))]>,
                  Sched<[sched.YMM.Folded]>;
}

let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
  defm VPABSB : SS3I_unop_rm<0x1C, "vpabsb", v16i8, abs, SchedWriteVecALU,
                             load>, VEX, VEX_WIG;
  defm VPABSW : SS3I_unop_rm<0x1D, "vpabsw", v8i16, abs, SchedWriteVecALU,
                             load>, VEX, VEX_WIG;
}
let Predicates = [HasAVX, NoVLX] in {
  defm VPABSD : SS3I_unop_rm<0x1E, "vpabsd", v4i32, abs, SchedWriteVecALU,
                             load>, VEX, VEX_WIG;
}
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
  defm VPABSB : SS3I_unop_rm_y<0x1C, "vpabsb", v32i8, abs, SchedWriteVecALU>,
                VEX, VEX_L, VEX_WIG;
  defm VPABSW : SS3I_unop_rm_y<0x1D, "vpabsw", v16i16, abs, SchedWriteVecALU>,
                VEX, VEX_L, VEX_WIG;
}
let Predicates = [HasAVX2, NoVLX] in {
  defm VPABSD : SS3I_unop_rm_y<0x1E, "vpabsd", v8i32, abs, SchedWriteVecALU>,
                VEX, VEX_L, VEX_WIG;
}

defm PABSB : SS3I_unop_rm<0x1C, "pabsb", v16i8, abs, SchedWriteVecALU,
                          memop>;
defm PABSW : SS3I_unop_rm<0x1D, "pabsw", v8i16, abs, SchedWriteVecALU,
                          memop>;
defm PABSD : SS3I_unop_rm<0x1E, "pabsd", v4i32, abs, SchedWriteVecALU,
                          memop>;
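// pabsb/pabsw/pabsd compute the lane-wise absolute value; mapping them onto
// the generic 'abs' SDNode lets target-independent combines select them
// without target-specific patterns.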
//===---------------------------------------------------------------------===//
// SSSE3 - Packed Binary Operator Instructions
//===---------------------------------------------------------------------===//

/// SS3I_binop_rm - Simple SSSE3 bin op
multiclass SS3I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                         ValueType DstVT, ValueType OpVT, RegisterClass RC,
                         PatFrag memop_frag, X86MemOperand x86memop,
                         X86FoldableSchedWrite sched, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : SS38I<opc, MRMSrcReg, (outs RC:$dst),
                 (ins RC:$src1, RC:$src2),
                 !if(Is2Addr,
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set RC:$dst, (DstVT (OpNode (OpVT RC:$src1), RC:$src2)))]>,
                 Sched<[sched]>;
  def rm : SS38I<opc, MRMSrcMem, (outs RC:$dst),
                 (ins RC:$src1, x86memop:$src2),
                 !if(Is2Addr,
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set RC:$dst,
                   (DstVT (OpNode (OpVT RC:$src1), (memop_frag addr:$src2))))]>,
                 Sched<[sched.Folded, sched.ReadAfterFold]>;
}

/// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
                             Intrinsic IntId128, X86FoldableSchedWrite sched,
                             PatFrag ld_frag, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src1, VR128:$src2),
                 !if(Is2Addr,
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                 Sched<[sched]>;
  def rm : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                 (ins VR128:$src1, i128mem:$src2),
                 !if(Is2Addr,
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set VR128:$dst,
                   (IntId128 VR128:$src1, (ld_frag addr:$src2)))]>,
                 Sched<[sched.Folded, sched.ReadAfterFold]>;
}

multiclass SS3I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
                               Intrinsic IntId256,
                               X86FoldableSchedWrite sched> {
  let isCommutable = 1 in
  def Yrr : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
                  (ins VR256:$src1, VR256:$src2),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  [(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>,
                  Sched<[sched]>;
  def Yrm : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
                  (ins VR256:$src1, i256mem:$src2),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  [(set VR256:$dst,
                    (IntId256 VR256:$src1, (load addr:$src2)))]>,
                  Sched<[sched.Folded, sched.ReadAfterFold]>;
}

let ImmT = NoImm, Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
let isCommutable = 0 in {
  defm VPSHUFB : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v16i8, v16i8,
                               VR128, load, i128mem,
                               SchedWriteVarShuffle.XMM, 0>, VEX_4V, VEX_WIG;
  defm VPMADDUBSW : SS3I_binop_rm<0x04, "vpmaddubsw", X86vpmaddubsw, v8i16,
                                  v16i8, VR128, load, i128mem,
                                  SchedWriteVecIMul.XMM, 0>, VEX_4V, VEX_WIG;
}
defm VPMULHRSW : SS3I_binop_rm<0x0B, "vpmulhrsw", X86mulhrs, v8i16, v8i16,
                               VR128, load, i128mem,
                               SchedWriteVecIMul.XMM, 0>, VEX_4V, VEX_WIG;
}

let ImmT = NoImm, Predicates = [HasAVX] in {
let isCommutable = 0 in {
  defm VPHADDW : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v8i16, v8i16, VR128,
                               load, i128mem,
                               SchedWritePHAdd.XMM, 0>, VEX_4V, VEX_WIG;
  defm VPHADDD : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v4i32, v4i32, VR128,
                               load, i128mem,
                               SchedWritePHAdd.XMM, 0>, VEX_4V, VEX_WIG;
  defm VPHSUBW : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v8i16, v8i16, VR128,
                               load, i128mem,
                               SchedWritePHAdd.XMM, 0>, VEX_4V, VEX_WIG;
  defm VPHSUBD : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v4i32, v4i32, VR128,
                               load, i128mem,
                               SchedWritePHAdd.XMM, 0>, VEX_4V, VEX_WIG;
  defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb",
                                   int_x86_ssse3_psign_b_128,
                                   SchedWriteVecALU.XMM, load, 0>, VEX_4V, VEX_WIG;
  defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw",
                                   int_x86_ssse3_psign_w_128,
                                   SchedWriteVecALU.XMM, load, 0>, VEX_4V, VEX_WIG;
  defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd",
                                   int_x86_ssse3_psign_d_128,
                                   SchedWriteVecALU.XMM, load, 0>, VEX_4V, VEX_WIG;
  defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw",
                                    int_x86_ssse3_phadd_sw_128,
                                    SchedWritePHAdd.XMM, load, 0>, VEX_4V, VEX_WIG;
  defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw",
                                    int_x86_ssse3_phsub_sw_128,
                                    SchedWritePHAdd.XMM, load, 0>, VEX_4V, VEX_WIG;
}
}

let ImmT = NoImm, Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
let isCommutable = 0 in {
  defm VPSHUFBY : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v32i8, v32i8,
                                VR256, load, i256mem,
                                SchedWriteVarShuffle.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
  defm VPMADDUBSWY : SS3I_binop_rm<0x04, "vpmaddubsw", X86vpmaddubsw, v16i16,
                                   v32i8, VR256, load, i256mem,
                                   SchedWriteVecIMul.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
}
defm VPMULHRSWY : SS3I_binop_rm<0x0B, "vpmulhrsw", X86mulhrs, v16i16, v16i16,
                                VR256, load, i256mem,
                                SchedWriteVecIMul.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
}

let ImmT = NoImm, Predicates = [HasAVX2] in {
let isCommutable = 0 in {
  defm VPHADDWY : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v16i16, v16i16,
                                VR256, load, i256mem,
                                SchedWritePHAdd.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
  defm VPHADDDY : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v8i32, v8i32, VR256,
                                load, i256mem,
                                SchedWritePHAdd.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
  defm VPHSUBWY : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v16i16, v16i16,
                                VR256, load, i256mem,
                                SchedWritePHAdd.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
  defm VPHSUBDY : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v8i32, v8i32, VR256,
                                load, i256mem,
                                SchedWritePHAdd.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
  defm VPSIGNB : SS3I_binop_rm_int_y<0x08, "vpsignb", int_x86_avx2_psign_b,
                                     SchedWriteVecALU.YMM>, VEX_4V, VEX_L, VEX_WIG;
  defm VPSIGNW : SS3I_binop_rm_int_y<0x09, "vpsignw", int_x86_avx2_psign_w,
                                     SchedWriteVecALU.YMM>, VEX_4V, VEX_L, VEX_WIG;
  defm VPSIGND : SS3I_binop_rm_int_y<0x0A, "vpsignd", int_x86_avx2_psign_d,
                                     SchedWriteVecALU.YMM>, VEX_4V, VEX_L, VEX_WIG;
  defm VPHADDSW : SS3I_binop_rm_int_y<0x03, "vphaddsw",
                                      int_x86_avx2_phadd_sw,
                                      SchedWritePHAdd.YMM>, VEX_4V, VEX_L, VEX_WIG;
  defm VPHSUBSW : SS3I_binop_rm_int_y<0x07, "vphsubsw",
                                      int_x86_avx2_phsub_sw,
                                      SchedWritePHAdd.YMM>, VEX_4V, VEX_L, VEX_WIG;
}
}

// None of these have i8 immediate fields.
let ImmT = NoImm, Constraints = "$src1 = $dst" in {
let isCommutable = 0 in {
  defm PHADDW : SS3I_binop_rm<0x01, "phaddw", X86hadd, v8i16, v8i16, VR128,
                              memop, i128mem, SchedWritePHAdd.XMM>;
  defm PHADDD : SS3I_binop_rm<0x02, "phaddd", X86hadd, v4i32, v4i32, VR128,
                              memop, i128mem, SchedWritePHAdd.XMM>;
  defm PHSUBW : SS3I_binop_rm<0x05, "phsubw", X86hsub, v8i16, v8i16, VR128,
                              memop, i128mem, SchedWritePHAdd.XMM>;
  defm PHSUBD : SS3I_binop_rm<0x06, "phsubd", X86hsub, v4i32, v4i32, VR128,
                              memop, i128mem, SchedWritePHAdd.XMM>;
  defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", int_x86_ssse3_psign_b_128,
                                  SchedWriteVecALU.XMM, memop>;
  defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", int_x86_ssse3_psign_w_128,
                                  SchedWriteVecALU.XMM, memop>;
  defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", int_x86_ssse3_psign_d_128,
                                  SchedWriteVecALU.XMM, memop>;
  defm PSHUFB : SS3I_binop_rm<0x00, "pshufb", X86pshufb, v16i8, v16i8, VR128,
                              memop, i128mem, SchedWriteVarShuffle.XMM>;
  defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw",
                                   int_x86_ssse3_phadd_sw_128,
                                   SchedWritePHAdd.XMM, memop>;
  defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw",
                                   int_x86_ssse3_phsub_sw_128,
                                   SchedWritePHAdd.XMM, memop>;
  defm PMADDUBSW : SS3I_binop_rm<0x04, "pmaddubsw", X86vpmaddubsw, v8i16,
                                 v16i8, VR128, memop, i128mem,
                                 SchedWriteVecIMul.XMM>;
}
defm PMULHRSW : SS3I_binop_rm<0x0B, "pmulhrsw", X86mulhrs, v8i16, v8i16,
                              VR128, memop, i128mem, SchedWriteVecIMul.XMM>;
}
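// pmulhrsw computes ((a * b + 0x4000) >> 15) per i16 lane, i.e. a rounded
// fixed-point multiply returning the high half; the horizontal phadd*/phsub*
// ops pair adjacent elements, which is why they stay non-commutable.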
//===---------------------------------------------------------------------===//
// SSSE3 - Packed Align Instruction Patterns
//===---------------------------------------------------------------------===//

multiclass ssse3_palignr<string asm, ValueType VT, RegisterClass RC,
                         PatFrag memop_frag, X86MemOperand x86memop,
                         X86FoldableSchedWrite sched, bit Is2Addr = 1> {
  let hasSideEffects = 0 in {
  def rri : SS3AI<0x0F, MRMSrcReg, (outs RC:$dst),
                  (ins RC:$src1, RC:$src2, u8imm:$src3),
                  !if(Is2Addr,
                      !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                      !strconcat(asm,
                                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                  [(set RC:$dst, (VT (X86PAlignr RC:$src1, RC:$src2, (i8 timm:$src3))))]>,
                  Sched<[sched]>;
  def rmi : SS3AI<0x0F, MRMSrcMem, (outs RC:$dst),
                  (ins RC:$src1, x86memop:$src2, u8imm:$src3),
                  !if(Is2Addr,
                      !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                      !strconcat(asm,
                                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                  [(set RC:$dst, (VT (X86PAlignr RC:$src1,
                                                 (memop_frag addr:$src2),
                                                 (i8 timm:$src3))))]>,
                  Sched<[sched.Folded, sched.ReadAfterFold]>;
  }
}

let Predicates = [HasAVX, NoVLX_Or_NoBWI] in
  defm VPALIGNR : ssse3_palignr<"vpalignr", v16i8, VR128, load, i128mem,
                                SchedWriteShuffle.XMM, 0>, VEX_4V, VEX_WIG;
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in
  defm VPALIGNRY : ssse3_palignr<"vpalignr", v32i8, VR256, load, i256mem,
                                 SchedWriteShuffle.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
let Constraints = "$src1 = $dst", Predicates = [UseSSSE3] in
  defm PALIGNR : ssse3_palignr<"palignr", v16i8, VR128, memop, i128mem,
                               SchedWriteShuffle.XMM>;
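// palignr concatenates $src1:$src2 into one double-width value, shifts it
// right by $src3 bytes, and keeps the low half; the 256-bit form does this
// independently in each 128-bit lane.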
//===---------------------------------------------------------------------===//
// SSE3 - Thread synchronization
//===---------------------------------------------------------------------===//

let SchedRW = [WriteSystem] in {
let Uses = [EAX, ECX, EDX] in
def MONITOR32rrr : I<0x01, MRM_C8, (outs), (ins), "monitor", []>,
                     TB, Requires<[HasSSE3, Not64BitMode]>;
let Uses = [RAX, ECX, EDX] in
def MONITOR64rrr : I<0x01, MRM_C8, (outs), (ins), "monitor", []>,
                     TB, Requires<[HasSSE3, In64BitMode]>;

let Uses = [ECX, EAX] in
def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait",
                [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
} // SchedRW

def : InstAlias<"mwait\t{%eax, %ecx|ecx, eax}", (MWAITrr)>, Requires<[Not64BitMode]>;
def : InstAlias<"mwait\t{%rax, %rcx|rcx, rax}", (MWAITrr)>, Requires<[In64BitMode]>;

def : InstAlias<"monitor\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITOR32rrr)>,
      Requires<[Not64BitMode]>;
def : InstAlias<"monitor\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITOR64rrr)>,
      Requires<[In64BitMode]>;
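// monitor and mwait take all of their operands implicitly (EAX/RAX, ECX,
// EDX), so the aliases above only accept the explicit-operand spellings used
// by older assembly; they do not change the encoding.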
//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Move with Sign/Zero Extend
// NOTE: Any Extend is promoted to Zero Extend in X86ISelDAGToDAG.cpp
//===----------------------------------------------------------------------===//

multiclass SS41I_pmovx_rrrm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
                            RegisterClass OutRC, RegisterClass InRC,
                            X86FoldableSchedWrite sched> {
  def rr : SS48I<opc, MRMSrcReg, (outs OutRC:$dst), (ins InRC:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>,
                 Sched<[sched]>;

  def rm : SS48I<opc, MRMSrcMem, (outs OutRC:$dst), (ins MemOp:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>,
                 Sched<[sched.Folded]>;
}

multiclass SS41I_pmovx_rm_all<bits<8> opc, string OpcodeStr,
                              X86MemOperand MemOp, X86MemOperand MemYOp,
                              Predicate prd> {
  defm NAME : SS41I_pmovx_rrrm<opc, OpcodeStr, MemOp, VR128, VR128,
                               SchedWriteShuffle.XMM>;
  let Predicates = [HasAVX, prd] in
    defm V#NAME : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemOp,
                                   VR128, VR128, SchedWriteShuffle.XMM>,
                                   VEX, VEX_WIG;
  let Predicates = [HasAVX2, prd] in
    defm V#NAME#Y : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemYOp,
                                     VR256, VR128, WriteVPMOV256>,
                                     VEX, VEX_L, VEX_WIG;
}

multiclass SS41I_pmovx_rm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
                          X86MemOperand MemYOp, Predicate prd> {
  defm PMOVSX#NAME : SS41I_pmovx_rm_all<opc, !strconcat("pmovsx", OpcodeStr),
                                        MemOp, MemYOp, prd>;
  defm PMOVZX#NAME : SS41I_pmovx_rm_all<!add(opc, 0x10),
                                        !strconcat("pmovzx", OpcodeStr),
                                        MemOp, MemYOp, prd>;
}

defm BW : SS41I_pmovx_rm<0x20, "bw", i64mem, i128mem, NoVLX_Or_NoBWI>;
defm WD : SS41I_pmovx_rm<0x23, "wd", i64mem, i128mem, NoVLX>;
defm DQ : SS41I_pmovx_rm<0x25, "dq", i64mem, i128mem, NoVLX>;

defm BD : SS41I_pmovx_rm<0x21, "bd", i32mem, i64mem, NoVLX>;
defm WQ : SS41I_pmovx_rm<0x24, "wq", i32mem, i64mem, NoVLX>;

defm BQ : SS41I_pmovx_rm<0x22, "bq", i16mem, i32mem, NoVLX>;
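// Each defm above expands to, e.g., PMOVSXBWrr/rm plus VPMOVSXBWrr/rm and
// the 256-bit VPMOVSXBWYrr/Yrm, with the zero-extend twins (pmovzxbw and
// friends) at opcode + 0x10.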
// AVX2 Patterns
multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy,
                                     SDNode ExtOp, SDNode InVecOp> {
  // Register-Register patterns
  let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
  def : Pat<(v16i16 (ExtOp (v16i8 VR128:$src))),
            (!cast<I>(OpcPrefix#BWYrr) VR128:$src)>;
  }
  let Predicates = [HasAVX2, NoVLX] in {
  def : Pat<(v8i32 (InVecOp (v16i8 VR128:$src))),
            (!cast<I>(OpcPrefix#BDYrr) VR128:$src)>;
  def : Pat<(v4i64 (InVecOp (v16i8 VR128:$src))),
            (!cast<I>(OpcPrefix#BQYrr) VR128:$src)>;

  def : Pat<(v8i32 (ExtOp (v8i16 VR128:$src))),
            (!cast<I>(OpcPrefix#WDYrr) VR128:$src)>;
  def : Pat<(v4i64 (InVecOp (v8i16 VR128:$src))),
            (!cast<I>(OpcPrefix#WQYrr) VR128:$src)>;

  def : Pat<(v4i64 (ExtOp (v4i32 VR128:$src))),
            (!cast<I>(OpcPrefix#DQYrr) VR128:$src)>;
  }

  // Simple Register-Memory patterns
  let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
  def : Pat<(v16i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
            (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;

  def : Pat<(v16i16 (ExtOp (loadv16i8 addr:$src))),
            (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
  }

  let Predicates = [HasAVX2, NoVLX] in {
  def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
            (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
  def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
            (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;

  def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
            (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
  def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
            (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;

  def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
            (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
  }

  // AVX2 Register-Memory patterns
  let Predicates = [HasAVX2, NoVLX] in {
  def : Pat<(v8i32 (ExtOp (loadv8i16 addr:$src))),
            (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;

  def : Pat<(v8i32 (InVecOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
  def : Pat<(v8i32 (InVecOp (bc_v16i8 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
            (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
  def : Pat<(v8i32 (InVecOp (bc_v16i8 (v2i64 (X86vzload64 addr:$src))))),
            (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;

  def : Pat<(v4i64 (ExtOp (loadv4i32 addr:$src))),
            (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;

  def : Pat<(v4i64 (InVecOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
            (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
  def : Pat<(v4i64 (InVecOp (bc_v16i8 (v2i64 (X86vzload32 addr:$src))))),
            (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;

  def : Pat<(v4i64 (InVecOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
  def : Pat<(v4i64 (InVecOp (bc_v8i16 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
            (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
  def : Pat<(v4i64 (InVecOp (bc_v8i16 (v2i64 (X86vzload64 addr:$src))))),
            (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
  }
}

defm : SS41I_pmovx_avx2_patterns<"VPMOVSX", "s", sext, sext_invec>;
defm : SS41I_pmovx_avx2_patterns<"VPMOVZX", "z", zext, zext_invec>;

// SSE4.1/AVX patterns.
multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
                                SDNode ExtOp> {
  let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
  def : Pat<(v8i16 (ExtOp (v16i8 VR128:$src))),
            (!cast<I>(OpcPrefix#BWrr) VR128:$src)>;
  }
  let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(v4i32 (ExtOp (v16i8 VR128:$src))),
            (!cast<I>(OpcPrefix#BDrr) VR128:$src)>;
  def : Pat<(v2i64 (ExtOp (v16i8 VR128:$src))),
            (!cast<I>(OpcPrefix#BQrr) VR128:$src)>;

  def : Pat<(v4i32 (ExtOp (v8i16 VR128:$src))),
            (!cast<I>(OpcPrefix#WDrr) VR128:$src)>;
  def : Pat<(v2i64 (ExtOp (v8i16 VR128:$src))),
            (!cast<I>(OpcPrefix#WQrr) VR128:$src)>;

  def : Pat<(v2i64 (ExtOp (v4i32 VR128:$src))),
            (!cast<I>(OpcPrefix#DQrr) VR128:$src)>;
  }
  let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
  def : Pat<(v8i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
            (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
  }
  let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
            (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
  def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
            (!cast<I>(OpcPrefix#BQrm) addr:$src)>;

  def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
            (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
  def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
            (!cast<I>(OpcPrefix#WQrm) addr:$src)>;

  def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
            (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
  }
  let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
  def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
  def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
            (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
  def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2i64 (X86vzload64 addr:$src))))),
            (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
  def : Pat<(v8i16 (ExtOp (loadv16i8 addr:$src))),
            (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
  }
  let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(v4i32 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
            (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
  def : Pat<(v4i32 (ExtOp (bc_v16i8 (v4i32 (X86vzload32 addr:$src))))),
            (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
  def : Pat<(v4i32 (ExtOp (loadv16i8 addr:$src))),
            (!cast<I>(OpcPrefix#BDrm) addr:$src)>;

  def : Pat<(v2i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (extloadi32i16 addr:$src)))))),
            (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (loadv16i8 addr:$src))),
            (!cast<I>(OpcPrefix#BQrm) addr:$src)>;

  def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
  def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
            (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
  def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2i64 (X86vzload64 addr:$src))))),
            (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
  def : Pat<(v4i32 (ExtOp (loadv8i16 addr:$src))),
            (!cast<I>(OpcPrefix#WDrm) addr:$src)>;

  def : Pat<(v2i64 (ExtOp (bc_v8i16 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
            (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (bc_v8i16 (v4i32 (X86vzload32 addr:$src))))),
            (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (loadv8i16 addr:$src))),
            (!cast<I>(OpcPrefix#WQrm) addr:$src)>;

  def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
            (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2i64 (X86vzload64 addr:$src))))),
            (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (loadv4i32 addr:$src))),
            (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
  }
}

defm : SS41I_pmovx_patterns<"VPMOVSX", "s", sext_invec>;
defm : SS41I_pmovx_patterns<"VPMOVZX", "z", zext_invec>;

let Predicates = [UseSSE41] in {
  defm : SS41I_pmovx_patterns<"PMOVSX", "s", sext_invec>;
  defm : SS41I_pmovx_patterns<"PMOVZX", "z", zext_invec>;
}
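// Example: (v8i16 (sext_invec (v16i8 VR128:$x))) selects PMOVSXBWrr here,
// and the scalar_to_vector / X86vzload forms above allow a partial vector
// load to be folded straight into the rm variants.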
//===----------------------------------------------------------------------===//
// SSE4.1 - Extract Instructions
//===----------------------------------------------------------------------===//

/// SS41I_extract8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
                   (ins VR128:$src1, u8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set GR32orGR64:$dst, (X86pextrb (v16i8 VR128:$src1),
                                                     timm:$src2))]>,
                   Sched<[WriteVecExtract]>;
  let hasSideEffects = 0, mayStore = 1 in
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                   (ins i8mem:$dst, VR128:$src1, u8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), timm:$src2))),
                           addr:$dst)]>, Sched<[WriteVecExtractSt]>;
}

let Predicates = [HasAVX, NoBWI] in
  defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX, VEX_WIG;

defm PEXTRB : SS41I_extract8<0x14, "pextrb">;

/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
  def rr_REV : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
                       (ins VR128:$src1, u8imm:$src2),
                       !strconcat(OpcodeStr,
                                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
                       Sched<[WriteVecExtract]>, FoldGenData<NAME#rr>;

  let hasSideEffects = 0, mayStore = 1 in
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                   (ins i16mem:$dst, VR128:$src1, u8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(store (i16 (trunc (X86pextrw (v8i16 VR128:$src1), timm:$src2))),
                           addr:$dst)]>, Sched<[WriteVecExtractSt]>;
}

let Predicates = [HasAVX, NoBWI] in
  defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX, VEX_WIG;

defm PEXTRW : SS41I_extract16<0x15, "pextrw">;

/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                   (ins VR128:$src1, u8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set GR32:$dst,
                     (extractelt (v4i32 VR128:$src1), imm:$src2))]>,
                   Sched<[WriteVecExtract]>;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                   (ins i32mem:$dst, VR128:$src1, u8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
                           addr:$dst)]>, Sched<[WriteVecExtractSt]>;
}

let Predicates = [HasAVX, NoDQI] in
  defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;

defm PEXTRD : SS41I_extract32<0x16, "pextrd">;

/// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
                   (ins VR128:$src1, u8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set GR64:$dst,
                     (extractelt (v2i64 VR128:$src1), imm:$src2))]>,
                   Sched<[WriteVecExtract]>;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                   (ins i64mem:$dst, VR128:$src1, u8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
                           addr:$dst)]>, Sched<[WriteVecExtractSt]>;
}

let Predicates = [HasAVX, NoDQI] in
  defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;

defm PEXTRQ : SS41I_extract64<0x16, "pextrq">, REX_W;

/// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
/// destination
multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
                   (ins VR128:$src1, u8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set GR32orGR64:$dst,
                     (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
                   Sched<[WriteVecExtract]>;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                   (ins f32mem:$dst, VR128:$src1, u8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
                           addr:$dst)]>, Sched<[WriteVecExtractSt]>;
}

let ExeDomain = SSEPackedSingle in {
  let Predicates = [UseAVX] in
    defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX, VEX_WIG;
  defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
}
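// extractps moves a 32-bit FP lane to a GPR or to memory, so the patterns go
// through bc_v4i32; e.g. "extractps $2, %xmm0, %eax" copies lane 2 of %xmm0
// into EAX as a raw 32-bit value.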
//===----------------------------------------------------------------------===//
// SSE4.1 - Insert Instructions
//===----------------------------------------------------------------------===//

multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, GR32orGR64:$src2, u8imm:$src3),
                   !if(Is2Addr,
                       !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                       !strconcat(asm,
                                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                   [(set VR128:$dst,
                     (X86pinsrb VR128:$src1, GR32orGR64:$src2, timm:$src3))]>,
                   Sched<[WriteVecInsert, ReadDefault, ReadInt2Fpu]>;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i8mem:$src2, u8imm:$src3),
                   !if(Is2Addr,
                       !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                       !strconcat(asm,
                                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                   [(set VR128:$dst,
                     (X86pinsrb VR128:$src1, (extloadi8 addr:$src2), timm:$src3))]>,
                   Sched<[WriteVecInsert.Folded, WriteVecInsert.ReadAfterFold]>;
}

let Predicates = [HasAVX, NoBWI] in
  defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V, VEX_WIG;
let Constraints = "$src1 = $dst" in
  defm PINSRB : SS41I_insert8<0x20, "pinsrb">;

multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, GR32:$src2, u8imm:$src3),
                   !if(Is2Addr,
                       !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                       !strconcat(asm,
                                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                   [(set VR128:$dst,
                     (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
                   Sched<[WriteVecInsert, ReadDefault, ReadInt2Fpu]>;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i32mem:$src2, u8imm:$src3),
                   !if(Is2Addr,
                       !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                       !strconcat(asm,
                                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                   [(set VR128:$dst,
                     (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2), imm:$src3)))]>,
                   Sched<[WriteVecInsert.Folded, WriteVecInsert.ReadAfterFold]>;
}

let Predicates = [HasAVX, NoDQI] in
  defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PINSRD : SS41I_insert32<0x22, "pinsrd">;

multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, GR64:$src2, u8imm:$src3),
                   !if(Is2Addr,
                       !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                       !strconcat(asm,
                                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                   [(set VR128:$dst,
                     (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
                   Sched<[WriteVecInsert, ReadDefault, ReadInt2Fpu]>;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i64mem:$src2, u8imm:$src3),
                   !if(Is2Addr,
                       !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                       !strconcat(asm,
                                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                   [(set VR128:$dst,
                     (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2), imm:$src3)))]>,
                   Sched<[WriteVecInsert.Folded, WriteVecInsert.ReadAfterFold]>;
}

let Predicates = [HasAVX, NoDQI] in
  defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
let Constraints = "$src1 = $dst" in
  defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;

// insertps has a few different modes; the first two below are optimized
// inserts that won't zero arbitrary elements in the destination vector. The
// next one matches the intrinsic and can zero arbitrary elements in the
// target vector.
multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, VR128:$src2, u8imm:$src3),
                   !if(Is2Addr,
                       !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                       !strconcat(asm,
                                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                   [(set VR128:$dst,
                     (X86insertps VR128:$src1, VR128:$src2, timm:$src3))]>,
                   Sched<[SchedWriteFShuffle.XMM]>;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, f32mem:$src2, u8imm:$src3),
                   !if(Is2Addr,
                       !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                       !strconcat(asm,
                                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                   [(set VR128:$dst,
                     (X86insertps VR128:$src1,
                                  (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
                                  timm:$src3))]>,
                   Sched<[SchedWriteFShuffle.XMM.Folded, SchedWriteFShuffle.XMM.ReadAfterFold]>;
}

let ExeDomain = SSEPackedSingle in {
  let Predicates = [UseAVX] in
    defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>,
                     VEX_4V, VEX_WIG;
  let Constraints = "$src1 = $dst" in
    defm INSERTPS : SS41I_insertf32<0x21, "insertps", 1>;
}
5399 //===----------------------------------------------------------------------===//
5400 // SSE4.1 - Round Instructions
5401 //===----------------------------------------------------------------------===//
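// A note on the round* immediate (Intel SDM): imm[1:0] selects the rounding
// mode (0 = nearest, 1 = down, 2 = up, 3 = truncate), imm[2] set means "use
// MXCSR.RC instead of imm[1:0]", and imm[3] suppresses the precision
// (inexact) exception. E.g. llvm.floor lowers to round* with an immediate of
// 0x9 (round down, inexact suppressed) and llvm.trunc to 0xB.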
multiclass sse41_fp_unop_p<bits<8> opc, string OpcodeStr,
                           X86MemOperand x86memop, RegisterClass RC,
                           ValueType VT, PatFrag mem_frag, SDPatternOperator OpNode,
                           X86FoldableSchedWrite sched> {
let Uses = [MXCSR], mayRaiseFPException = 1 in {
  // Vector intrinsic operation, reg
  def r : SS4AIi8<opc, MRMSrcReg,
                  (outs RC:$dst), (ins RC:$src1, i32u8imm:$src2),
                  !strconcat(OpcodeStr,
                             "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  [(set RC:$dst, (VT (OpNode RC:$src1, timm:$src2)))]>,
                  Sched<[sched]>;

  // Vector intrinsic operation, mem
  def m : SS4AIi8<opc, MRMSrcMem,
                  (outs RC:$dst), (ins x86memop:$src1, i32u8imm:$src2),
                  !strconcat(OpcodeStr,
                             "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  [(set RC:$dst,
                        (VT (OpNode (mem_frag addr:$src1), timm:$src2)))]>,
                  Sched<[sched.Folded]>;
}
}
multiclass avx_fp_unop_rm<bits<8> opcss, bits<8> opcsd,
                          string OpcodeStr, X86FoldableSchedWrite sched> {
let ExeDomain = SSEPackedSingle, hasSideEffects = 0, isCodeGenOnly = 1 in {
  def SSr : SS4AIi8<opcss, MRMSrcReg,
        (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, i32u8imm:$src3),
        !strconcat(OpcodeStr,
            "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, Sched<[sched]>;

  let mayLoad = 1 in
  def SSm : SS4AIi8<opcss, MRMSrcMem,
        (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2, i32u8imm:$src3),
        !strconcat(OpcodeStr,
            "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, Sched<[sched.Folded, sched.ReadAfterFold]>;
} // ExeDomain = SSEPackedSingle, hasSideEffects = 0

let ExeDomain = SSEPackedDouble, hasSideEffects = 0, isCodeGenOnly = 1 in {
  def SDr : SS4AIi8<opcsd, MRMSrcReg,
        (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, i32u8imm:$src3),
        !strconcat(OpcodeStr,
            "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, Sched<[sched]>;

  let mayLoad = 1 in
  def SDm : SS4AIi8<opcsd, MRMSrcMem,
        (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2, i32u8imm:$src3),
        !strconcat(OpcodeStr,
            "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, Sched<[sched.Folded, sched.ReadAfterFold]>;
} // ExeDomain = SSEPackedDouble, hasSideEffects = 0
}
multiclass sse41_fp_unop_s<bits<8> opcss, bits<8> opcsd,
                           string OpcodeStr, X86FoldableSchedWrite sched> {
let Uses = [MXCSR], mayRaiseFPException = 1 in {
let ExeDomain = SSEPackedSingle, hasSideEffects = 0, isCodeGenOnly = 1 in {
  def SSr : SS4AIi8<opcss, MRMSrcReg,
        (outs FR32:$dst), (ins FR32:$src1, i32u8imm:$src2),
        !strconcat(OpcodeStr,
            "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
        []>, Sched<[sched]>;

  let mayLoad = 1 in
  def SSm : SS4AIi8<opcss, MRMSrcMem,
        (outs FR32:$dst), (ins f32mem:$src1, i32u8imm:$src2),
        !strconcat(OpcodeStr,
            "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
        []>, Sched<[sched.Folded, sched.ReadAfterFold]>;
} // ExeDomain = SSEPackedSingle, hasSideEffects = 0

let ExeDomain = SSEPackedDouble, hasSideEffects = 0, isCodeGenOnly = 1 in {
  def SDr : SS4AIi8<opcsd, MRMSrcReg,
        (outs FR64:$dst), (ins FR64:$src1, i32u8imm:$src2),
        !strconcat(OpcodeStr,
            "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
        []>, Sched<[sched]>;

  let mayLoad = 1 in
  def SDm : SS4AIi8<opcsd, MRMSrcMem,
        (outs FR64:$dst), (ins f64mem:$src1, i32u8imm:$src2),
        !strconcat(OpcodeStr,
            "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
        []>, Sched<[sched.Folded, sched.ReadAfterFold]>;
} // ExeDomain = SSEPackedDouble, hasSideEffects = 0
}
}
multiclass sse41_fp_binop_s<bits<8> opcss, bits<8> opcsd,
                            string OpcodeStr, X86FoldableSchedWrite sched,
                            ValueType VT32, ValueType VT64,
                            SDNode OpNode, bit Is2Addr = 1> {
let Uses = [MXCSR], mayRaiseFPException = 1 in {
let ExeDomain = SSEPackedSingle in {
  def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst, (VT32 (OpNode VR128:$src1, VR128:$src2, timm:$src3)))]>,
        Sched<[sched]>;

  def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst,
             (OpNode VR128:$src1, (sse_load_f32 addr:$src2), timm:$src3))]>,
        Sched<[sched.Folded, sched.ReadAfterFold]>;
} // ExeDomain = SSEPackedSingle

let ExeDomain = SSEPackedDouble in {
  def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst, (VT64 (OpNode VR128:$src1, VR128:$src2, timm:$src3)))]>,
        Sched<[sched]>;

  def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst,
             (OpNode VR128:$src1, (sse_load_f64 addr:$src2), timm:$src3))]>,
        Sched<[sched.Folded, sched.ReadAfterFold]>;
} // ExeDomain = SSEPackedDouble
}
}
// FP round - roundss, roundps, roundsd, roundpd
let Predicates = [HasAVX, NoVLX] in {
  let ExeDomain = SSEPackedSingle, Uses = [MXCSR], mayRaiseFPException = 1 in {
    defm VROUNDPS  : sse41_fp_unop_p<0x08, "vroundps", f128mem, VR128, v4f32,
                                     loadv4f32, X86any_VRndScale, SchedWriteFRnd.XMM>,
                                     VEX, VEX_WIG;
    defm VROUNDPSY : sse41_fp_unop_p<0x08, "vroundps", f256mem, VR256, v8f32,
                                     loadv8f32, X86any_VRndScale, SchedWriteFRnd.YMM>,
                                     VEX, VEX_L, VEX_WIG;
  }

  let ExeDomain = SSEPackedDouble, Uses = [MXCSR], mayRaiseFPException = 1 in {
    defm VROUNDPD  : sse41_fp_unop_p<0x09, "vroundpd", f128mem, VR128, v2f64,
                                     loadv2f64, X86any_VRndScale, SchedWriteFRnd.XMM>,
                                     VEX, VEX_WIG;
    defm VROUNDPDY : sse41_fp_unop_p<0x09, "vroundpd", f256mem, VR256, v4f64,
                                     loadv4f64, X86any_VRndScale, SchedWriteFRnd.YMM>,
                                     VEX, VEX_L, VEX_WIG;
  }
}

let Predicates = [UseAVX] in {
  defm VROUND : sse41_fp_binop_s<0x0A, 0x0B, "vround", SchedWriteFRnd.Scl,
                                 v4f32, v2f64, X86RndScales, 0>,
                                 VEX_4V, VEX_LIG, VEX_WIG, SIMD_EXC;
  defm VROUND : avx_fp_unop_rm<0x0A, 0x0B, "vround", SchedWriteFRnd.Scl>,
                               VEX_4V, VEX_LIG, VEX_WIG, SIMD_EXC;
}

let Predicates = [UseAVX] in {
  def : Pat<(X86any_VRndScale FR32:$src1, timm:$src2),
            (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src1, timm:$src2)>;
  def : Pat<(X86any_VRndScale FR64:$src1, timm:$src2),
            (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src1, timm:$src2)>;
}

let Predicates = [UseAVX, OptForSize] in {
  def : Pat<(X86any_VRndScale (loadf32 addr:$src1), timm:$src2),
            (VROUNDSSm (f32 (IMPLICIT_DEF)), addr:$src1, timm:$src2)>;
  def : Pat<(X86any_VRndScale (loadf64 addr:$src1), timm:$src2),
            (VROUNDSDm (f64 (IMPLICIT_DEF)), addr:$src1, timm:$src2)>;
}

let ExeDomain = SSEPackedSingle in
defm ROUNDPS : sse41_fp_unop_p<0x08, "roundps", f128mem, VR128, v4f32,
                               memopv4f32, X86any_VRndScale, SchedWriteFRnd.XMM>;
let ExeDomain = SSEPackedDouble in
defm ROUNDPD : sse41_fp_unop_p<0x09, "roundpd", f128mem, VR128, v2f64,
                               memopv2f64, X86any_VRndScale, SchedWriteFRnd.XMM>;

defm ROUND : sse41_fp_unop_s<0x0A, 0x0B, "round", SchedWriteFRnd.Scl>;

let Constraints = "$src1 = $dst" in
defm ROUND : sse41_fp_binop_s<0x0A, 0x0B, "round", SchedWriteFRnd.Scl,
                              v4f32, v2f64, X86RndScales>;

let Predicates = [UseSSE41] in {
  def : Pat<(X86any_VRndScale FR32:$src1, timm:$src2),
            (ROUNDSSr FR32:$src1, timm:$src2)>;
  def : Pat<(X86any_VRndScale FR64:$src1, timm:$src2),
            (ROUNDSDr FR64:$src1, timm:$src2)>;
}

let Predicates = [UseSSE41, OptForSize] in {
  def : Pat<(X86any_VRndScale (loadf32 addr:$src1), timm:$src2),
            (ROUNDSSm addr:$src1, timm:$src2)>;
  def : Pat<(X86any_VRndScale (loadf64 addr:$src1), timm:$src2),
            (ROUNDSDm addr:$src1, timm:$src2)>;
}
//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Bit Test
//===----------------------------------------------------------------------===//

// The ptest instruction. We lower to this node in X86ISelLowering, primarily
// from the Intel intrinsic that corresponds to it.
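// Semantics (Intel SDM): ptest sets ZF if the AND of the two operands is all
// zeroes, and CF if the AND of the second operand with the complement of the
// first is all zeroes; e.g. _mm_testz_si128(a, b) becomes ptest + sete.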
let Defs = [EFLAGS], Predicates = [HasAVX] in {
def VPTESTrr  : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
                Sched<[SchedWriteVecTest.XMM]>, VEX, VEX_WIG;
def VPTESTrm  : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS,(X86ptest VR128:$src1, (loadv2i64 addr:$src2)))]>,
                Sched<[SchedWriteVecTest.XMM.Folded, SchedWriteVecTest.XMM.ReadAfterFold]>,
                VEX, VEX_WIG;

def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
                Sched<[SchedWriteVecTest.YMM]>, VEX, VEX_L, VEX_WIG;
def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS,(X86ptest VR256:$src1, (loadv4i64 addr:$src2)))]>,
                Sched<[SchedWriteVecTest.YMM.Folded, SchedWriteVecTest.YMM.ReadAfterFold]>,
                VEX, VEX_L, VEX_WIG;
}

let Defs = [EFLAGS] in {
def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
              "ptest\t{$src2, $src1|$src1, $src2}",
              [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
              Sched<[SchedWriteVecTest.XMM]>;
def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
              "ptest\t{$src2, $src1|$src1, $src2}",
              [(set EFLAGS, (X86ptest VR128:$src1, (memopv2i64 addr:$src2)))]>,
              Sched<[SchedWriteVecTest.XMM.Folded, SchedWriteVecTest.XMM.ReadAfterFold]>;
}
// The bit test instructions below are AVX only
multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, PatFrag mem_frag, ValueType vt,
                       X86FoldableSchedWrite sched> {
  def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
            !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
            [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>,
            Sched<[sched]>, VEX;
  def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
            !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
            [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
            Sched<[sched.Folded, sched.ReadAfterFold]>, VEX;
}

let Defs = [EFLAGS], Predicates = [HasAVX] in {
let ExeDomain = SSEPackedSingle in {
defm VTESTPS  : avx_bittest<0x0E, "vtestps", VR128, f128mem, loadv4f32, v4f32,
                            SchedWriteFTest.XMM>;
defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, loadv8f32, v8f32,
                            SchedWriteFTest.YMM>, VEX_L;
}
let ExeDomain = SSEPackedDouble in {
defm VTESTPD  : avx_bittest<0x0F, "vtestpd", VR128, f128mem, loadv2f64, v2f64,
                            SchedWriteFTest.XMM>;
defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, loadv4f64, v4f64,
                            SchedWriteFTest.YMM>, VEX_L;
}
}
//===----------------------------------------------------------------------===//
// SSE4.1 - Misc Instructions
//===----------------------------------------------------------------------===//

let Defs = [EFLAGS], Predicates = [HasPOPCNT] in {
  def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
                     "popcnt{w}\t{$src, $dst|$dst, $src}",
                     [(set GR16:$dst, (ctpop GR16:$src)), (implicit EFLAGS)]>,
                     Sched<[WritePOPCNT]>, OpSize16, XS;
  def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
                     "popcnt{w}\t{$src, $dst|$dst, $src}",
                     [(set GR16:$dst, (ctpop (loadi16 addr:$src))),
                      (implicit EFLAGS)]>,
                     Sched<[WritePOPCNT.Folded]>, OpSize16, XS;

  def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
                     "popcnt{l}\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (ctpop GR32:$src)), (implicit EFLAGS)]>,
                     Sched<[WritePOPCNT]>, OpSize32, XS;
  def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
                     "popcnt{l}\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (ctpop (loadi32 addr:$src))),
                      (implicit EFLAGS)]>,
                     Sched<[WritePOPCNT.Folded]>, OpSize32, XS;

  def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                      "popcnt{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, (ctpop GR64:$src)), (implicit EFLAGS)]>,
                      Sched<[WritePOPCNT]>, XS;
  def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                      "popcnt{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, (ctpop (loadi64 addr:$src))),
                       (implicit EFLAGS)]>,
                      Sched<[WritePOPCNT.Folded]>, XS;
}
// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
                                 SDNode OpNode, PatFrag ld_frag,
                                 X86FoldableSchedWrite Sched> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (v8i16 (OpNode (v8i16 VR128:$src))))]>,
                 Sched<[Sched]>;
  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                 (ins i128mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst,
                   (v8i16 (OpNode (ld_frag addr:$src))))]>,
                 Sched<[Sched.Folded]>;
}

// PHMIN has the same profile as PSAD, thus we use the same scheduling
// model, although the naming is misleading.
let Predicates = [HasAVX] in
defm VPHMINPOSUW : SS41I_unop_rm_int_v16<0x41, "vphminposuw",
                                         X86phminpos, load,
                                         WritePHMINPOS>, VEX, VEX_WIG;
defm PHMINPOSUW : SS41I_unop_rm_int_v16<0x41, "phminposuw",
                                        X86phminpos, memop,
                                        WritePHMINPOS>;
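// For reference: phminposuw finds the minimum unsigned i16 element of its
// source; the result holds that minimum in element 0, its index in element 1,
// and zeroes in the remaining elements.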
/// SS48I_binop_rm - Simple SSE41 binary operator.
multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                          X86MemOperand x86memop, X86FoldableSchedWrite sched,
                          bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>,
       Sched<[sched]>;
  def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst,
         (OpVT (OpNode RC:$src1, (memop_frag addr:$src2))))]>,
       Sched<[sched.Folded, sched.ReadAfterFold]>;
}
let Predicates = [HasAVX, NoVLX] in {
  defm VPMINSD   : SS48I_binop_rm<0x39, "vpminsd", smin, v4i32, VR128,
                                  load, i128mem, SchedWriteVecALU.XMM, 0>,
                                  VEX_4V, VEX_WIG;
  defm VPMINUD   : SS48I_binop_rm<0x3B, "vpminud", umin, v4i32, VR128,
                                  load, i128mem, SchedWriteVecALU.XMM, 0>,
                                  VEX_4V, VEX_WIG;
  defm VPMAXSD   : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v4i32, VR128,
                                  load, i128mem, SchedWriteVecALU.XMM, 0>,
                                  VEX_4V, VEX_WIG;
  defm VPMAXUD   : SS48I_binop_rm<0x3F, "vpmaxud", umax, v4i32, VR128,
                                  load, i128mem, SchedWriteVecALU.XMM, 0>,
                                  VEX_4V, VEX_WIG;
  defm VPMULDQ   : SS48I_binop_rm<0x28, "vpmuldq", X86pmuldq, v2i64, VR128,
                                  load, i128mem, SchedWriteVecIMul.XMM, 0>,
                                  VEX_4V, VEX_WIG;
}
let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
  defm VPMINSB   : SS48I_binop_rm<0x38, "vpminsb", smin, v16i8, VR128,
                                  load, i128mem, SchedWriteVecALU.XMM, 0>,
                                  VEX_4V, VEX_WIG;
  defm VPMINUW   : SS48I_binop_rm<0x3A, "vpminuw", umin, v8i16, VR128,
                                  load, i128mem, SchedWriteVecALU.XMM, 0>,
                                  VEX_4V, VEX_WIG;
  defm VPMAXSB   : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v16i8, VR128,
                                  load, i128mem, SchedWriteVecALU.XMM, 0>,
                                  VEX_4V, VEX_WIG;
  defm VPMAXUW   : SS48I_binop_rm<0x3E, "vpmaxuw", umax, v8i16, VR128,
                                  load, i128mem, SchedWriteVecALU.XMM, 0>,
                                  VEX_4V, VEX_WIG;
}

let Predicates = [HasAVX2, NoVLX] in {
  defm VPMINSDY  : SS48I_binop_rm<0x39, "vpminsd", smin, v8i32, VR256,
                                  load, i256mem, SchedWriteVecALU.YMM, 0>,
                                  VEX_4V, VEX_L, VEX_WIG;
  defm VPMINUDY  : SS48I_binop_rm<0x3B, "vpminud", umin, v8i32, VR256,
                                  load, i256mem, SchedWriteVecALU.YMM, 0>,
                                  VEX_4V, VEX_L, VEX_WIG;
  defm VPMAXSDY  : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v8i32, VR256,
                                  load, i256mem, SchedWriteVecALU.YMM, 0>,
                                  VEX_4V, VEX_L, VEX_WIG;
  defm VPMAXUDY  : SS48I_binop_rm<0x3F, "vpmaxud", umax, v8i32, VR256,
                                  load, i256mem, SchedWriteVecALU.YMM, 0>,
                                  VEX_4V, VEX_L, VEX_WIG;
  defm VPMULDQY  : SS48I_binop_rm<0x28, "vpmuldq", X86pmuldq, v4i64, VR256,
                                  load, i256mem, SchedWriteVecIMul.YMM, 0>,
                                  VEX_4V, VEX_L, VEX_WIG;
}
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
  defm VPMINSBY  : SS48I_binop_rm<0x38, "vpminsb", smin, v32i8, VR256,
                                  load, i256mem, SchedWriteVecALU.YMM, 0>,
                                  VEX_4V, VEX_L, VEX_WIG;
  defm VPMINUWY  : SS48I_binop_rm<0x3A, "vpminuw", umin, v16i16, VR256,
                                  load, i256mem, SchedWriteVecALU.YMM, 0>,
                                  VEX_4V, VEX_L, VEX_WIG;
  defm VPMAXSBY  : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v32i8, VR256,
                                  load, i256mem, SchedWriteVecALU.YMM, 0>,
                                  VEX_4V, VEX_L, VEX_WIG;
  defm VPMAXUWY  : SS48I_binop_rm<0x3E, "vpmaxuw", umax, v16i16, VR256,
                                  load, i256mem, SchedWriteVecALU.YMM, 0>,
                                  VEX_4V, VEX_L, VEX_WIG;
}

let Constraints = "$src1 = $dst" in {
  defm PMINSB   : SS48I_binop_rm<0x38, "pminsb", smin, v16i8, VR128,
                                 memop, i128mem, SchedWriteVecALU.XMM, 1>;
  defm PMINSD   : SS48I_binop_rm<0x39, "pminsd", smin, v4i32, VR128,
                                 memop, i128mem, SchedWriteVecALU.XMM, 1>;
  defm PMINUD   : SS48I_binop_rm<0x3B, "pminud", umin, v4i32, VR128,
                                 memop, i128mem, SchedWriteVecALU.XMM, 1>;
  defm PMINUW   : SS48I_binop_rm<0x3A, "pminuw", umin, v8i16, VR128,
                                 memop, i128mem, SchedWriteVecALU.XMM, 1>;
  defm PMAXSB   : SS48I_binop_rm<0x3C, "pmaxsb", smax, v16i8, VR128,
                                 memop, i128mem, SchedWriteVecALU.XMM, 1>;
  defm PMAXSD   : SS48I_binop_rm<0x3D, "pmaxsd", smax, v4i32, VR128,
                                 memop, i128mem, SchedWriteVecALU.XMM, 1>;
  defm PMAXUD   : SS48I_binop_rm<0x3F, "pmaxud", umax, v4i32, VR128,
                                 memop, i128mem, SchedWriteVecALU.XMM, 1>;
  defm PMAXUW   : SS48I_binop_rm<0x3E, "pmaxuw", umax, v8i16, VR128,
                                 memop, i128mem, SchedWriteVecALU.XMM, 1>;
  defm PMULDQ   : SS48I_binop_rm<0x28, "pmuldq", X86pmuldq, v2i64, VR128,
                                 memop, i128mem, SchedWriteVecIMul.XMM, 1>;
}

let Predicates = [HasAVX, NoVLX] in
defm VPMULLD   : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, VR128,
                                load, i128mem, SchedWritePMULLD.XMM, 0>,
                                VEX_4V, VEX_WIG;
let Predicates = [HasAVX] in
defm VPCMPEQQ  : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v2i64, VR128,
                                load, i128mem, SchedWriteVecALU.XMM, 0>,
                                VEX_4V, VEX_WIG;

let Predicates = [HasAVX2, NoVLX] in
defm VPMULLDY  : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256,
                                load, i256mem, SchedWritePMULLD.YMM, 0>,
                                VEX_4V, VEX_L, VEX_WIG;
let Predicates = [HasAVX2] in
defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256,
                                load, i256mem, SchedWriteVecALU.YMM, 0>,
                                VEX_4V, VEX_L, VEX_WIG;

let Constraints = "$src1 = $dst" in {
  defm PMULLD  : SS48I_binop_rm<0x40, "pmulld", mul, v4i32, VR128,
                                memop, i128mem, SchedWritePMULLD.XMM, 1>;
  defm PCMPEQQ : SS48I_binop_rm<0x29, "pcmpeqq", X86pcmpeq, v2i64, VR128,
                                memop, i128mem, SchedWriteVecALU.XMM, 1>;
}
/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
                               Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
                               X86MemOperand x86memop, bit Is2Addr,
                               X86FoldableSchedWrite sched> {
  let isCommutable = 1 in
  def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst, (IntId RC:$src1, RC:$src2, timm:$src3))]>,
        Sched<[sched]>;
  def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst,
          (IntId RC:$src1, (memop_frag addr:$src2), timm:$src3))]>,
        Sched<[sched.Folded, sched.ReadAfterFold]>;
}
/// SS41I_binop_rmi - SSE 4.1 binary operator with 8-bit immediate
multiclass SS41I_binop_rmi<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                           X86MemOperand x86memop, bit Is2Addr,
                           X86FoldableSchedWrite sched> {
  let isCommutable = 1 in
  def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2, timm:$src3)))]>,
        Sched<[sched]>;
  def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst,
          (OpVT (OpNode RC:$src1, (memop_frag addr:$src2), timm:$src3)))]>,
        Sched<[sched.Folded, sched.ReadAfterFold]>;
}
def BlendCommuteImm2 : SDNodeXForm<timm, [{
  uint8_t Imm = N->getZExtValue() & 0x03;
  return getI8Imm(Imm ^ 0x03, SDLoc(N));
}]>;

def BlendCommuteImm4 : SDNodeXForm<timm, [{
  uint8_t Imm = N->getZExtValue() & 0x0f;
  return getI8Imm(Imm ^ 0x0f, SDLoc(N));
}]>;

def BlendCommuteImm8 : SDNodeXForm<timm, [{
  uint8_t Imm = N->getZExtValue() & 0xff;
  return getI8Imm(Imm ^ 0xff, SDLoc(N));
}]>;

// Turn a 4-bit blendi immediate to 8-bit for use with pblendw.
def BlendScaleImm4 : SDNodeXForm<timm, [{
  uint8_t Imm = N->getZExtValue();
  uint8_t NewImm = 0;
  for (unsigned i = 0; i != 4; ++i) {
    if (Imm & (1 << i))
      NewImm |= 0x3 << (i * 2);
  }
  return getI8Imm(NewImm, SDLoc(N));
}]>;

// Turn a 2-bit blendi immediate to 8-bit for use with pblendw.
def BlendScaleImm2 : SDNodeXForm<timm, [{
  uint8_t Imm = N->getZExtValue();
  uint8_t NewImm = 0;
  for (unsigned i = 0; i != 2; ++i) {
    if (Imm & (1 << i))
      NewImm |= 0xf << (i * 4);
  }
  return getI8Imm(NewImm, SDLoc(N));
}]>;

// Turn a 2-bit blendi immediate to 4-bit for use with pblendd.
def BlendScaleImm2to4 : SDNodeXForm<timm, [{
  uint8_t Imm = N->getZExtValue();
  uint8_t NewImm = 0;
  for (unsigned i = 0; i != 2; ++i) {
    if (Imm & (1 << i))
      NewImm |= 0x3 << (i * 2);
  }
  return getI8Imm(NewImm, SDLoc(N));
}]>;

// Turn a 4-bit blendi immediate to 8-bit for use with pblendw and invert it.
def BlendScaleCommuteImm4 : SDNodeXForm<timm, [{
  uint8_t Imm = N->getZExtValue();
  uint8_t NewImm = 0;
  for (unsigned i = 0; i != 4; ++i) {
    if (Imm & (1 << i))
      NewImm |= 0x3 << (i * 2);
  }
  return getI8Imm(NewImm ^ 0xff, SDLoc(N));
}]>;

// Turn a 2-bit blendi immediate to 8-bit for use with pblendw and invert it.
def BlendScaleCommuteImm2 : SDNodeXForm<timm, [{
  uint8_t Imm = N->getZExtValue();
  uint8_t NewImm = 0;
  for (unsigned i = 0; i != 2; ++i) {
    if (Imm & (1 << i))
      NewImm |= 0xf << (i * 4);
  }
  return getI8Imm(NewImm ^ 0xff, SDLoc(N));
}]>;

// Turn a 2-bit blendi immediate to 4-bit for use with pblendd and invert it.
def BlendScaleCommuteImm2to4 : SDNodeXForm<timm, [{
  uint8_t Imm = N->getZExtValue();
  uint8_t NewImm = 0;
  for (unsigned i = 0; i != 2; ++i) {
    if (Imm & (1 << i))
      NewImm |= 0x3 << (i * 2);
  }
  return getI8Imm(NewImm ^ 0xf, SDLoc(N));
}]>;
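// Worked example: a v4f32 blend immediate of 0b0101 (take elements 0 and 2
// from the second source) scales to the pblendw immediate 0b00110011 via
// BlendScaleImm4 (each bit widened to cover two i16 lanes), and commutes to
// 0b1010 via BlendCommuteImm4 (swapping the sources inverts the selection).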
let Predicates = [HasAVX] in {
  let isCommutable = 0 in {
    defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
                                        VR128, load, i128mem, 0,
                                        SchedWriteMPSAD.XMM>, VEX_4V, VEX_WIG;
  }

  let Uses = [MXCSR], mayRaiseFPException = 1 in {
    let ExeDomain = SSEPackedSingle in
    defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
                                     VR128, load, f128mem, 0,
                                     SchedWriteDPPS.XMM>, VEX_4V, VEX_WIG;
    let ExeDomain = SSEPackedDouble in
    defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
                                     VR128, load, f128mem, 0,
                                     SchedWriteDPPD.XMM>, VEX_4V, VEX_WIG;
    let ExeDomain = SSEPackedSingle in
    defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
                                      VR256, load, i256mem, 0,
                                      SchedWriteDPPS.YMM>, VEX_4V, VEX_L, VEX_WIG;
  }
}

let Predicates = [HasAVX2] in {
  let isCommutable = 0 in {
    defm VMPSADBWY : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_avx2_mpsadbw,
                                         VR256, load, i256mem, 0,
                                         SchedWriteMPSAD.YMM>, VEX_4V, VEX_L, VEX_WIG;
  }
}

let Constraints = "$src1 = $dst" in {
  let isCommutable = 0 in {
    defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
                                       VR128, memop, i128mem, 1,
                                       SchedWriteMPSAD.XMM>;
  }

  let ExeDomain = SSEPackedSingle in
  defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
                                  VR128, memop, f128mem, 1,
                                  SchedWriteDPPS.XMM>, SIMD_EXC;
  let ExeDomain = SSEPackedDouble in
  defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
                                  VR128, memop, f128mem, 1,
                                  SchedWriteDPPD.XMM>, SIMD_EXC;
}
/// SS41I_blend_rmi - SSE 4.1 blend with 8-bit immediate
multiclass SS41I_blend_rmi<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                           X86MemOperand x86memop, bit Is2Addr, Domain d,
                           X86FoldableSchedWrite sched, SDNodeXForm commuteXForm> {
let ExeDomain = d, Constraints = !if(Is2Addr, "$src1 = $dst", "") in {
  let isCommutable = 1 in
  def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2, timm:$src3)))]>,
        Sched<[sched]>;
  def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst,
          (OpVT (OpNode RC:$src1, (memop_frag addr:$src2), timm:$src3)))]>,
        Sched<[sched.Folded, sched.ReadAfterFold]>;
}

// Pattern to commute if load is in first source.
def : Pat<(OpVT (OpNode (memop_frag addr:$src2), RC:$src1, timm:$src3)),
          (!cast<Instruction>(NAME#"rmi") RC:$src1, addr:$src2,
                                          (commuteXForm timm:$src3))>;
}
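// Commuting works because bit i of the blend immediate selects element i
// from the second source (and from the first source when clear), so swapping
// the two sources is equivalent to inverting the mask bits.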
let Predicates = [HasAVX] in {
  defm VBLENDPS : SS41I_blend_rmi<0x0C, "vblendps", X86Blendi, v4f32,
                                  VR128, load, f128mem, 0, SSEPackedSingle,
                                  SchedWriteFBlend.XMM, BlendCommuteImm4>,
                                  VEX_4V, VEX_WIG;
  defm VBLENDPSY : SS41I_blend_rmi<0x0C, "vblendps", X86Blendi, v8f32,
                                   VR256, load, f256mem, 0, SSEPackedSingle,
                                   SchedWriteFBlend.YMM, BlendCommuteImm8>,
                                   VEX_4V, VEX_L, VEX_WIG;
  defm VBLENDPD : SS41I_blend_rmi<0x0D, "vblendpd", X86Blendi, v2f64,
                                  VR128, load, f128mem, 0, SSEPackedDouble,
                                  SchedWriteFBlend.XMM, BlendCommuteImm2>,
                                  VEX_4V, VEX_WIG;
  defm VBLENDPDY : SS41I_blend_rmi<0x0D, "vblendpd", X86Blendi, v4f64,
                                   VR256, load, f256mem, 0, SSEPackedDouble,
                                   SchedWriteFBlend.YMM, BlendCommuteImm4>,
                                   VEX_4V, VEX_L, VEX_WIG;
  defm VPBLENDW : SS41I_blend_rmi<0x0E, "vpblendw", X86Blendi, v8i16,
                                  VR128, load, i128mem, 0, SSEPackedInt,
                                  SchedWriteBlend.XMM, BlendCommuteImm8>,
                                  VEX_4V, VEX_WIG;
}

let Predicates = [HasAVX2] in {
  defm VPBLENDWY : SS41I_blend_rmi<0x0E, "vpblendw", X86Blendi, v16i16,
                                   VR256, load, i256mem, 0, SSEPackedInt,
                                   SchedWriteBlend.YMM, BlendCommuteImm8>,
                                   VEX_4V, VEX_L, VEX_WIG;
}

// Emulate vXi32/vXi64 blends with vXf32/vXf64 or pblendw.
// ExecutionDomainFixPass will cleanup domains later on.
let Predicates = [HasAVX1Only] in {
  def : Pat<(X86Blendi (v4i64 VR256:$src1), (v4i64 VR256:$src2), timm:$src3),
            (VBLENDPDYrri VR256:$src1, VR256:$src2, timm:$src3)>;
  def : Pat<(X86Blendi VR256:$src1, (loadv4i64 addr:$src2), timm:$src3),
            (VBLENDPDYrmi VR256:$src1, addr:$src2, timm:$src3)>;
  def : Pat<(X86Blendi (loadv4i64 addr:$src2), VR256:$src1, timm:$src3),
            (VBLENDPDYrmi VR256:$src1, addr:$src2, (BlendCommuteImm4 timm:$src3))>;

  // Use pblendw for 128-bit integer to keep it in the integer domain and
  // prevent it from becoming movsd via commuting under optsize.
  def : Pat<(X86Blendi (v2i64 VR128:$src1), (v2i64 VR128:$src2), timm:$src3),
            (VPBLENDWrri VR128:$src1, VR128:$src2, (BlendScaleImm2 timm:$src3))>;
  def : Pat<(X86Blendi VR128:$src1, (loadv2i64 addr:$src2), timm:$src3),
            (VPBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleImm2 timm:$src3))>;
  def : Pat<(X86Blendi (loadv2i64 addr:$src2), VR128:$src1, timm:$src3),
            (VPBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleCommuteImm2 timm:$src3))>;

  def : Pat<(X86Blendi (v8i32 VR256:$src1), (v8i32 VR256:$src2), timm:$src3),
            (VBLENDPSYrri VR256:$src1, VR256:$src2, timm:$src3)>;
  def : Pat<(X86Blendi VR256:$src1, (loadv8i32 addr:$src2), timm:$src3),
            (VBLENDPSYrmi VR256:$src1, addr:$src2, timm:$src3)>;
  def : Pat<(X86Blendi (loadv8i32 addr:$src2), VR256:$src1, timm:$src3),
            (VBLENDPSYrmi VR256:$src1, addr:$src2, (BlendCommuteImm8 timm:$src3))>;

  // Use pblendw for 128-bit integer to keep it in the integer domain and
  // prevent it from becoming movss via commuting under optsize.
  def : Pat<(X86Blendi (v4i32 VR128:$src1), (v4i32 VR128:$src2), timm:$src3),
            (VPBLENDWrri VR128:$src1, VR128:$src2, (BlendScaleImm4 timm:$src3))>;
  def : Pat<(X86Blendi VR128:$src1, (loadv4i32 addr:$src2), timm:$src3),
            (VPBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleImm4 timm:$src3))>;
  def : Pat<(X86Blendi (loadv4i32 addr:$src2), VR128:$src1, timm:$src3),
            (VPBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleCommuteImm4 timm:$src3))>;
}

defm BLENDPS : SS41I_blend_rmi<0x0C, "blendps", X86Blendi, v4f32,
                               VR128, memop, f128mem, 1, SSEPackedSingle,
                               SchedWriteFBlend.XMM, BlendCommuteImm4>;
defm BLENDPD : SS41I_blend_rmi<0x0D, "blendpd", X86Blendi, v2f64,
                               VR128, memop, f128mem, 1, SSEPackedDouble,
                               SchedWriteFBlend.XMM, BlendCommuteImm2>;
defm PBLENDW : SS41I_blend_rmi<0x0E, "pblendw", X86Blendi, v8i16,
                               VR128, memop, i128mem, 1, SSEPackedInt,
                               SchedWriteBlend.XMM, BlendCommuteImm8>;
let Predicates = [UseSSE41] in {
  // Use pblendw for 128-bit integer to keep it in the integer domain and
  // prevent it from becoming movss via commuting under optsize.
  def : Pat<(X86Blendi (v2i64 VR128:$src1), (v2i64 VR128:$src2), timm:$src3),
            (PBLENDWrri VR128:$src1, VR128:$src2, (BlendScaleImm2 timm:$src3))>;
  def : Pat<(X86Blendi VR128:$src1, (memopv2i64 addr:$src2), timm:$src3),
            (PBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleImm2 timm:$src3))>;
  def : Pat<(X86Blendi (memopv2i64 addr:$src2), VR128:$src1, timm:$src3),
            (PBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleCommuteImm2 timm:$src3))>;

  def : Pat<(X86Blendi (v4i32 VR128:$src1), (v4i32 VR128:$src2), timm:$src3),
            (PBLENDWrri VR128:$src1, VR128:$src2, (BlendScaleImm4 timm:$src3))>;
  def : Pat<(X86Blendi VR128:$src1, (memopv4i32 addr:$src2), timm:$src3),
            (PBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleImm4 timm:$src3))>;
  def : Pat<(X86Blendi (memopv4i32 addr:$src2), VR128:$src1, timm:$src3),
            (PBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleCommuteImm4 timm:$src3))>;
}
// For insertion into the zero index (low half) of a 256-bit vector, it is
// more efficient to generate a blend with immediate instead of an insert*128.
let Predicates = [HasAVX] in {
  def : Pat<(insert_subvector (v4f64 VR256:$src1), (v2f64 VR128:$src2), (iPTR 0)),
            (VBLENDPDYrri VR256:$src1,
                          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
                                         VR128:$src2, sub_xmm), 0x3)>;
  def : Pat<(insert_subvector (v8f32 VR256:$src1), (v4f32 VR128:$src2), (iPTR 0)),
            (VBLENDPSYrri VR256:$src1,
                          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
                                         VR128:$src2, sub_xmm), 0xf)>;

  def : Pat<(insert_subvector (loadv4f64 addr:$src2), (v2f64 VR128:$src1), (iPTR 0)),
            (VBLENDPDYrmi (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
                                         VR128:$src1, sub_xmm), addr:$src2, 0xc)>;
  def : Pat<(insert_subvector (loadv8f32 addr:$src2), (v4f32 VR128:$src1), (iPTR 0)),
            (VBLENDPSYrmi (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
                                         VR128:$src1, sub_xmm), addr:$src2, 0xf0)>;
}
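// The immediates above read as lane-select masks: 0x3 takes the two low f64
// lanes from the inserted value, while 0xc takes the two high lanes from the
// loaded vector; likewise 0xf/0xf0 select the low/high four f32 lanes.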
/// SS41I_quaternary_avx - AVX SSE 4.1 with 4 operands
multiclass SS41I_quaternary_avx<bits<8> opc, string OpcodeStr, RegisterClass RC,
                                X86MemOperand x86memop, ValueType VT,
                                PatFrag mem_frag, SDNode OpNode,
                                X86FoldableSchedWrite sched> {
  def rr : Ii8Reg<opc, MRMSrcReg, (outs RC:$dst),
                  (ins RC:$src1, RC:$src2, RC:$src3),
                  !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                  [(set RC:$dst, (VT (OpNode RC:$src3, RC:$src2, RC:$src1)))],
                  SSEPackedInt>, TAPD, VEX_4V,
                  Sched<[sched]>;

  def rm : Ii8Reg<opc, MRMSrcMem, (outs RC:$dst),
                  (ins RC:$src1, x86memop:$src2, RC:$src3),
                  !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                  [(set RC:$dst,
                        (OpNode RC:$src3, (mem_frag addr:$src2),
                         RC:$src1))], SSEPackedInt>, TAPD, VEX_4V,
                  Sched<[sched.Folded, sched.ReadAfterFold,
                         // x86memop:$src2
                         ReadDefault, ReadDefault, ReadDefault, ReadDefault,
                         ReadDefault,
                         // RC:$src3
                         sched.ReadAfterFold]>;
}
let Predicates = [HasAVX] in {
let ExeDomain = SSEPackedDouble in {
defm VBLENDVPD  : SS41I_quaternary_avx<0x4B, "vblendvpd", VR128, f128mem,
                                       v2f64, loadv2f64, X86Blendv,
                                       SchedWriteFVarBlend.XMM>;
defm VBLENDVPDY : SS41I_quaternary_avx<0x4B, "vblendvpd", VR256, f256mem,
                                       v4f64, loadv4f64, X86Blendv,
                                       SchedWriteFVarBlend.YMM>, VEX_L;
} // ExeDomain = SSEPackedDouble
let ExeDomain = SSEPackedSingle in {
defm VBLENDVPS  : SS41I_quaternary_avx<0x4A, "vblendvps", VR128, f128mem,
                                       v4f32, loadv4f32, X86Blendv,
                                       SchedWriteFVarBlend.XMM>;
defm VBLENDVPSY : SS41I_quaternary_avx<0x4A, "vblendvps", VR256, f256mem,
                                       v8f32, loadv8f32, X86Blendv,
                                       SchedWriteFVarBlend.YMM>, VEX_L;
} // ExeDomain = SSEPackedSingle
defm VPBLENDVB  : SS41I_quaternary_avx<0x4C, "vpblendvb", VR128, i128mem,
                                       v16i8, loadv16i8, X86Blendv,
                                       SchedWriteVarBlend.XMM>;
}

let Predicates = [HasAVX2] in {
defm VPBLENDVBY : SS41I_quaternary_avx<0x4C, "vpblendvb", VR256, i256mem,
                                       v32i8, loadv32i8, X86Blendv,
                                       SchedWriteVarBlend.YMM>, VEX_L;
}
let Predicates = [HasAVX] in {
  def : Pat<(v4i32 (X86Blendv (v4i32 VR128:$mask), (v4i32 VR128:$src1),
                              (v4i32 VR128:$src2))),
            (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v2i64 (X86Blendv (v2i64 VR128:$mask), (v2i64 VR128:$src1),
                              (v2i64 VR128:$src2))),
            (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v8i32 (X86Blendv (v8i32 VR256:$mask), (v8i32 VR256:$src1),
                              (v8i32 VR256:$src2))),
            (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
  def : Pat<(v4i64 (X86Blendv (v4i64 VR256:$mask), (v4i64 VR256:$src1),
                              (v4i64 VR256:$src2))),
            (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
}
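// Reusing the FP blendv instructions for integer vectors is safe here
// because blendv only tests the sign (top) bit of each mask element, which
// lines up for i32/f32 and i64/f64 lanes; only a domain-crossing penalty is
// at stake, not correctness.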
// Prefer a movss or movsd over a blendps when optimizing for size. These were
// changed to use blends because blends have better throughput on Sandy Bridge
// and Haswell, but movs[s/d] are 1-2 byte shorter instructions.
let Predicates = [HasAVX, OptForSpeed] in {
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (VBLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (VPBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;

  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (VBLENDPSrri VR128:$src1, VR128:$src2, (i8 1))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, (loadv4f32 addr:$src2))),
            (VBLENDPSrmi VR128:$src1, addr:$src2, (i8 1))>;
  def : Pat<(v4f32 (X86Movss (loadv4f32 addr:$src2), VR128:$src1)),
            (VBLENDPSrmi VR128:$src1, addr:$src2, (i8 0xe))>;

  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VBLENDPDrri VR128:$src1, VR128:$src2, (i8 1))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, (loadv2f64 addr:$src2))),
            (VBLENDPDrmi VR128:$src1, addr:$src2, (i8 1))>;
  def : Pat<(v2f64 (X86Movsd (loadv2f64 addr:$src2), VR128:$src1)),
            (VBLENDPDrmi VR128:$src1, addr:$src2, (i8 2))>;

  // Move low f32 and clear high bits.
  def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
            (SUBREG_TO_REG (i32 0),
             (v4f32 (VBLENDPSrri (v4f32 (V_SET0)),
                    (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm)),
                    (i8 1))), sub_xmm)>;
  def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
            (SUBREG_TO_REG (i32 0),
             (v4i32 (VPBLENDWrri (v4i32 (V_SET0)),
                    (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm)),
                    (i8 3))), sub_xmm)>;
}

// Prefer a movss or movsd over a blendps when optimizing for size. These were
// changed to use blends because blends have better throughput on Sandy Bridge
// and Haswell, but movs[s/d] are 1-2 byte shorter instructions.
let Predicates = [UseSSE41, OptForSpeed] in {
  // With SSE41 we can use blends for these patterns.
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (BLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (PBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;

  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (BLENDPSrri VR128:$src1, VR128:$src2, (i8 1))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, (memopv4f32 addr:$src2))),
            (BLENDPSrmi VR128:$src1, addr:$src2, (i8 1))>;
  def : Pat<(v4f32 (X86Movss (memopv4f32 addr:$src2), VR128:$src1)),
            (BLENDPSrmi VR128:$src1, addr:$src2, (i8 0xe))>;

  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (BLENDPDrri VR128:$src1, VR128:$src2, (i8 1))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, (memopv2f64 addr:$src2))),
            (BLENDPDrmi VR128:$src1, addr:$src2, (i8 1))>;
  def : Pat<(v2f64 (X86Movsd (memopv2f64 addr:$src2), VR128:$src1)),
            (BLENDPDrmi VR128:$src1, addr:$src2, (i8 2))>;
}
/// SS41I_ternary - SSE 4.1 ternary operator
let Uses = [XMM0], Constraints = "$src1 = $dst" in {
  multiclass SS41I_ternary<bits<8> opc, string OpcodeStr, ValueType VT,
                           PatFrag mem_frag, X86MemOperand x86memop,
                           SDNode OpNode, X86FoldableSchedWrite sched> {
    def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr,
                               "\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}"),
                    [(set VR128:$dst,
                      (VT (OpNode XMM0, VR128:$src2, VR128:$src1)))]>,
                    Sched<[sched]>;

    def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, x86memop:$src2),
                    !strconcat(OpcodeStr,
                               "\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}"),
                    [(set VR128:$dst,
                      (OpNode XMM0, (mem_frag addr:$src2), VR128:$src1))]>,
                    Sched<[sched.Folded, sched.ReadAfterFold]>;
  }
}

let ExeDomain = SSEPackedDouble in
defm BLENDVPD : SS41I_ternary<0x15, "blendvpd", v2f64, memopv2f64, f128mem,
                              X86Blendv, SchedWriteFVarBlend.XMM>;
let ExeDomain = SSEPackedSingle in
defm BLENDVPS : SS41I_ternary<0x14, "blendvps", v4f32, memopv4f32, f128mem,
                              X86Blendv, SchedWriteFVarBlend.XMM>;
defm PBLENDVB : SS41I_ternary<0x10, "pblendvb", v16i8, memopv16i8, i128mem,
                              X86Blendv, SchedWriteVarBlend.XMM>;

// Aliases with the implicit xmm0 argument
def : InstAlias<"blendvpd\t{$src2, $dst|$dst, $src2}",
                (BLENDVPDrr0 VR128:$dst, VR128:$src2), 0>;
def : InstAlias<"blendvpd\t{$src2, $dst|$dst, $src2}",
                (BLENDVPDrm0 VR128:$dst, f128mem:$src2), 0>;
def : InstAlias<"blendvps\t{$src2, $dst|$dst, $src2}",
                (BLENDVPSrr0 VR128:$dst, VR128:$src2), 0>;
def : InstAlias<"blendvps\t{$src2, $dst|$dst, $src2}",
                (BLENDVPSrm0 VR128:$dst, f128mem:$src2), 0>;
def : InstAlias<"pblendvb\t{$src2, $dst|$dst, $src2}",
                (PBLENDVBrr0 VR128:$dst, VR128:$src2), 0>;
def : InstAlias<"pblendvb\t{$src2, $dst|$dst, $src2}",
                (PBLENDVBrm0 VR128:$dst, i128mem:$src2), 0>;

let Predicates = [UseSSE41] in {
  def : Pat<(v4i32 (X86Blendv (v4i32 XMM0), (v4i32 VR128:$src1),
                              (v4i32 VR128:$src2))),
            (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v2i64 (X86Blendv (v2i64 XMM0), (v2i64 VR128:$src1),
                              (v2i64 VR128:$src2))),
            (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
}
let AddedComplexity = 400 in { // Prefer non-temporal versions

let Predicates = [HasAVX, NoVLX] in
def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "vmovntdqa\t{$src, $dst|$dst, $src}", []>,
                        Sched<[SchedWriteVecMoveLSNT.XMM.RM]>, VEX, VEX_WIG;
let Predicates = [HasAVX2, NoVLX] in
def VMOVNTDQAYrm : SS48I<0x2A, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                         "vmovntdqa\t{$src, $dst|$dst, $src}", []>,
                         Sched<[SchedWriteVecMoveLSNT.YMM.RM]>, VEX, VEX_L, VEX_WIG;
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "movntdqa\t{$src, $dst|$dst, $src}", []>,
                       Sched<[SchedWriteVecMoveLSNT.XMM.RM]>;
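// movntdqa performs a non-temporal (streaming) load, intended for WC memory,
// where it can fetch a line into a streaming buffer instead of polluting the
// cache. The address must be 16/32-byte aligned, which is why only the
// aligned non-temporal load patterns below select these instructions.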
let Predicates = [HasAVX2, NoVLX] in {
  def : Pat<(v8f32 (alignednontemporalload addr:$src)),
            (VMOVNTDQAYrm addr:$src)>;
  def : Pat<(v4f64 (alignednontemporalload addr:$src)),
            (VMOVNTDQAYrm addr:$src)>;
  def : Pat<(v4i64 (alignednontemporalload addr:$src)),
            (VMOVNTDQAYrm addr:$src)>;
  def : Pat<(v8i32 (alignednontemporalload addr:$src)),
            (VMOVNTDQAYrm addr:$src)>;
  def : Pat<(v16i16 (alignednontemporalload addr:$src)),
            (VMOVNTDQAYrm addr:$src)>;
  def : Pat<(v32i8 (alignednontemporalload addr:$src)),
            (VMOVNTDQAYrm addr:$src)>;
}

let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(v4f32 (alignednontemporalload addr:$src)),
            (VMOVNTDQArm addr:$src)>;
  def : Pat<(v2f64 (alignednontemporalload addr:$src)),
            (VMOVNTDQArm addr:$src)>;
  def : Pat<(v2i64 (alignednontemporalload addr:$src)),
            (VMOVNTDQArm addr:$src)>;
  def : Pat<(v4i32 (alignednontemporalload addr:$src)),
            (VMOVNTDQArm addr:$src)>;
  def : Pat<(v8i16 (alignednontemporalload addr:$src)),
            (VMOVNTDQArm addr:$src)>;
  def : Pat<(v16i8 (alignednontemporalload addr:$src)),
            (VMOVNTDQArm addr:$src)>;
}

let Predicates = [UseSSE41] in {
  def : Pat<(v4f32 (alignednontemporalload addr:$src)),
            (MOVNTDQArm addr:$src)>;
  def : Pat<(v2f64 (alignednontemporalload addr:$src)),
            (MOVNTDQArm addr:$src)>;
  def : Pat<(v2i64 (alignednontemporalload addr:$src)),
            (MOVNTDQArm addr:$src)>;
  def : Pat<(v4i32 (alignednontemporalload addr:$src)),
            (MOVNTDQArm addr:$src)>;
  def : Pat<(v8i16 (alignednontemporalload addr:$src)),
            (MOVNTDQArm addr:$src)>;
  def : Pat<(v16i8 (alignednontemporalload addr:$src)),
            (MOVNTDQArm addr:$src)>;
}

} // AddedComplexity
//===----------------------------------------------------------------------===//
// SSE4.2 - Compare Instructions
//===----------------------------------------------------------------------===//

/// SS42I_binop_rm - Simple SSE 4.2 binary operator
multiclass SS42I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                          X86MemOperand x86memop, X86FoldableSchedWrite sched,
                          bit Is2Addr = 1> {
  def rr : SS428I<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>,
       Sched<[sched]>;
  def rm : SS428I<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst,
         (OpVT (OpNode RC:$src1, (memop_frag addr:$src2))))]>,
       Sched<[sched.Folded, sched.ReadAfterFold]>;
}

let Predicates = [HasAVX] in
defm VPCMPGTQ : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v2i64, VR128,
                               load, i128mem, SchedWriteVecALU.XMM, 0>,
                               VEX_4V, VEX_WIG;

let Predicates = [HasAVX2] in
defm VPCMPGTQY : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v4i64, VR256,
                                load, i256mem, SchedWriteVecALU.YMM, 0>,
                                VEX_4V, VEX_L, VEX_WIG;

let Constraints = "$src1 = $dst" in
defm PCMPGTQ : SS42I_binop_rm<0x37, "pcmpgtq", X86pcmpgt, v2i64, VR128,
                              memop, i128mem, SchedWriteVecALU.XMM>;
//===----------------------------------------------------------------------===//
// SSE4.2 - String/text Processing Instructions
//===----------------------------------------------------------------------===//

multiclass pcmpistrm_SS42AI<string asm> {
  def rr : SS42AI<0x62, MRMSrcReg, (outs),
                  (ins VR128:$src1, VR128:$src2, u8imm:$src3),
                  !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
                  []>, Sched<[WritePCmpIStrM]>;
  let mayLoad = 1 in
  def rm : SS42AI<0x62, MRMSrcMem, (outs),
                  (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
                  !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
                  []>, Sched<[WritePCmpIStrM.Folded, WritePCmpIStrM.ReadAfterFold]>;
}

let Defs = [XMM0, EFLAGS], hasSideEffects = 0 in {
  let Predicates = [HasAVX] in
  defm VPCMPISTRM : pcmpistrm_SS42AI<"vpcmpistrm">, VEX, VEX_WIG;
  defm PCMPISTRM  : pcmpistrm_SS42AI<"pcmpistrm">;
}

multiclass SS42AI_pcmpestrm<string asm> {
  def rr : SS42AI<0x60, MRMSrcReg, (outs),
                  (ins VR128:$src1, VR128:$src3, u8imm:$src5),
                  !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
                  []>, Sched<[WritePCmpEStrM]>;
  let mayLoad = 1 in
  def rm : SS42AI<0x60, MRMSrcMem, (outs),
                  (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
                  !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
                  []>, Sched<[WritePCmpEStrM.Folded, WritePCmpEStrM.ReadAfterFold]>;
}

let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
  let Predicates = [HasAVX] in
  defm VPCMPESTRM : SS42AI_pcmpestrm<"vpcmpestrm">, VEX, VEX_WIG;
  defm PCMPESTRM  : SS42AI_pcmpestrm<"pcmpestrm">;
}

multiclass SS42AI_pcmpistri<string asm> {
  def rr : SS42AI<0x63, MRMSrcReg, (outs),
                  (ins VR128:$src1, VR128:$src2, u8imm:$src3),
                  !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
                  []>, Sched<[WritePCmpIStrI]>;
  let mayLoad = 1 in
  def rm : SS42AI<0x63, MRMSrcMem, (outs),
                  (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
                  !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
                  []>, Sched<[WritePCmpIStrI.Folded, WritePCmpIStrI.ReadAfterFold]>;
}

let Defs = [ECX, EFLAGS], hasSideEffects = 0 in {
  let Predicates = [HasAVX] in
  defm VPCMPISTRI : SS42AI_pcmpistri<"vpcmpistri">, VEX, VEX_WIG;
  defm PCMPISTRI  : SS42AI_pcmpistri<"pcmpistri">;
}

multiclass SS42AI_pcmpestri<string asm> {
  def rr : SS42AI<0x61, MRMSrcReg, (outs),
                  (ins VR128:$src1, VR128:$src3, u8imm:$src5),
                  !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
                  []>, Sched<[WritePCmpEStrI]>;
  let mayLoad = 1 in
  def rm : SS42AI<0x61, MRMSrcMem, (outs),
                  (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
                  !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
                  []>, Sched<[WritePCmpEStrI.Folded, WritePCmpEStrI.ReadAfterFold]>;
}

let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
  let Predicates = [HasAVX] in
  defm VPCMPESTRI : SS42AI_pcmpestri<"vpcmpestri">, VEX, VEX_WIG;
  defm PCMPESTRI  : SS42AI_pcmpestri<"pcmpestri">;
}
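// A note on the implicit operands above: the explicit-length forms
// (pcmpestri/pcmpestrm) read the string lengths from EAX and EDX, while the
// implicit-length forms scan for a NUL terminator. The *stri variants return
// the match index in ECX and the *strm variants a mask in xmm0, with EFLAGS
// updated in all cases.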
//===----------------------------------------------------------------------===//
// SSE4.2 - CRC Instructions
//===----------------------------------------------------------------------===//

// No CRC instructions have AVX equivalents

// crc intrinsic instructions
// These come in rr and rm forms; the only difference is the size of the
// r and m operands.
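// For reference: crc32 accumulates a CRC-32C (Castagnoli) checksum, i.e. the
// polynomial 0x11EDC6F41 (bit-reflected constant 0x82F63B78), not the CRC-32
// polynomial used by zlib/PNG.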
class SS42I_crc32r<bits<8> opc, string asm, RegisterClass RCOut,
                   RegisterClass RCIn, SDPatternOperator Int> :
      SS42FI<opc, MRMSrcReg, (outs RCOut:$dst), (ins RCOut:$src1, RCIn:$src2),
             !strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
             [(set RCOut:$dst, (Int RCOut:$src1, RCIn:$src2))]>,
      Sched<[WriteCRC32]>;

class SS42I_crc32m<bits<8> opc, string asm, RegisterClass RCOut,
                   X86MemOperand x86memop, SDPatternOperator Int> :
      SS42FI<opc, MRMSrcMem, (outs RCOut:$dst), (ins RCOut:$src1, x86memop:$src2),
             !strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
             [(set RCOut:$dst, (Int RCOut:$src1, (load addr:$src2)))]>,
      Sched<[WriteCRC32.Folded, WriteCRC32.ReadAfterFold]>;

let Constraints = "$src1 = $dst" in {
  def CRC32r32m8  : SS42I_crc32m<0xF0, "crc32{b}", GR32, i8mem,
                                 int_x86_sse42_crc32_32_8>;
  def CRC32r32r8  : SS42I_crc32r<0xF0, "crc32{b}", GR32, GR8,
                                 int_x86_sse42_crc32_32_8>;
  def CRC32r32m16 : SS42I_crc32m<0xF1, "crc32{w}", GR32, i16mem,
                                 int_x86_sse42_crc32_32_16>, OpSize16;
  def CRC32r32r16 : SS42I_crc32r<0xF1, "crc32{w}", GR32, GR16,
                                 int_x86_sse42_crc32_32_16>, OpSize16;
  def CRC32r32m32 : SS42I_crc32m<0xF1, "crc32{l}", GR32, i32mem,
                                 int_x86_sse42_crc32_32_32>, OpSize32;
  def CRC32r32r32 : SS42I_crc32r<0xF1, "crc32{l}", GR32, GR32,
                                 int_x86_sse42_crc32_32_32>, OpSize32;
  def CRC32r64m64 : SS42I_crc32m<0xF1, "crc32{q}", GR64, i64mem,
                                 int_x86_sse42_crc32_64_64>, REX_W;
  def CRC32r64r64 : SS42I_crc32r<0xF1, "crc32{q}", GR64, GR64,
                                 int_x86_sse42_crc32_64_64>, REX_W;
  let hasSideEffects = 0 in {
    let mayLoad = 1 in
    def CRC32r64m8 : SS42I_crc32m<0xF0, "crc32{b}", GR64, i8mem,
                                  null_frag>, REX_W;
    def CRC32r64r8 : SS42I_crc32r<0xF0, "crc32{b}", GR64, GR8,
                                  null_frag>, REX_W;
  }
}
//===----------------------------------------------------------------------===//
// SHA-NI Instructions
//===----------------------------------------------------------------------===//

// FIXME: Is there a better scheduler class for SHA than WriteVecIMul?
multiclass SHAI_binop<bits<8> Opc, string OpcodeStr, Intrinsic IntId,
                      X86FoldableSchedWrite sched, bit UsesXMM0 = 0> {
  def rr : I<Opc, MRMSrcReg, (outs VR128:$dst),
             (ins VR128:$src1, VR128:$src2),
             !if(UsesXMM0,
                 !strconcat(OpcodeStr, "\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}"),
                 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}")),
             [!if(UsesXMM0,
                  (set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0)),
                  (set VR128:$dst, (IntId VR128:$src1, VR128:$src2)))]>,
             T8PS, Sched<[sched]>;

  def rm : I<Opc, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !if(UsesXMM0,
                 !strconcat(OpcodeStr, "\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}"),
                 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}")),
             [!if(UsesXMM0,
                  (set VR128:$dst, (IntId VR128:$src1,
                    (memop addr:$src2), XMM0)),
                  (set VR128:$dst, (IntId VR128:$src1,
                    (memop addr:$src2))))]>, T8PS,
             Sched<[sched.Folded, sched.ReadAfterFold]>;
}

let Constraints = "$src1 = $dst", Predicates = [HasSHA] in {
  def SHA1RNDS4rri : Ii8<0xCC, MRMSrcReg, (outs VR128:$dst),
                         (ins VR128:$src1, VR128:$src2, u8imm:$src3),
                         "sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                         [(set VR128:$dst,
                           (int_x86_sha1rnds4 VR128:$src1, VR128:$src2,
                            (i8 timm:$src3)))]>, TAPS,
                         Sched<[SchedWriteVecIMul.XMM]>;
  def SHA1RNDS4rmi : Ii8<0xCC, MRMSrcMem, (outs VR128:$dst),
                         (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
                         "sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                         [(set VR128:$dst,
                           (int_x86_sha1rnds4 VR128:$src1,
                            (memop addr:$src2),
                            (i8 timm:$src3)))]>, TAPS,
                         Sched<[SchedWriteVecIMul.XMM.Folded,
                                SchedWriteVecIMul.XMM.ReadAfterFold]>;
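  // The sha1rnds4 immediate (0-3) selects which group of four SHA-1 rounds is
  // performed, i.e. which round function and K constant to apply.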
  defm SHA1NEXTE : SHAI_binop<0xC8, "sha1nexte", int_x86_sha1nexte,
                              SchedWriteVecIMul.XMM>;
  defm SHA1MSG1  : SHAI_binop<0xC9, "sha1msg1", int_x86_sha1msg1,
                              SchedWriteVecIMul.XMM>;
  defm SHA1MSG2  : SHAI_binop<0xCA, "sha1msg2", int_x86_sha1msg2,
                              SchedWriteVecIMul.XMM>;

  let Uses = [XMM0] in
  defm SHA256RNDS2 : SHAI_binop<0xCB, "sha256rnds2", int_x86_sha256rnds2,
                                SchedWriteVecIMul.XMM, 1>;

  defm SHA256MSG1 : SHAI_binop<0xCC, "sha256msg1", int_x86_sha256msg1,
                               SchedWriteVecIMul.XMM>;
  defm SHA256MSG2 : SHAI_binop<0xCD, "sha256msg2", int_x86_sha256msg2,
                               SchedWriteVecIMul.XMM>;
}

// Aliases with explicit %xmm0
def : InstAlias<"sha256rnds2\t{$src2, $dst|$dst, $src2}",
                (SHA256RNDS2rr VR128:$dst, VR128:$src2), 0>;
def : InstAlias<"sha256rnds2\t{$src2, $dst|$dst, $src2}",
                (SHA256RNDS2rm VR128:$dst, i128mem:$src2), 0>;
//===----------------------------------------------------------------------===//
// AES-NI Instructions
//===----------------------------------------------------------------------===//

multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
                             Intrinsic IntId, PatFrag ld_frag,
                             bit Is2Addr = 0, RegisterClass RC = VR128,
                             X86MemOperand MemOp = i128mem> {
  let AsmString = OpcodeStr#
                  !if(Is2Addr, "\t{$src2, $dst|$dst, $src2}",
                      "\t{$src2, $src1, $dst|$dst, $src1, $src2}") in {
    def rr : AES8I<opc, MRMSrcReg, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2), "",
                   [(set RC:$dst, (IntId RC:$src1, RC:$src2))]>,
                   Sched<[WriteAESDecEnc]>;
    def rm : AES8I<opc, MRMSrcMem, (outs RC:$dst),
                   (ins RC:$src1, MemOp:$src2), "",
                   [(set RC:$dst, (IntId RC:$src1, (ld_frag addr:$src2)))]>,
                   Sched<[WriteAESDecEnc.Folded, WriteAESDecEnc.ReadAfterFold]>;
  }
}

// Perform One Round of an AES Encryption/Decryption Flow
let Predicates = [HasAVX, NoVLX_Or_NoVAES, HasAES] in {
  defm VAESENC     : AESI_binop_rm_int<0xDC, "vaesenc",
                                       int_x86_aesni_aesenc, load>, VEX_4V, VEX_WIG;
  defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
                                       int_x86_aesni_aesenclast, load>, VEX_4V, VEX_WIG;
  defm VAESDEC     : AESI_binop_rm_int<0xDE, "vaesdec",
                                       int_x86_aesni_aesdec, load>, VEX_4V, VEX_WIG;
  defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
                                       int_x86_aesni_aesdeclast, load>, VEX_4V, VEX_WIG;
}

let Predicates = [NoVLX, HasVAES] in {
  defm VAESENCY     : AESI_binop_rm_int<0xDC, "vaesenc",
                                        int_x86_aesni_aesenc_256, load, 0, VR256,
                                        i256mem>, VEX_4V, VEX_L, VEX_WIG;
  defm VAESENCLASTY : AESI_binop_rm_int<0xDD, "vaesenclast",
                                        int_x86_aesni_aesenclast_256, load, 0, VR256,
                                        i256mem>, VEX_4V, VEX_L, VEX_WIG;
  defm VAESDECY     : AESI_binop_rm_int<0xDE, "vaesdec",
                                        int_x86_aesni_aesdec_256, load, 0, VR256,
                                        i256mem>, VEX_4V, VEX_L, VEX_WIG;
  defm VAESDECLASTY : AESI_binop_rm_int<0xDF, "vaesdeclast",
                                        int_x86_aesni_aesdeclast_256, load, 0, VR256,
                                        i256mem>, VEX_4V, VEX_L, VEX_WIG;
}

let Constraints = "$src1 = $dst" in {
  defm AESENC     : AESI_binop_rm_int<0xDC, "aesenc",
                                      int_x86_aesni_aesenc, memop, 1>;
  defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
                                      int_x86_aesni_aesenclast, memop, 1>;
  defm AESDEC     : AESI_binop_rm_int<0xDE, "aesdec",
                                      int_x86_aesni_aesdec, memop, 1>;
  defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
                                      int_x86_aesni_aesdeclast, memop, 1>;
}

// Perform the AES InvMixColumn Transformation
let Predicates = [HasAVX, HasAES] in {
  def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
                        (ins VR128:$src1),
                        "vaesimc\t{$src1, $dst|$dst, $src1}",
                        [(set VR128:$dst,
                          (int_x86_aesni_aesimc VR128:$src1))]>, Sched<[WriteAESIMC]>,
                        VEX, VEX_WIG;
  def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
                        (ins i128mem:$src1),
                        "vaesimc\t{$src1, $dst|$dst, $src1}",
                        [(set VR128:$dst, (int_x86_aesni_aesimc (load addr:$src1)))]>,
                        Sched<[WriteAESIMC.Folded]>, VEX, VEX_WIG;
}
def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
                     (ins VR128:$src1),
                     "aesimc\t{$src1, $dst|$dst, $src1}",
                     [(set VR128:$dst,
                       (int_x86_aesni_aesimc VR128:$src1))]>, Sched<[WriteAESIMC]>;
def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
                     (ins i128mem:$src1),
                     "aesimc\t{$src1, $dst|$dst, $src1}",
                     [(set VR128:$dst, (int_x86_aesni_aesimc (memop addr:$src1)))]>,
                     Sched<[WriteAESIMC.Folded]>;

// AES Round Key Generation Assist
let Predicates = [HasAVX, HasAES] in {
  def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
                                    (ins VR128:$src1, u8imm:$src2),
                                    "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                                    [(set VR128:$dst,
                                      (int_x86_aesni_aeskeygenassist VR128:$src1, timm:$src2))]>,
                                    Sched<[WriteAESKeyGen]>, VEX, VEX_WIG;
  def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
                                    (ins i128mem:$src1, u8imm:$src2),
                                    "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                                    [(set VR128:$dst,
                                      (int_x86_aesni_aeskeygenassist (load addr:$src1), timm:$src2))]>,
                                    Sched<[WriteAESKeyGen.Folded]>, VEX, VEX_WIG;
}
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
                                 (ins VR128:$src1, u8imm:$src2),
                                 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                                 [(set VR128:$dst,
                                   (int_x86_aesni_aeskeygenassist VR128:$src1, timm:$src2))]>,
                                 Sched<[WriteAESKeyGen]>;
def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
                                 (ins i128mem:$src1, u8imm:$src2),
                                 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                                 [(set VR128:$dst,
                                   (int_x86_aesni_aeskeygenassist (memop addr:$src1), timm:$src2))]>,
                                 Sched<[WriteAESKeyGen.Folded]>;

//===----------------------------------------------------------------------===//
// PCLMUL Instructions
//===----------------------------------------------------------------------===//

// Immediate transform to help with commuting.
def PCLMULCommuteImm : SDNodeXForm<timm, [{
  uint8_t Imm = N->getZExtValue();
  return getI8Imm((uint8_t)((Imm >> 4) | (Imm << 4)), SDLoc(N));
}]>;
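
// The transform swaps the two immediate nibbles: imm bit 0 selects the
// quadword of the first source and bit 4 the quadword of the second, so
// exchanging the nibbles (e.g. 0x01 <-> 0x10) selects the same pair of
// quadwords after the operands themselves have been swapped.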

// SSE carry-less Multiplication instructions
let Predicates = [NoAVX, HasPCLMUL] in {
  let Constraints = "$src1 = $dst" in {
    let isCommutable = 1 in
    def PCLMULQDQrr : PCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
              (ins VR128:$src1, VR128:$src2, u8imm:$src3),
              "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
              [(set VR128:$dst,
                (int_x86_pclmulqdq VR128:$src1, VR128:$src2, timm:$src3))]>,
              Sched<[WriteCLMul]>;

    def PCLMULQDQrm : PCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
              (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
              "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
              [(set VR128:$dst,
                 (int_x86_pclmulqdq VR128:$src1, (memop addr:$src2),
                  timm:$src3))]>,
              Sched<[WriteCLMul.Folded, WriteCLMul.ReadAfterFold]>;
  } // Constraints = "$src1 = $dst"

  def : Pat<(int_x86_pclmulqdq (memop addr:$src2), VR128:$src1,
                                (i8 timm:$src3)),
            (PCLMULQDQrm VR128:$src1, addr:$src2,
                          (PCLMULCommuteImm timm:$src3))>;
} // Predicates = [NoAVX, HasPCLMUL]
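
// The pclmul{l,h}q{l,h}qdq mnemonics encode the immediate in their suffixes:
// the first suffix selects the quadword of the first source (imm bit 0) and
// the second suffix the quadword of the second source (imm bit 4), so e.g.
// pclmulhqlqdq is pclmulqdq with an immediate of 0x01.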
foreach HI = ["hq","lq"] in
foreach LO = ["hq","lq"] in {
  def : InstAlias<"pclmul" # HI # LO # "dq\t{$src, $dst|$dst, $src}",
                  (PCLMULQDQrr VR128:$dst, VR128:$src,
                   !add(!shl(!eq(LO,"hq"),4),!eq(HI,"hq"))), 0>;
  def : InstAlias<"pclmul" # HI # LO # "dq\t{$src, $dst|$dst, $src}",
                  (PCLMULQDQrm VR128:$dst, i128mem:$src,
                   !add(!shl(!eq(LO,"hq"),4),!eq(HI,"hq"))), 0>;
}

// AVX carry-less Multiplication instructions
multiclass vpclmulqdq<RegisterClass RC, X86MemOperand MemOp,
                      PatFrag LdFrag, Intrinsic IntId> {
  let isCommutable = 1 in
  def rr : PCLMULIi8<0x44, MRMSrcReg, (outs RC:$dst),
            (ins RC:$src1, RC:$src2, u8imm:$src3),
            "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
            [(set RC:$dst,
              (IntId RC:$src1, RC:$src2, timm:$src3))]>,
            Sched<[WriteCLMul]>;

  def rm : PCLMULIi8<0x44, MRMSrcMem, (outs RC:$dst),
            (ins RC:$src1, MemOp:$src2, u8imm:$src3),
            "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
            [(set RC:$dst,
               (IntId RC:$src1, (LdFrag addr:$src2), timm:$src3))]>,
            Sched<[WriteCLMul.Folded, WriteCLMul.ReadAfterFold]>;

  // We can commute a load in the first operand by swapping the sources and
  // rotating the immediate.
  def : Pat<(IntId (LdFrag addr:$src2), RC:$src1, (i8 timm:$src3)),
            (!cast<Instruction>(NAME#"rm") RC:$src1, addr:$src2,
                                           (PCLMULCommuteImm timm:$src3))>;
}

let Predicates = [HasAVX, NoVLX_Or_NoVPCLMULQDQ, HasPCLMUL] in
defm VPCLMULQDQ : vpclmulqdq<VR128, i128mem, load,
                             int_x86_pclmulqdq>, VEX_4V, VEX_WIG;

let Predicates = [NoVLX, HasVPCLMULQDQ] in
defm VPCLMULQDQY : vpclmulqdq<VR256, i256mem, load,
                              int_x86_pclmulqdq_256>, VEX_4V, VEX_L, VEX_WIG;

multiclass vpclmulqdq_aliases_impl<string InstStr, RegisterClass RC,
                                   X86MemOperand MemOp, string Hi, string Lo> {
  def : InstAlias<"vpclmul"#Hi#Lo#"dq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                  (!cast<Instruction>(InstStr # "rr") RC:$dst, RC:$src1, RC:$src2,
                        !add(!shl(!eq(Lo,"hq"),4),!eq(Hi,"hq"))), 0>;
  def : InstAlias<"vpclmul"#Hi#Lo#"dq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                  (!cast<Instruction>(InstStr # "rm") RC:$dst, RC:$src1, MemOp:$src2,
                        !add(!shl(!eq(Lo,"hq"),4),!eq(Hi,"hq"))), 0>;
}

multiclass vpclmulqdq_aliases<string InstStr, RegisterClass RC,
                              X86MemOperand MemOp> {
  defm : vpclmulqdq_aliases_impl<InstStr, RC, MemOp, "hq", "hq">;
  defm : vpclmulqdq_aliases_impl<InstStr, RC, MemOp, "hq", "lq">;
  defm : vpclmulqdq_aliases_impl<InstStr, RC, MemOp, "lq", "hq">;
  defm : vpclmulqdq_aliases_impl<InstStr, RC, MemOp, "lq", "lq">;
}

// AVX aliases
defm : vpclmulqdq_aliases<"VPCLMULQDQ", VR128, i128mem>;
defm : vpclmulqdq_aliases<"VPCLMULQDQY", VR256, i256mem>;

//===----------------------------------------------------------------------===//
// SSE4A Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasSSE4A] in {

let ExeDomain = SSEPackedInt in {
let Constraints = "$src = $dst" in {
def EXTRQI : Ii8<0x78, MRMXr, (outs VR128:$dst),
                 (ins VR128:$src, u8imm:$len, u8imm:$idx),
                 "extrq\t{$idx, $len, $src|$src, $len, $idx}",
                 [(set VR128:$dst, (X86extrqi VR128:$src, timm:$len,
                                    timm:$idx))]>,
                 PD, Sched<[SchedWriteVecALU.XMM]>;
def EXTRQ  : I<0x79, MRMSrcReg, (outs VR128:$dst),
               (ins VR128:$src, VR128:$mask),
               "extrq\t{$mask, $src|$src, $mask}",
               [(set VR128:$dst, (int_x86_sse4a_extrq VR128:$src,
                                  VR128:$mask))]>,
               PD, Sched<[SchedWriteVecALU.XMM]>;

def INSERTQI : Ii8<0x78, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src, VR128:$src2, u8imm:$len, u8imm:$idx),
                   "insertq\t{$idx, $len, $src2, $src|$src, $src2, $len, $idx}",
                   [(set VR128:$dst, (X86insertqi VR128:$src, VR128:$src2,
                                      timm:$len, timm:$idx))]>,
                   XD, Sched<[SchedWriteVecALU.XMM]>;
def INSERTQ  : I<0x79, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src, VR128:$mask),
                 "insertq\t{$mask, $src|$src, $mask}",
                 [(set VR128:$dst, (int_x86_sse4a_insertq VR128:$src,
                                    VR128:$mask))]>,
                 XD, Sched<[SchedWriteVecALU.XMM]>;
} // Constraints = "$src = $dst"
} // ExeDomain = SSEPackedInt

// Non-temporal (unaligned) scalar stores.
let AddedComplexity = 400 in { // Prefer non-temporal versions
let hasSideEffects = 0, mayStore = 1, SchedRW = [SchedWriteFMoveLSNT.Scl.MR] in {
def MOVNTSS : I<0x2B, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
                "movntss\t{$src, $dst|$dst, $src}", []>, XS;

def MOVNTSD : I<0x2B, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                "movntsd\t{$src, $dst|$dst, $src}", []>, XD;
} // SchedRW
def : Pat<(nontemporalstore FR32:$src, addr:$dst),
          (MOVNTSS addr:$dst, (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)))>;

def : Pat<(nontemporalstore FR64:$src, addr:$dst),
          (MOVNTSD addr:$dst, (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))>;

} // AddedComplexity
} // HasSSE4A

//===----------------------------------------------------------------------===//
// AVX Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VBROADCAST - Load from memory and broadcast to all elements of the
// destination operand
//
class avx_broadcast_rm<bits<8> opc, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, ValueType VT,
                       PatFrag bcast_frag, SchedWrite Sched> :
  AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
        !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
        [(set RC:$dst, (VT (bcast_frag addr:$src)))]>,
  Sched<[Sched]>, VEX;

// AVX2 adds register forms
class avx2_broadcast_rr<bits<8> opc, string OpcodeStr, RegisterClass RC,
                        ValueType ResVT, ValueType OpVT, SchedWrite Sched> :
  AVX28I<opc, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
         [(set RC:$dst, (ResVT (X86VBroadcast (OpVT VR128:$src))))]>,
  Sched<[Sched]>, VEX;

let ExeDomain = SSEPackedSingle, Predicates = [HasAVX, NoVLX] in {
  def VBROADCASTSSrm  : avx_broadcast_rm<0x18, "vbroadcastss", VR128,
                                         f32mem, v4f32, X86VBroadcastld32,
                                         SchedWriteFShuffle.XMM.Folded>;
  def VBROADCASTSSYrm : avx_broadcast_rm<0x18, "vbroadcastss", VR256,
                                         f32mem, v8f32, X86VBroadcastld32,
                                         SchedWriteFShuffle.XMM.Folded>, VEX_L;
}
let ExeDomain = SSEPackedDouble, Predicates = [HasAVX, NoVLX] in
def VBROADCASTSDYrm : avx_broadcast_rm<0x19, "vbroadcastsd", VR256, f64mem,
                                       v4f64, X86VBroadcastld64,
                                       SchedWriteFShuffle.XMM.Folded>, VEX_L;

let ExeDomain = SSEPackedSingle, Predicates = [HasAVX2, NoVLX] in {
  def VBROADCASTSSrr  : avx2_broadcast_rr<0x18, "vbroadcastss", VR128,
                                          v4f32, v4f32, SchedWriteFShuffle.XMM>;
  def VBROADCASTSSYrr : avx2_broadcast_rr<0x18, "vbroadcastss", VR256,
                                          v8f32, v4f32, WriteFShuffle256>, VEX_L;
}
let ExeDomain = SSEPackedDouble, Predicates = [HasAVX2, NoVLX] in
def VBROADCASTSDYrr : avx2_broadcast_rr<0x19, "vbroadcastsd", VR256,
                                        v4f64, v2f64, WriteFShuffle256>, VEX_L;

//===----------------------------------------------------------------------===//
// VBROADCAST*128 - Load from memory and broadcast 128-bit vector to both
// halves of a 256-bit vector.
//
let mayLoad = 1, hasSideEffects = 0, Predicates = [HasAVX2] in
def VBROADCASTI128 : AVX8I<0x5A, MRMSrcMem, (outs VR256:$dst),
                           (ins i128mem:$src),
                           "vbroadcasti128\t{$src, $dst|$dst, $src}", []>,
                           Sched<[WriteShuffleLd]>, VEX, VEX_L;

let mayLoad = 1, hasSideEffects = 0, Predicates = [HasAVX],
    ExeDomain = SSEPackedSingle in
def VBROADCASTF128 : AVX8I<0x1A, MRMSrcMem, (outs VR256:$dst),
                           (ins f128mem:$src),
                           "vbroadcastf128\t{$src, $dst|$dst, $src}", []>,
                           Sched<[SchedWriteFShuffle.XMM.Folded]>, VEX, VEX_L;

let Predicates = [HasAVX, NoVLX] in {
def : Pat<(v4f64 (X86SubVBroadcastld128 addr:$src)),
          (VBROADCASTF128 addr:$src)>;
def : Pat<(v8f32 (X86SubVBroadcastld128 addr:$src)),
          (VBROADCASTF128 addr:$src)>;
// NOTE: We're using FP instructions here, but execution domain fixing can
// convert to integer when profitable.
def : Pat<(v4i64 (X86SubVBroadcastld128 addr:$src)),
          (VBROADCASTF128 addr:$src)>;
def : Pat<(v8i32 (X86SubVBroadcastld128 addr:$src)),
          (VBROADCASTF128 addr:$src)>;
def : Pat<(v16i16 (X86SubVBroadcastld128 addr:$src)),
          (VBROADCASTF128 addr:$src)>;
def : Pat<(v32i8 (X86SubVBroadcastld128 addr:$src)),
          (VBROADCASTF128 addr:$src)>;
}

//===----------------------------------------------------------------------===//
// VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
//

let ExeDomain = SSEPackedSingle in {
let isCommutable = 1 in
def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, u8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", []>,
          VEX_4V, VEX_L, Sched<[WriteFShuffle256]>;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", []>,
          VEX_4V, VEX_L, Sched<[WriteFShuffle256.Folded, WriteFShuffle256.ReadAfterFold]>;
}

// Immediate transform to help with commuting.
def Perm2XCommuteImm : SDNodeXForm<timm, [{
  return getI8Imm(N->getZExtValue() ^ 0x22, SDLoc(N));
}]>;
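
// Each immediate nibble selects one 128-bit half of the result: values 0-1
// pick a half of the first source and 2-3 a half of the second. XORing with
// 0x22 flips bit 1 of both selectors, retargeting each at the other source,
// which is exactly what commuting the operands requires.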

multiclass vperm2x128_lowering<string InstrStr, ValueType VT, PatFrag memop_frag> {
  def : Pat<(VT (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 timm:$imm))),
            (!cast<Instruction>(InstrStr#rr) VR256:$src1, VR256:$src2, timm:$imm)>;
  def : Pat<(VT (X86VPerm2x128 VR256:$src1, (memop_frag addr:$src2), (i8 timm:$imm))),
            (!cast<Instruction>(InstrStr#rm) VR256:$src1, addr:$src2, timm:$imm)>;
  // Pattern with load in other operand.
  def : Pat<(VT (X86VPerm2x128 (memop_frag addr:$src2), VR256:$src1, (i8 timm:$imm))),
            (!cast<Instruction>(InstrStr#rm) VR256:$src1, addr:$src2,
                                             (Perm2XCommuteImm timm:$imm))>;
}

let Predicates = [HasAVX] in {
  defm : vperm2x128_lowering<"VPERM2F128", v4f64, loadv4f64>;
  defm : vperm2x128_lowering<"VPERM2F128", v8f32, loadv8f32>;
}

let Predicates = [HasAVX1Only] in {
  defm : vperm2x128_lowering<"VPERM2F128", v4i64,  loadv4i64>;
  defm : vperm2x128_lowering<"VPERM2F128", v8i32,  loadv8i32>;
  defm : vperm2x128_lowering<"VPERM2F128", v16i16, loadv16i16>;
  defm : vperm2x128_lowering<"VPERM2F128", v32i8,  loadv32i8>;
}

//===----------------------------------------------------------------------===//
// VINSERTF128 - Insert packed floating-point values
//
let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, u8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteFShuffle256]>, VEX_4V, VEX_L;
let mayLoad = 1 in
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f128mem:$src2, u8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteFShuffle256.Folded, WriteFShuffle256.ReadAfterFold]>, VEX_4V, VEX_L;
}

// To create a 256-bit all ones value, we should produce VCMPTRUEPS
// with YMM register containing zero.
// FIXME: Avoid producing vxorps to clear the fake inputs.
let Predicates = [HasAVX1Only] in {
def : Pat<(v8i32 immAllOnesV), (VCMPPSYrri (AVX_SET0), (AVX_SET0), 0xf)>;
}

multiclass vinsert_lowering<string InstrStr, ValueType From, ValueType To,
                            PatFrag memop_frag> {
  def : Pat<(vinsert128_insert:$ins (To VR256:$src1), (From VR128:$src2),
                                    (iPTR imm)),
            (!cast<Instruction>(InstrStr#rr) VR256:$src1, VR128:$src2,
                                             (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (To VR256:$src1),
                                    (From (memop_frag addr:$src2)),
                                    (iPTR imm)),
            (!cast<Instruction>(InstrStr#rm) VR256:$src1, addr:$src2,
                                             (INSERT_get_vinsert128_imm VR256:$ins))>;
}

let Predicates = [HasAVX, NoVLX] in {
  defm : vinsert_lowering<"VINSERTF128", v4f32, v8f32, loadv4f32>;
  defm : vinsert_lowering<"VINSERTF128", v2f64, v4f64, loadv2f64>;
}

let Predicates = [HasAVX1Only] in {
  defm : vinsert_lowering<"VINSERTF128", v2i64, v4i64,  loadv2i64>;
  defm : vinsert_lowering<"VINSERTF128", v4i32, v8i32,  loadv4i32>;
  defm : vinsert_lowering<"VINSERTF128", v8i16, v16i16, loadv8i16>;
  defm : vinsert_lowering<"VINSERTF128", v16i8, v32i8,  loadv16i8>;
}

//===----------------------------------------------------------------------===//
// VEXTRACTF128 - Extract packed floating-point values
//
let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, u8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, Sched<[WriteFShuffle256]>, VEX, VEX_L;
let mayStore = 1 in
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
          (ins f128mem:$dst, VR256:$src1, u8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, Sched<[WriteFStoreX]>, VEX, VEX_L;
}

multiclass vextract_lowering<string InstrStr, ValueType From, ValueType To> {
  def : Pat<(vextract128_extract:$ext (From VR256:$src1), (iPTR imm)),
            (To (!cast<Instruction>(InstrStr#rr)
                                    (From VR256:$src1),
                                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;
  def : Pat<(store (To (vextract128_extract:$ext (From VR256:$src1),
                                                 (iPTR imm))), addr:$dst),
            (!cast<Instruction>(InstrStr#mr) addr:$dst, VR256:$src1,
                                             (EXTRACT_get_vextract128_imm VR128:$ext))>;
}

// AVX1 patterns
let Predicates = [HasAVX, NoVLX] in {
  defm : vextract_lowering<"VEXTRACTF128", v8f32, v4f32>;
  defm : vextract_lowering<"VEXTRACTF128", v4f64, v2f64>;
}

let Predicates = [HasAVX1Only] in {
  defm : vextract_lowering<"VEXTRACTF128", v4i64,  v2i64>;
  defm : vextract_lowering<"VEXTRACTF128", v8i32,  v4i32>;
  defm : vextract_lowering<"VEXTRACTF128", v16i16, v8i16>;
  defm : vextract_lowering<"VEXTRACTF128", v32i8,  v16i8>;
}

//===----------------------------------------------------------------------===//
// VMASKMOV - Conditional SIMD Packed Loads and Stores
//
multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
                          Intrinsic IntLd, Intrinsic IntLd256,
                          Intrinsic IntSt, Intrinsic IntSt256,
                          X86SchedWriteMaskMove schedX,
                          X86SchedWriteMaskMove schedY> {
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
                  (ins VR128:$src1, f128mem:$src2),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
                  VEX_4V, Sched<[schedX.RM]>;
  def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
                  (ins VR256:$src1, f256mem:$src2),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
                  VEX_4V, VEX_L, Sched<[schedY.RM]>;
  def mr  : AVX8I<opc_mr, MRMDestMem, (outs),
                  (ins f128mem:$dst, VR128:$src1, VR128:$src2),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>,
                  VEX_4V, Sched<[schedX.MR]>;
  def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
                  (ins f256mem:$dst, VR256:$src1, VR256:$src2),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>,
                  VEX_4V, VEX_L, Sched<[schedY.MR]>;
}

let ExeDomain = SSEPackedSingle in
defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
                                 int_x86_avx_maskload_ps,
                                 int_x86_avx_maskload_ps_256,
                                 int_x86_avx_maskstore_ps,
                                 int_x86_avx_maskstore_ps_256,
                                 WriteFMaskMove32, WriteFMaskMove32Y>;
let ExeDomain = SSEPackedDouble in
defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
                                 int_x86_avx_maskload_pd,
                                 int_x86_avx_maskload_pd_256,
                                 int_x86_avx_maskstore_pd,
                                 int_x86_avx_maskstore_pd_256,
                                 WriteFMaskMove64, WriteFMaskMove64Y>;

//===----------------------------------------------------------------------===//
// AVX_VNNI
//===----------------------------------------------------------------------===//
let Predicates = [HasAVXVNNI, NoVLX_Or_NoVNNI], Constraints = "$src1 = $dst",
    ExplicitVEXPrefix = 1, checkVEXPredicate = 1 in
multiclass avx_vnni_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                       bit IsCommutable> {
  let isCommutable = IsCommutable in
  def rr : AVX8I<opc, MRMSrcReg, (outs VR128:$dst),
             (ins VR128:$src1, VR128:$src2, VR128:$src3),
             !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
             [(set VR128:$dst, (v4i32 (OpNode VR128:$src1,
                                       VR128:$src2, VR128:$src3)))]>,
             VEX_4V, Sched<[SchedWriteVecIMul.XMM]>;

  def rm : AVX8I<opc, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, VR128:$src2, i128mem:$src3),
             !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
             [(set VR128:$dst, (v4i32 (OpNode VR128:$src1, VR128:$src2,
                                      (loadv4i32 addr:$src3))))]>,
             VEX_4V, Sched<[SchedWriteVecIMul.XMM]>;

  let isCommutable = IsCommutable in
  def Yrr : AVX8I<opc, MRMSrcReg, (outs VR256:$dst),
             (ins VR256:$src1, VR256:$src2, VR256:$src3),
             !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
             [(set VR256:$dst, (v8i32 (OpNode VR256:$src1,
                                       VR256:$src2, VR256:$src3)))]>,
             VEX_4V, VEX_L, Sched<[SchedWriteVecIMul.YMM]>;

  def Yrm : AVX8I<opc, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, VR256:$src2, i256mem:$src3),
             !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
             [(set VR256:$dst, (v8i32 (OpNode VR256:$src1, VR256:$src2,
                                      (loadv8i32 addr:$src3))))]>,
             VEX_4V, VEX_L, Sched<[SchedWriteVecIMul.YMM]>;
}

defm VPDPBUSD  : avx_vnni_rm<0x50, "vpdpbusd",  X86Vpdpbusd,  0>;
defm VPDPBUSDS : avx_vnni_rm<0x51, "vpdpbusds", X86Vpdpbusds, 0>;
defm VPDPWSSD  : avx_vnni_rm<0x52, "vpdpwssd",  X86Vpdpwssd,  1>;
defm VPDPWSSDS : avx_vnni_rm<0x53, "vpdpwssds", X86Vpdpwssds, 1>;
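
// Only fold a vpmaddwd whose result has no other uses into vpdpwssd below;
// otherwise the standalone vpmaddwd must be emitted anyway and the fusion
// saves nothing.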
def X86vpmaddwd_su : PatFrag<(ops node:$lhs, node:$rhs),
                             (X86vpmaddwd node:$lhs, node:$rhs), [{
  return N->hasOneUse();
}]>;

let Predicates = [HasAVXVNNI, NoVLX_Or_NoVNNI] in {
  def : Pat<(v8i32 (add VR256:$src1,
                        (X86vpmaddwd_su VR256:$src2, VR256:$src3))),
            (VPDPWSSDYrr VR256:$src1, VR256:$src2, VR256:$src3)>;
  def : Pat<(v8i32 (add VR256:$src1,
                        (X86vpmaddwd_su VR256:$src2, (load addr:$src3)))),
            (VPDPWSSDYrm VR256:$src1, VR256:$src2, addr:$src3)>;
  def : Pat<(v4i32 (add VR128:$src1,
                        (X86vpmaddwd_su VR128:$src2, VR128:$src3))),
            (VPDPWSSDrr VR128:$src1, VR128:$src2, VR128:$src3)>;
  def : Pat<(v4i32 (add VR128:$src1,
                        (X86vpmaddwd_su VR128:$src2, (load addr:$src3)))),
            (VPDPWSSDrm VR128:$src1, VR128:$src2, addr:$src3)>;
}

//===----------------------------------------------------------------------===//
// VPERMIL - Permute Single and Double Floating-Point Values
//

multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
                      RegisterClass RC, X86MemOperand x86memop_f,
                      X86MemOperand x86memop_i,
                      ValueType f_vt, ValueType i_vt,
                      X86FoldableSchedWrite sched,
                      X86FoldableSchedWrite varsched> {
  let Predicates = [HasAVX, NoVLX] in {
    def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               [(set RC:$dst, (f_vt (X86VPermilpv RC:$src1, (i_vt RC:$src2))))]>, VEX_4V,
               Sched<[varsched]>;
    def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, x86memop_i:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               [(set RC:$dst, (f_vt (X86VPermilpv RC:$src1,
                              (i_vt (load addr:$src2)))))]>, VEX_4V,
               Sched<[varsched.Folded, sched.ReadAfterFold]>;

    def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, u8imm:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               [(set RC:$dst, (f_vt (X86VPermilpi RC:$src1, (i8 timm:$src2))))]>, VEX,
               Sched<[sched]>;
    def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
               (ins x86memop_f:$src1, u8imm:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               [(set RC:$dst,
                 (f_vt (X86VPermilpi (load addr:$src1), (i8 timm:$src2))))]>, VEX,
               Sched<[sched.Folded]>;
  } // Predicates = [HasAVX, NoVLX]
}

let ExeDomain = SSEPackedSingle in {
  defm VPERMILPS  : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
                               v4f32, v4i32, SchedWriteFShuffle.XMM,
                               SchedWriteFVarShuffle.XMM>;
  defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
                               v8f32, v8i32, SchedWriteFShuffle.YMM,
                               SchedWriteFVarShuffle.YMM>, VEX_L;
}
let ExeDomain = SSEPackedDouble in {
  defm VPERMILPD  : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
                               v2f64, v2i64, SchedWriteFShuffle.XMM,
                               SchedWriteFVarShuffle.XMM>;
  defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
                               v4f64, v4i64, SchedWriteFShuffle.YMM,
                               SchedWriteFVarShuffle.YMM>, VEX_L;
}

//===----------------------------------------------------------------------===//
// VZERO - Zero YMM registers
// Note: These instructions do not affect YMM16-YMM31.
//

let SchedRW = [WriteSystem] in {
let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
            YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
  // Zero All YMM registers
  def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
                   [(int_x86_avx_vzeroall)]>, PS, VEX, VEX_L,
                   Requires<[HasAVX]>, VEX_WIG;

  // Zero Upper bits of YMM registers
  def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
                     [(int_x86_avx_vzeroupper)]>, PS, VEX,
                     Requires<[HasAVX]>, VEX_WIG;
} // Defs
} // SchedRW

//===----------------------------------------------------------------------===//
// Half precision conversion instructions
//

multiclass f16c_ph2ps<RegisterClass RC, X86MemOperand x86memop,
                      X86FoldableSchedWrite sched> {
  def rr : I<0x13, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
             "vcvtph2ps\t{$src, $dst|$dst, $src}",
             [(set RC:$dst, (X86any_cvtph2ps VR128:$src))]>,
             T8PD, VEX, Sched<[sched]>;
  let hasSideEffects = 0, mayLoad = 1 in
  def rm : I<0x13, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
             "vcvtph2ps\t{$src, $dst|$dst, $src}",
             []>, T8PD, VEX, Sched<[sched.Folded]>;
}

multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop,
                      SchedWrite RR, SchedWrite MR> {
  def rr : Ii8<0x1D, MRMDestReg, (outs VR128:$dst),
               (ins RC:$src1, i32u8imm:$src2),
               "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               [(set VR128:$dst, (X86any_cvtps2ph RC:$src1, timm:$src2))]>,
               TAPD, VEX, Sched<[RR]>;
  let hasSideEffects = 0, mayStore = 1 in
  def mr : Ii8<0x1D, MRMDestMem, (outs),
               (ins x86memop:$dst, RC:$src1, i32u8imm:$src2),
               "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
               TAPD, VEX, Sched<[MR]>;
}

let Predicates = [HasF16C, NoVLX] in {
  defm VCVTPH2PS  : f16c_ph2ps<VR128, f64mem, WriteCvtPH2PS>, SIMD_EXC;
  defm VCVTPH2PSY : f16c_ph2ps<VR256, f128mem, WriteCvtPH2PSY>, VEX_L, SIMD_EXC;
  defm VCVTPS2PH  : f16c_ps2ph<VR128, f64mem, WriteCvtPS2PH,
                               WriteCvtPS2PHSt>, SIMD_EXC;
  defm VCVTPS2PHY : f16c_ps2ph<VR256, f128mem, WriteCvtPS2PHY,
                               WriteCvtPS2PHYSt>, VEX_L, SIMD_EXC;

  // Pattern match vcvtph2ps of a scalar i64 load.
  def : Pat<(v4f32 (X86any_cvtph2ps (bc_v8i16 (v2i64 (X86vzload64 addr:$src))))),
            (VCVTPH2PSrm addr:$src)>;
  def : Pat<(v4f32 (X86any_cvtph2ps (bc_v8i16
                     (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (VCVTPH2PSrm addr:$src)>;
  def : Pat<(v8f32 (X86any_cvtph2ps (loadv8i16 addr:$src))),
            (VCVTPH2PSYrm addr:$src)>;
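
  // The 128-bit vcvtps2ph leaves its four f16 results in the low 64 bits of
  // the destination, so storing that quadword matches the memory form.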
  def : Pat<(store (f64 (extractelt
                         (bc_v2f64 (v8i16 (X86any_cvtps2ph VR128:$src1, timm:$src2))),
                         (iPTR 0))), addr:$dst),
            (VCVTPS2PHmr addr:$dst, VR128:$src1, timm:$src2)>;
  def : Pat<(store (i64 (extractelt
                         (bc_v2i64 (v8i16 (X86any_cvtps2ph VR128:$src1, timm:$src2))),
                         (iPTR 0))), addr:$dst),
            (VCVTPS2PHmr addr:$dst, VR128:$src1, timm:$src2)>;
  def : Pat<(store (v8i16 (X86any_cvtps2ph VR256:$src1, timm:$src2)), addr:$dst),
            (VCVTPS2PHYmr addr:$dst, VR256:$src1, timm:$src2)>;
}

//===----------------------------------------------------------------------===//
// AVX2 Instructions
//===----------------------------------------------------------------------===//

/// AVX2_blend_rmi - AVX2 blend with 8-bit immediate
multiclass AVX2_blend_rmi<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, X86FoldableSchedWrite sched,
                          RegisterClass RC,
                          X86MemOperand x86memop, SDNodeXForm commuteXForm> {
  let isCommutable = 1 in
  def rri : AVX2AIi8<opc, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, RC:$src2, u8imm:$src3),
             !strconcat(OpcodeStr,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
             [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2, timm:$src3)))]>,
             Sched<[sched]>, VEX_4V;
  def rmi : AVX2AIi8<opc, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, x86memop:$src2, u8imm:$src3),
             !strconcat(OpcodeStr,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
             [(set RC:$dst,
               (OpVT (OpNode RC:$src1, (load addr:$src2), timm:$src3)))]>,
             Sched<[sched.Folded, sched.ReadAfterFold]>, VEX_4V;

  // Pattern to commute if load is in first source.
  def : Pat<(OpVT (OpNode (load addr:$src2), RC:$src1, timm:$src3)),
            (!cast<Instruction>(NAME#"rmi") RC:$src1, addr:$src2,
                                            (commuteXForm timm:$src3))>;
}

let Predicates = [HasAVX2] in {
defm VPBLENDD : AVX2_blend_rmi<0x02, "vpblendd", X86Blendi, v4i32,
                               SchedWriteBlend.XMM, VR128, i128mem,
                               BlendCommuteImm4>;
defm VPBLENDDY : AVX2_blend_rmi<0x02, "vpblendd", X86Blendi, v8i32,
                                SchedWriteBlend.YMM, VR256, i256mem,
                                BlendCommuteImm8>, VEX_L;
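
// vpblendd only blends 32-bit elements, so 64-bit-element blends are handled
// by widening the immediate: each element bit becomes two adjacent dword bits
// (e.g. v4i64 imm 0b0101 -> v8i32 imm 0b00110011), which is what the
// BlendScaleImm*/BlendScaleCommuteImm* transforms compute.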
def : Pat<(X86Blendi (v4i64 VR256:$src1), (v4i64 VR256:$src2), timm:$src3),
          (VPBLENDDYrri VR256:$src1, VR256:$src2, (BlendScaleImm4 timm:$src3))>;
def : Pat<(X86Blendi VR256:$src1, (loadv4i64 addr:$src2), timm:$src3),
          (VPBLENDDYrmi VR256:$src1, addr:$src2, (BlendScaleImm4 timm:$src3))>;
def : Pat<(X86Blendi (loadv4i64 addr:$src2), VR256:$src1, timm:$src3),
          (VPBLENDDYrmi VR256:$src1, addr:$src2, (BlendScaleCommuteImm4 timm:$src3))>;

def : Pat<(X86Blendi (v2i64 VR128:$src1), (v2i64 VR128:$src2), timm:$src3),
          (VPBLENDDrri VR128:$src1, VR128:$src2, (BlendScaleImm2to4 timm:$src3))>;
def : Pat<(X86Blendi VR128:$src1, (loadv2i64 addr:$src2), timm:$src3),
          (VPBLENDDrmi VR128:$src1, addr:$src2, (BlendScaleImm2to4 timm:$src3))>;
def : Pat<(X86Blendi (loadv2i64 addr:$src2), VR128:$src1, timm:$src3),
          (VPBLENDDrmi VR128:$src1, addr:$src2, (BlendScaleCommuteImm2to4 timm:$src3))>;
}

// For insertion into the zero index (low half) of a 256-bit vector, it is
// more efficient to generate a blend with immediate instead of an insert*128.
// NOTE: We're using FP instructions here, but execution domain fixing should
// take care of using integer instructions when profitable.
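// Each blend-immediate bit selects the corresponding dword from the second
// operand, so 0xf takes the low four dwords from the freshly inserted value
// and 0xf0 keeps only the upper dwords of a loaded operand.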
let Predicates = [HasAVX] in {
def : Pat<(insert_subvector (v8i32 VR256:$src1), (v4i32 VR128:$src2), (iPTR 0)),
          (VBLENDPSYrri VR256:$src1,
                        (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
                                       VR128:$src2, sub_xmm), 0xf)>;
def : Pat<(insert_subvector (v4i64 VR256:$src1), (v2i64 VR128:$src2), (iPTR 0)),
          (VBLENDPSYrri VR256:$src1,
                        (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
                                       VR128:$src2, sub_xmm), 0xf)>;
def : Pat<(insert_subvector (v16i16 VR256:$src1), (v8i16 VR128:$src2), (iPTR 0)),
          (VBLENDPSYrri VR256:$src1,
                        (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
                                       VR128:$src2, sub_xmm), 0xf)>;
def : Pat<(insert_subvector (v32i8 VR256:$src1), (v16i8 VR128:$src2), (iPTR 0)),
          (VBLENDPSYrri VR256:$src1,
                        (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
                                       VR128:$src2, sub_xmm), 0xf)>;

def : Pat<(insert_subvector (loadv8i32 addr:$src2), (v4i32 VR128:$src1), (iPTR 0)),
          (VBLENDPSYrmi (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
                                       VR128:$src1, sub_xmm), addr:$src2, 0xf0)>;
def : Pat<(insert_subvector (loadv4i64 addr:$src2), (v2i64 VR128:$src1), (iPTR 0)),
          (VBLENDPSYrmi (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
                                       VR128:$src1, sub_xmm), addr:$src2, 0xf0)>;
def : Pat<(insert_subvector (loadv16i16 addr:$src2), (v8i16 VR128:$src1), (iPTR 0)),
          (VBLENDPSYrmi (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
                                       VR128:$src1, sub_xmm), addr:$src2, 0xf0)>;
def : Pat<(insert_subvector (loadv32i8 addr:$src2), (v16i8 VR128:$src1), (iPTR 0)),
          (VBLENDPSYrmi (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
                                       VR128:$src1, sub_xmm), addr:$src2, 0xf0)>;
}

//===----------------------------------------------------------------------===//
// VPBROADCAST - Load from memory and broadcast to all elements of the
// destination operand
//
multiclass avx2_broadcast<bits<8> opc, string OpcodeStr,
                          X86MemOperand x86memop, PatFrag bcast_frag,
                          ValueType OpVT128, ValueType OpVT256, Predicate prd> {
  let Predicates = [HasAVX2, prd] in {
    def rr  : AVX28I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set VR128:$dst,
                       (OpVT128 (X86VBroadcast (OpVT128 VR128:$src))))]>,
                     Sched<[SchedWriteShuffle.XMM]>, VEX;
    def rm  : AVX28I<opc, MRMSrcMem, (outs VR128:$dst), (ins x86memop:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set VR128:$dst,
                       (OpVT128 (bcast_frag addr:$src)))]>,
                     Sched<[SchedWriteShuffle.XMM.Folded]>, VEX;
    def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst,
                       (OpVT256 (X86VBroadcast (OpVT128 VR128:$src))))]>,
                     Sched<[WriteShuffle256]>, VEX, VEX_L;
    def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst), (ins x86memop:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst,
                       (OpVT256 (bcast_frag addr:$src)))]>,
                     Sched<[SchedWriteShuffle.XMM.Folded]>, VEX, VEX_L;

    // Provide aliases for broadcast from the same register class that
    // automatically does the extract.
    def : Pat<(OpVT256 (X86VBroadcast (OpVT256 VR256:$src))),
              (!cast<Instruction>(NAME#"Yrr")
                 (OpVT128 (EXTRACT_SUBREG (OpVT256 VR256:$src),sub_xmm)))>;
  }
}

defm VPBROADCASTB : avx2_broadcast<0x78, "vpbroadcastb", i8mem, X86VBroadcastld8,
                                   v16i8, v32i8, NoVLX_Or_NoBWI>;
defm VPBROADCASTW : avx2_broadcast<0x79, "vpbroadcastw", i16mem, X86VBroadcastld16,
                                   v8i16, v16i16, NoVLX_Or_NoBWI>;
defm VPBROADCASTD : avx2_broadcast<0x58, "vpbroadcastd", i32mem, X86VBroadcastld32,
                                   v4i32, v8i32, NoVLX>;
defm VPBROADCASTQ : avx2_broadcast<0x59, "vpbroadcastq", i64mem, X86VBroadcastld64,
                                   v2i64, v4i64, NoVLX>;

let Predicates = [HasAVX2, NoVLX] in {
  // Provide fallback in case the load node that is used in the patterns above
  // is used by additional users, which prevents the pattern selection.
  def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
            (VBROADCASTSSrr (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)))>;
  def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
            (VBROADCASTSSYrr (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)))>;
  def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
            (VBROADCASTSDYrr (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))>;
}

let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
  def : Pat<(v16i8 (X86VBroadcast GR8:$src)),
            (VPBROADCASTBrr (VMOVDI2PDIrr
                             (i32 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
                                                 GR8:$src, sub_8bit))))>;
  def : Pat<(v32i8 (X86VBroadcast GR8:$src)),
            (VPBROADCASTBYrr (VMOVDI2PDIrr
                              (i32 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
                                                  GR8:$src, sub_8bit))))>;

  def : Pat<(v8i16 (X86VBroadcast GR16:$src)),
            (VPBROADCASTWrr (VMOVDI2PDIrr
                             (i32 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
                                                 GR16:$src, sub_16bit))))>;
  def : Pat<(v16i16 (X86VBroadcast GR16:$src)),
            (VPBROADCASTWYrr (VMOVDI2PDIrr
                              (i32 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
                                                  GR16:$src, sub_16bit))))>;
}
let Predicates = [HasAVX2, NoVLX] in {
  def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
            (VPBROADCASTDrr (VMOVDI2PDIrr GR32:$src))>;
  def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
            (VPBROADCASTDYrr (VMOVDI2PDIrr GR32:$src))>;
  def : Pat<(v2i64 (X86VBroadcast GR64:$src)),
            (VPBROADCASTQrr (VMOV64toPQIrr GR64:$src))>;
  def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
            (VPBROADCASTQYrr (VMOV64toPQIrr GR64:$src))>;
}

// AVX1 broadcast patterns
let Predicates = [HasAVX1Only] in {
  def : Pat<(v8i32 (X86VBroadcastld32 addr:$src)),
            (VBROADCASTSSYrm addr:$src)>;
  def : Pat<(v4i64 (X86VBroadcastld64 addr:$src)),
            (VBROADCASTSDYrm addr:$src)>;
  def : Pat<(v4i32 (X86VBroadcastld32 addr:$src)),
            (VBROADCASTSSrm addr:$src)>;
}

// Provide fallback in case the load node that is used in the patterns above
// is used by additional users, which prevents the pattern selection.
let Predicates = [HasAVX, NoVLX] in {
  // 128bit broadcasts:
  def : Pat<(v2f64 (X86VBroadcast f64:$src)),
            (VMOVDDUPrr (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))>;
  def : Pat<(v2f64 (X86VBroadcastld64 addr:$src)),
            (VMOVDDUPrm addr:$src)>;

  def : Pat<(v2f64 (X86VBroadcast v2f64:$src)),
            (VMOVDDUPrr VR128:$src)>;
}

let Predicates = [HasAVX1Only] in {
  def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
            (VPERMILPSri (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 0)>;
  def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
            (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
              (v4f32 (VPERMILPSri (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 0)), sub_xmm),
              (v4f32 (VPERMILPSri (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 0)), 1)>;
  def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
            (VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
              (v2f64 (VMOVDDUPrr (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))), sub_xmm),
              (v2f64 (VMOVDDUPrr (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))), 1)>;

  def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
            (VPSHUFDri (VMOVDI2PDIrr GR32:$src), 0)>;
  def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
            (VINSERTF128rr (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
              (v4i32 (VPSHUFDri (VMOVDI2PDIrr GR32:$src), 0)), sub_xmm),
              (v4i32 (VPSHUFDri (VMOVDI2PDIrr GR32:$src), 0)), 1)>;
  def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
            (VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
              (v4i32 (VPSHUFDri (VMOV64toPQIrr GR64:$src), 0x44)), sub_xmm),
              (v4i32 (VPSHUFDri (VMOV64toPQIrr GR64:$src), 0x44)), 1)>;

  def : Pat<(v2i64 (X86VBroadcast i64:$src)),
            (VPSHUFDri (VMOV64toPQIrr GR64:$src), 0x44)>;
  def : Pat<(v2i64 (X86VBroadcastld64 addr:$src)),
            (VMOVDDUPrm addr:$src)>;
}

//===----------------------------------------------------------------------===//
// VPERM - Permute instructions
//

multiclass avx2_perm<bits<8> opc, string OpcodeStr,
                     ValueType OpVT, X86FoldableSchedWrite Sched,
                     X86MemOperand memOp> {
  let Predicates = [HasAVX2, NoVLX] in {
    def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
                     (ins VR256:$src1, VR256:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set VR256:$dst,
                       (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>,
                     Sched<[Sched]>, VEX_4V, VEX_L;
    def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
                     (ins VR256:$src1, memOp:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set VR256:$dst,
                       (OpVT (X86VPermv VR256:$src1,
                              (load addr:$src2))))]>,
                     Sched<[Sched.Folded, Sched.ReadAfterFold]>, VEX_4V, VEX_L;
  }
}

defm VPERMD : avx2_perm<0x36, "vpermd", v8i32, WriteVarShuffle256, i256mem>;
let ExeDomain = SSEPackedSingle in
defm VPERMPS : avx2_perm<0x16, "vpermps", v8f32, WriteFVarShuffle256, f256mem>;

multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                         ValueType OpVT, X86FoldableSchedWrite Sched,
                         X86MemOperand memOp> {
  let Predicates = [HasAVX2, NoVLX] in {
    def Yri : AVX2AIi8<opc, MRMSrcReg, (outs VR256:$dst),
                       (ins VR256:$src1, u8imm:$src2),
                       !strconcat(OpcodeStr,
                           "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                       [(set VR256:$dst,
                         (OpVT (X86VPermi VR256:$src1, (i8 timm:$src2))))]>,
                       Sched<[Sched]>, VEX, VEX_L;
    def Ymi : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst),
                       (ins memOp:$src1, u8imm:$src2),
                       !strconcat(OpcodeStr,
                           "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                       [(set VR256:$dst,
                         (OpVT (X86VPermi (mem_frag addr:$src1),
                                          (i8 timm:$src2))))]>,
                       Sched<[Sched.Folded, Sched.ReadAfterFold]>, VEX, VEX_L;
  }
}

defm VPERMQ : avx2_perm_imm<0x00, "vpermq", loadv4i64, v4i64,
                            WriteShuffle256, i256mem>, VEX_W;
let ExeDomain = SSEPackedDouble in
defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", loadv4f64, v4f64,
                             WriteFShuffle256, f256mem>, VEX_W;

//===----------------------------------------------------------------------===//
// VPERM2I128 - Permute Integer vector Values in 128-bit chunks
//
let isCommutable = 1 in
def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, u8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", []>,
          Sched<[WriteShuffle256]>, VEX_4V, VEX_L;
def VPERM2I128rm : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", []>,
          Sched<[WriteShuffle256.Folded, WriteShuffle256.ReadAfterFold]>, VEX_4V, VEX_L;

let Predicates = [HasAVX2] in {
  defm : vperm2x128_lowering<"VPERM2I128", v4i64,  loadv4i64>;
  defm : vperm2x128_lowering<"VPERM2I128", v8i32,  loadv8i32>;
  defm : vperm2x128_lowering<"VPERM2I128", v16i16, loadv16i16>;
  defm : vperm2x128_lowering<"VPERM2I128", v32i8,  loadv32i8>;
}

//===----------------------------------------------------------------------===//
// VINSERTI128 - Insert packed integer values
//
let hasSideEffects = 0 in {
def VINSERTI128rr : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, u8imm:$src3),
          "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteShuffle256]>, VEX_4V, VEX_L;
let mayLoad = 1 in
def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, i128mem:$src2, u8imm:$src3),
          "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteShuffle256.Folded, WriteShuffle256.ReadAfterFold]>, VEX_4V, VEX_L;
}

let Predicates = [HasAVX2, NoVLX] in {
  defm : vinsert_lowering<"VINSERTI128", v2i64, v4i64,  loadv2i64>;
  defm : vinsert_lowering<"VINSERTI128", v4i32, v8i32,  loadv4i32>;
  defm : vinsert_lowering<"VINSERTI128", v8i16, v16i16, loadv8i16>;
  defm : vinsert_lowering<"VINSERTI128", v16i8, v32i8,  loadv16i8>;
}

//===----------------------------------------------------------------------===//
// VEXTRACTI128 - Extract packed integer values
//
def VEXTRACTI128rr : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, u8imm:$src2),
          "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
          Sched<[WriteShuffle256]>, VEX, VEX_L;
let hasSideEffects = 0, mayStore = 1 in
def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs),
          (ins i128mem:$dst, VR256:$src1, u8imm:$src2),
          "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
          Sched<[SchedWriteVecMoveLS.XMM.MR]>, VEX, VEX_L;

let Predicates = [HasAVX2, NoVLX] in {
  defm : vextract_lowering<"VEXTRACTI128", v4i64,  v2i64>;
  defm : vextract_lowering<"VEXTRACTI128", v8i32,  v4i32>;
  defm : vextract_lowering<"VEXTRACTI128", v16i16, v8i16>;
  defm : vextract_lowering<"VEXTRACTI128", v32i8,  v16i8>;
}

//===----------------------------------------------------------------------===//
// VPMASKMOV - Conditional SIMD Integer Packed Loads and Stores
//
multiclass avx2_pmovmask<string OpcodeStr,
                         Intrinsic IntLd128, Intrinsic IntLd256,
                         Intrinsic IntSt128, Intrinsic IntSt256,
                         X86SchedWriteMaskMove schedX,
                         X86SchedWriteMaskMove schedY> {
  def rm  : AVX28I<0x8c, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i128mem:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR128:$dst, (IntLd128 addr:$src2, VR128:$src1))]>,
                   VEX_4V, Sched<[schedX.RM]>;
  def Yrm : AVX28I<0x8c, MRMSrcMem, (outs VR256:$dst),
                   (ins VR256:$src1, i256mem:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
                   VEX_4V, VEX_L, Sched<[schedY.RM]>;
  def mr  : AVX28I<0x8e, MRMDestMem, (outs),
                   (ins i128mem:$dst, VR128:$src1, VR128:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(IntSt128 addr:$dst, VR128:$src1, VR128:$src2)]>,
                   VEX_4V, Sched<[schedX.MR]>;
  def Ymr : AVX28I<0x8e, MRMDestMem, (outs),
                   (ins i256mem:$dst, VR256:$src1, VR256:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>,
                   VEX_4V, VEX_L, Sched<[schedY.MR]>;
}

defm VPMASKMOVD : avx2_pmovmask<"vpmaskmovd",
                                int_x86_avx2_maskload_d,
                                int_x86_avx2_maskload_d_256,
                                int_x86_avx2_maskstore_d,
                                int_x86_avx2_maskstore_d_256,
                                WriteVecMaskMove32, WriteVecMaskMove32Y>;
defm VPMASKMOVQ : avx2_pmovmask<"vpmaskmovq",
                                int_x86_avx2_maskload_q,
                                int_x86_avx2_maskload_q_256,
                                int_x86_avx2_maskstore_q,
                                int_x86_avx2_maskstore_q_256,
                                WriteVecMaskMove64, WriteVecMaskMove64Y>, VEX_W;

multiclass maskmov_lowering<string InstrStr, RegisterClass RC, ValueType VT,
                            ValueType MaskVT> {
  // masked store
  def: Pat<(masked_store (VT RC:$src), addr:$ptr, (MaskVT RC:$mask)),
           (!cast<Instruction>(InstrStr#"mr") addr:$ptr, RC:$mask, RC:$src)>;
  // masked load
  def: Pat<(VT (masked_load addr:$ptr, (MaskVT RC:$mask), undef)),
           (!cast<Instruction>(InstrStr#"rm") RC:$mask, addr:$ptr)>;
  def: Pat<(VT (masked_load addr:$ptr, (MaskVT RC:$mask),
                            (VT immAllZerosV))),
           (!cast<Instruction>(InstrStr#"rm") RC:$mask, addr:$ptr)>;
}
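
// The undef and all-zeros passthrough forms map to the same instruction
// because VMASKMOV/VPMASKMOV already zero all masked-off elements.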

let Predicates = [HasAVX] in {
  defm : maskmov_lowering<"VMASKMOVPS", VR128, v4f32, v4i32>;
  defm : maskmov_lowering<"VMASKMOVPD", VR128, v2f64, v2i64>;
  defm : maskmov_lowering<"VMASKMOVPSY", VR256, v8f32, v8i32>;
  defm : maskmov_lowering<"VMASKMOVPDY", VR256, v4f64, v4i64>;
}

let Predicates = [HasAVX1Only] in {
  // i32/i64 masked load/store is not supported; use the ps/pd versions.
  defm : maskmov_lowering<"VMASKMOVPSY", VR256, v8i32, v8i32>;
  defm : maskmov_lowering<"VMASKMOVPDY", VR256, v4i64, v4i64>;
  defm : maskmov_lowering<"VMASKMOVPS", VR128, v4i32, v4i32>;
  defm : maskmov_lowering<"VMASKMOVPD", VR128, v2i64, v2i64>;
}

let Predicates = [HasAVX2] in {
  defm : maskmov_lowering<"VPMASKMOVDY", VR256, v8i32, v8i32>;
  defm : maskmov_lowering<"VPMASKMOVQY", VR256, v4i64, v4i64>;
  defm : maskmov_lowering<"VPMASKMOVD", VR128, v4i32, v4i32>;
  defm : maskmov_lowering<"VPMASKMOVQ", VR128, v2i64, v2i64>;
}

//===----------------------------------------------------------------------===//
// Variable Bit Shifts
//
multiclass avx2_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType vt128, ValueType vt256> {
  def rr  : AVX28I<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, VR128:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR128:$dst,
                     (vt128 (OpNode VR128:$src1, (vt128 VR128:$src2))))]>,
                   VEX_4V, Sched<[SchedWriteVarVecShift.XMM]>;
  def rm  : AVX28I<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i128mem:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR128:$dst,
                     (vt128 (OpNode VR128:$src1,
                             (vt128 (load addr:$src2)))))]>,
                   VEX_4V, Sched<[SchedWriteVarVecShift.XMM.Folded,
                                  SchedWriteVarVecShift.XMM.ReadAfterFold]>;
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
                   (ins VR256:$src1, VR256:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR256:$dst,
                     (vt256 (OpNode VR256:$src1, (vt256 VR256:$src2))))]>,
                   VEX_4V, VEX_L, Sched<[SchedWriteVarVecShift.YMM]>;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
                   (ins VR256:$src1, i256mem:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR256:$dst,
                     (vt256 (OpNode VR256:$src1,
                             (vt256 (load addr:$src2)))))]>,
                   VEX_4V, VEX_L, Sched<[SchedWriteVarVecShift.YMM.Folded,
                                         SchedWriteVarVecShift.YMM.ReadAfterFold]>;
}

let Predicates = [HasAVX2, NoVLX] in {
  defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", X86vshlv, v4i32, v8i32>;
  defm VPSLLVQ : avx2_var_shift<0x47, "vpsllvq", X86vshlv, v2i64, v4i64>, VEX_W;
  defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", X86vsrlv, v4i32, v8i32>;
  defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", X86vsrlv, v2i64, v4i64>, VEX_W;
  defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", X86vsrav, v4i32, v8i32>;
}

//===----------------------------------------------------------------------===//
// VGATHER - GATHER Operations

// FIXME: Improve scheduling of gather instructions.
multiclass avx2_gather<bits<8> opc, string OpcodeStr, ValueType VTx,
                       ValueType VTy, RegisterClass RC256,
                       X86MemOperand memop128, X86MemOperand memop256,
                       ValueType MTx = VTx, ValueType MTy = VTy> {
  let mayLoad = 1, hasSideEffects = 0 in {
    def rm  : AVX28I<opc, MRMSrcMem4VOp3, (outs VR128:$dst, VR128:$mask_wb),
                     (ins VR128:$src1, memop128:$src2, VR128:$mask),
                     !strconcat(OpcodeStr,
                       "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
                     []>, VEX, Sched<[WriteLoad, WriteVecMaskedGatherWriteback]>;
    def Yrm : AVX28I<opc, MRMSrcMem4VOp3, (outs RC256:$dst, RC256:$mask_wb),
                     (ins RC256:$src1, memop256:$src2, RC256:$mask),
                     !strconcat(OpcodeStr,
                       "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
                     []>, VEX, VEX_L, Sched<[WriteLoad, WriteVecMaskedGatherWriteback]>;
  }
}

let Predicates = [HasAVX2] in {
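  // The destination and mask writeback are earlyclobber because gather
  // instructions fault (#UD) when the destination register overlaps the index
  // or mask register; register allocation must keep them distinct.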
  let mayLoad = 1, hasSideEffects = 0, Constraints
    = "@earlyclobber $dst,@earlyclobber $mask_wb, $src1 = $dst, $mask = $mask_wb"
    in {
    defm VPGATHERDQ : avx2_gather<0x90, "vpgatherdq", v2i64, v4i64,
                        VR256, vx128mem, vx256mem>, VEX_W;
    defm VPGATHERQQ : avx2_gather<0x91, "vpgatherqq", v2i64, v4i64,
                        VR256, vx128mem, vy256mem>, VEX_W;
    defm VPGATHERDD : avx2_gather<0x90, "vpgatherdd", v4i32, v8i32,
                        VR256, vx128mem, vy256mem>;
    defm VPGATHERQD : avx2_gather<0x91, "vpgatherqd", v4i32, v4i32,
                        VR128, vx64mem, vy128mem>;

    let ExeDomain = SSEPackedDouble in {
      defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", v2f64, v4f64,
                          VR256, vx128mem, vx256mem, v2i64, v4i64>, VEX_W;
      defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", v2f64, v4f64,
                          VR256, vx128mem, vy256mem, v2i64, v4i64>, VEX_W;
    }

    let ExeDomain = SSEPackedSingle in {
      defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", v4f32, v8f32,
                          VR256, vx128mem, vy256mem, v4i32, v8i32>;
      defm VGATHERQPS : avx2_gather<0x93, "vgatherqps", v4f32, v4f32,
                          VR128, vx64mem, vy128mem, v4i32, v4i32>;
    }
  }
}

//===----------------------------------------------------------------------===//
// GFNI instructions
//===----------------------------------------------------------------------===//

multiclass GF2P8MULB_rm<string OpcodeStr, ValueType OpVT,
                        RegisterClass RC, PatFrag MemOpFrag,
                        X86MemOperand X86MemOp, bit Is2Addr = 0> {
  let ExeDomain = SSEPackedInt,
      AsmString = !if(Is2Addr,
        OpcodeStr#"\t{$src2, $dst|$dst, $src2}",
        OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}") in {
    let isCommutable = 1 in
    def rr : PDI<0xCF, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2), "",
                 [(set RC:$dst, (OpVT (X86GF2P8mulb RC:$src1, RC:$src2)))]>,
             Sched<[SchedWriteVecALU.XMM]>, T8PD;

    def rm : PDI<0xCF, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, X86MemOp:$src2), "",
                 [(set RC:$dst, (OpVT (X86GF2P8mulb RC:$src1,
                                       (MemOpFrag addr:$src2))))]>,
             Sched<[SchedWriteVecALU.XMM.Folded, SchedWriteVecALU.XMM.ReadAfterFold]>, T8PD;
  }
}

multiclass GF2P8AFFINE_rmi<bits<8> Op, string OpStr, ValueType OpVT,
                           SDNode OpNode, RegisterClass RC, PatFrag MemOpFrag,
                           X86MemOperand X86MemOp, bit Is2Addr = 0> {
  let AsmString = !if(Is2Addr,
      OpStr#"\t{$src3, $src2, $dst|$dst, $src2, $src3}",
      OpStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}") in {
    def rri : Ii8<Op, MRMSrcReg, (outs RC:$dst),
                  (ins RC:$src1, RC:$src2, u8imm:$src3), "",
                  [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2, timm:$src3)))],
                  SSEPackedInt>, Sched<[SchedWriteVecALU.XMM]>;
    def rmi : Ii8<Op, MRMSrcMem, (outs RC:$dst),
                  (ins RC:$src1, X86MemOp:$src2, u8imm:$src3), "",
                  [(set RC:$dst, (OpVT (OpNode RC:$src1,
                                        (MemOpFrag addr:$src2),
                                        timm:$src3)))], SSEPackedInt>,
              Sched<[SchedWriteVecALU.XMM.Folded, SchedWriteVecALU.XMM.ReadAfterFold]>;
  }
}
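
// For the affine instructions, $src2 supplies the 8x8 GF(2) bit matrix and
// $src3 the imm8 constant that is XORed into each result byte after the
// matrix multiply.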

multiclass GF2P8AFFINE_common<bits<8> Op, string OpStr, SDNode OpNode> {
  let Constraints = "$src1 = $dst",
      Predicates = [HasGFNI, UseSSE2] in
  defm NAME : GF2P8AFFINE_rmi<Op, OpStr, v16i8, OpNode,
                              VR128, load, i128mem, 1>;
  let Predicates = [HasGFNI, HasAVX, NoVLX_Or_NoBWI] in {
    defm V#NAME   : GF2P8AFFINE_rmi<Op, "v"#OpStr, v16i8, OpNode, VR128,
                                    load, i128mem>, VEX_4V, VEX_W;
    defm V#NAME#Y : GF2P8AFFINE_rmi<Op, "v"#OpStr, v32i8, OpNode, VR256,
                                    load, i256mem>, VEX_4V, VEX_L, VEX_W;
  }
}

// GF2P8MULB
let Constraints = "$src1 = $dst",
    Predicates = [HasGFNI, UseSSE2] in
defm GF2P8MULB : GF2P8MULB_rm<"gf2p8mulb", v16i8, VR128, memop,
                              i128mem, 1>;
let Predicates = [HasGFNI, HasAVX, NoVLX_Or_NoBWI] in {
  defm VGF2P8MULB  : GF2P8MULB_rm<"vgf2p8mulb", v16i8, VR128, load,
                                  i128mem>, VEX_4V;
  defm VGF2P8MULBY : GF2P8MULB_rm<"vgf2p8mulb", v32i8, VR256, load,
                                  i256mem>, VEX_4V, VEX_L;
}
// GF2P8AFFINEINVQB, GF2P8AFFINEQB
let isCommutable = 0 in {
  defm GF2P8AFFINEINVQB : GF2P8AFFINE_common<0xCF, "gf2p8affineinvqb",
                                             X86GF2P8affineinvqb>, TAPD;
  defm GF2P8AFFINEQB    : GF2P8AFFINE_common<0xCE, "gf2p8affineqb",
                                             X86GF2P8affineqb>, TAPD;
}