1 //====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 SSE instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
17 //===----------------------------------------------------------------------===//
18 // SSE 1 & 2 Instruction Classes
19 //===----------------------------------------------------------------------===//
21 /// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
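/// For example, the basic binary FP arithmetic instructions (ADDSS/ADDSD,
/// MULSS/MULSD, and friends) defined later in this file are built from this
/// class, with OpNode bound to fadd, fmul, etc.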
22 multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
23 RegisterClass RC, X86MemOperand x86memop,
25 let isCommutable = 1 in {
26 def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
28 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
29 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
30 [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
32 def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
34 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
35 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
36 [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
39 /// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
40 multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
41 string asm, string SSEVer, string FPSizeStr,
42 Operand memopr, ComplexPattern mem_cpat,
44 def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
46 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
47 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
48 [(set RC:$dst, (!cast<Intrinsic>(
49 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
50 RC:$src1, RC:$src2))]>;
51 def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
53 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
54 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
55 [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
56 SSEVer, "_", OpcodeStr, FPSizeStr))
57 RC:$src1, mem_cpat:$src2))]>;
60 /// sse12_fp_packed - SSE 1 & 2 packed instructions class
61 multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
62 RegisterClass RC, ValueType vt,
63 X86MemOperand x86memop, PatFrag mem_frag,
64 Domain d, bit Is2Addr = 1> {
65 let isCommutable = 1 in
66 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
68 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
69 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
70 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
72 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
74 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
75 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
76 [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
79 /// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
80 multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
81 string OpcodeStr, X86MemOperand x86memop,
82 list<dag> pat_rr, list<dag> pat_rm,
84 let isCommutable = 1 in
85 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
87 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
88 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
90 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
92 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
93 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
97 /// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
98 multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
99 string asm, string SSEVer, string FPSizeStr,
100 X86MemOperand x86memop, PatFrag mem_frag,
101 Domain d, bit Is2Addr = 1> {
102 def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
104 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
105 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
106 [(set RC:$dst, (!cast<Intrinsic>(
107 !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
108 RC:$src1, RC:$src2))], d>;
109 def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
111 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
112 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
113 [(set RC:$dst, (!cast<Intrinsic>(
114 !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
115 RC:$src1, (mem_frag addr:$src2)))], d>;
118 //===----------------------------------------------------------------------===//
119 // SSE 1 & 2 - Move Instructions
120 //===----------------------------------------------------------------------===//
122 class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
123 SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
124 [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;
126 // Loading from memory automatically zeroes the upper bits.
127 class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
128 PatFrag mem_pat, string OpcodeStr> :
129 SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
130 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
131 [(set RC:$dst, (mem_pat addr:$src))]>;
133 // Move Instructions. Register-to-register movss/movsd is not used for FR32/64
134 // register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
135 // is used instead. Register-to-register movss/movsd is not modeled as an
136 // INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
137 // in terms of a copy, and, as just mentioned, we don't use movss/movsd for copies.
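// (A register-to-register movss/movsd writes only the low 32/64 bits of the
// destination and leaves the upper bits unchanged, so using it as a plain
// copy would create a false dependency on the old value of the destination.)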
138 let isAsmParserOnly = 0 in {
139 def VMOVSSrr : sse12_move_rr<FR32, v4f32,
140 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
141 def VMOVSDrr : sse12_move_rr<FR64, v2f64,
142 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;
144 let canFoldAsLoad = 1, isReMaterializable = 1 in {
145 def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;
147 let AddedComplexity = 20 in
148 def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
152 let Constraints = "$src1 = $dst" in {
153 def MOVSSrr : sse12_move_rr<FR32, v4f32,
154 "movss\t{$src2, $dst|$dst, $src2}">, XS;
155 def MOVSDrr : sse12_move_rr<FR64, v2f64,
156 "movsd\t{$src2, $dst|$dst, $src2}">, XD;
159 let canFoldAsLoad = 1, isReMaterializable = 1 in {
160 def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;
162 let AddedComplexity = 20 in
163 def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
166 let AddedComplexity = 15 in {
167 // Extract the low 32-bit value from one vector and insert it into another.
168 def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
169 (MOVSSrr (v4f32 VR128:$src1),
170 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
171 // Extract the low 64-bit value from one vector and insert it into another.
172 def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
173 (MOVSDrr (v2f64 VR128:$src1),
174 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
177 // Implicitly promote a 32-bit scalar to a vector.
178 def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
179 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
180 // Implicitly promote a 64-bit scalar to a vector.
181 def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
182 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
183 // Implicitly promote a 32-bit scalar to a vector.
184 def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
185 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
186 // Implicitly promote a 64-bit scalar to a vector.
187 def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
188 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
190 let AddedComplexity = 20 in {
191 // MOVSSrm zeros the high parts of the register; represent this
192 // with SUBREG_TO_REG.
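// (SUBREG_TO_REG with a 0 immediate asserts that the bits of the wider
// register outside the inserted subregister are already zero, so no extra
// zeroing instruction has to be emitted.)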
193 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
194 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
195 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
196 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
197 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
198 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
199 // MOVSDrm zeros the high parts of the register; represent this
200 // with SUBREG_TO_REG.
201 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
202 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
203 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
204 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
205 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
206 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
207 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
208 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
209 def : Pat<(v2f64 (X86vzload addr:$src)),
210 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
213 // Store scalar value to memory.
214 def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
215 "movss\t{$src, $dst|$dst, $src}",
216 [(store FR32:$src, addr:$dst)]>;
217 def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
218 "movsd\t{$src, $dst|$dst, $src}",
219 [(store FR64:$src, addr:$dst)]>;
221 let isAsmParserOnly = 0 in {
222 def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
223 "movss\t{$src, $dst|$dst, $src}",
224 [(store FR32:$src, addr:$dst)]>, XS, VEX;
225 def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
226 "movsd\t{$src, $dst|$dst, $src}",
227 [(store FR64:$src, addr:$dst)]>, XD, VEX;
230 // Extract and store.
231 def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
234 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
235 def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
238 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
240 // Move Aligned/Unaligned floating point values
241 multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
242 X86MemOperand x86memop, PatFrag ld_frag,
243 string asm, Domain d,
244 bit IsReMaterializable = 1> {
245 let neverHasSideEffects = 1 in
246 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
247 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
248 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
249 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
250 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
251 [(set RC:$dst, (ld_frag addr:$src))], d>;
254 let isAsmParserOnly = 0 in {
255 defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
256 "movaps", SSEPackedSingle>, VEX;
257 defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
258 "movapd", SSEPackedDouble>, OpSize, VEX;
259 defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
260 "movups", SSEPackedSingle>, VEX;
261 defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
262 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
264 defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
265 "movaps", SSEPackedSingle>, VEX;
266 defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
267 "movapd", SSEPackedDouble>, OpSize, VEX;
268 defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
269 "movups", SSEPackedSingle>, VEX;
270 defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
271 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
273 defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
274 "movaps", SSEPackedSingle>, TB;
275 defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
276 "movapd", SSEPackedDouble>, TB, OpSize;
277 defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
278 "movups", SSEPackedSingle>, TB;
279 defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
280 "movupd", SSEPackedDouble, 0>, TB, OpSize;
282 let isAsmParserOnly = 0 in {
283 def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
284 "movaps\t{$src, $dst|$dst, $src}",
285 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
286 def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
287 "movapd\t{$src, $dst|$dst, $src}",
288 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
289 def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
290 "movups\t{$src, $dst|$dst, $src}",
291 [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
292 def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
293 "movupd\t{$src, $dst|$dst, $src}",
294 [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
295 def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
296 "movaps\t{$src, $dst|$dst, $src}",
297 [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
298 def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
299 "movapd\t{$src, $dst|$dst, $src}",
300 [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
301 def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
302 "movups\t{$src, $dst|$dst, $src}",
303 [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
304 def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
305 "movupd\t{$src, $dst|$dst, $src}",
306 [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
309 def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
310 def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
311 (VMOVUPSYmr addr:$dst, VR256:$src)>;
313 def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
314 def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
315 (VMOVUPDYmr addr:$dst, VR256:$src)>;
317 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
318 "movaps\t{$src, $dst|$dst, $src}",
319 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
320 def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
321 "movapd\t{$src, $dst|$dst, $src}",
322 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
323 def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
324 "movups\t{$src, $dst|$dst, $src}",
325 [(store (v4f32 VR128:$src), addr:$dst)]>;
326 def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
327 "movupd\t{$src, $dst|$dst, $src}",
328 [(store (v2f64 VR128:$src), addr:$dst)]>;
330 // Intrinsic forms of MOVUPS/D load and store
331 let isAsmParserOnly = 0 in {
332 def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
333 (ins f128mem:$dst, VR128:$src),
334 "movups\t{$src, $dst|$dst, $src}",
335 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
336 def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
337 (ins f128mem:$dst, VR128:$src),
338 "movupd\t{$src, $dst|$dst, $src}",
339 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
342 def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
343 "movups\t{$src, $dst|$dst, $src}",
344 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
345 def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
346 "movupd\t{$src, $dst|$dst, $src}",
347 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
349 // Move Low/High packed floating point values
350 multiclass sse12_mov_hilo_packed<bits<8>opc, RegisterClass RC,
351 PatFrag mov_frag, string base_opc,
353 def PSrm : PI<opc, MRMSrcMem,
354 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
355 !strconcat(base_opc, "s", asm_opr),
358 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
359 SSEPackedSingle>, TB;
361 def PDrm : PI<opc, MRMSrcMem,
362 (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
363 !strconcat(base_opc, "d", asm_opr),
364 [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
365 (scalar_to_vector (loadf64 addr:$src2)))))],
366 SSEPackedDouble>, TB, OpSize;
369 let isAsmParserOnly = 0, AddedComplexity = 20 in {
370 defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
371 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
372 defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
373 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
375 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
376 defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
377 "\t{$src2, $dst|$dst, $src2}">;
378 defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
379 "\t{$src2, $dst|$dst, $src2}">;
382 let isAsmParserOnly = 0 in {
383 def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
384 "movlps\t{$src, $dst|$dst, $src}",
385 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
386 (iPTR 0))), addr:$dst)]>, VEX;
387 def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
388 "movlpd\t{$src, $dst|$dst, $src}",
389 [(store (f64 (vector_extract (v2f64 VR128:$src),
390 (iPTR 0))), addr:$dst)]>, VEX;
392 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
393 "movlps\t{$src, $dst|$dst, $src}",
394 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
395 (iPTR 0))), addr:$dst)]>;
396 def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
397 "movlpd\t{$src, $dst|$dst, $src}",
398 [(store (f64 (vector_extract (v2f64 VR128:$src),
399 (iPTR 0))), addr:$dst)]>;
401 // v2f64 extract element 1 is always custom lowered to unpack high to low
402 // and extract element 0 so the non-store version isn't too horrible.
403 let isAsmParserOnly = 0 in {
404 def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
405 "movhps\t{$src, $dst|$dst, $src}",
406 [(store (f64 (vector_extract
407 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
408 (undef)), (iPTR 0))), addr:$dst)]>,
410 def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
411 "movhpd\t{$src, $dst|$dst, $src}",
412 [(store (f64 (vector_extract
413 (v2f64 (unpckh VR128:$src, (undef))),
414 (iPTR 0))), addr:$dst)]>,
417 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
418 "movhps\t{$src, $dst|$dst, $src}",
419 [(store (f64 (vector_extract
420 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
421 (undef)), (iPTR 0))), addr:$dst)]>;
422 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
423 "movhpd\t{$src, $dst|$dst, $src}",
424 [(store (f64 (vector_extract
425 (v2f64 (unpckh VR128:$src, (undef))),
426 (iPTR 0))), addr:$dst)]>;
428 let isAsmParserOnly = 0, AddedComplexity = 20 in {
429 def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
430 (ins VR128:$src1, VR128:$src2),
431 "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
433 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
435 def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
436 (ins VR128:$src1, VR128:$src2),
437 "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
439 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
442 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
443 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
444 (ins VR128:$src1, VR128:$src2),
445 "movlhps\t{$src2, $dst|$dst, $src2}",
447 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
448 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
449 (ins VR128:$src1, VR128:$src2),
450 "movhlps\t{$src2, $dst|$dst, $src2}",
452 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
455 def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
456 (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
457 let AddedComplexity = 20 in {
458 def : Pat<(v4f32 (movddup VR128:$src, (undef))),
459 (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
460 def : Pat<(v2i64 (movddup VR128:$src, (undef))),
461 (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
464 //===----------------------------------------------------------------------===//
465 // SSE 1 & 2 - Conversion Instructions
466 //===----------------------------------------------------------------------===//
468 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
469 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
471 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
472 [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
473 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
474 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
477 multiclass sse12_cvt_s_np<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
478 X86MemOperand x86memop, string asm> {
479 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
481 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
485 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
486 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
487 string asm, Domain d> {
488 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
489 [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
490 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
491 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
494 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
495 X86MemOperand x86memop, string asm> {
496 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
497 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
498 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
499 (ins DstRC:$src1, x86memop:$src),
500 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
503 let isAsmParserOnly = 0 in {
504 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
505 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
506 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
507 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
509 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
510 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
511 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
512 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
515 // The assembler can recognize rr 64-bit instructions by seeing an rxx
516 // register, but the same isn't true when only using memory operands;
517 // provide other assembly "l" and "q" forms to address this explicitly
518 // where appropriate to do so.
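// (With only a memory operand the integer width cannot be inferred from a
// register, so e.g. the 64-bit memory form must be spelled "cvtsi2sdq" or
// "cvtsi2ssq" explicitly.)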
519 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
521 defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
523 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
525 defm VCVTSI2SDL : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
527 defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
531 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
532 "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
533 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
534 "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
535 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
536 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
537 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
538 "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
539 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
540 "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
541 defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
542 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
543 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
544 "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
545 defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
546 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
548 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
549 // and/or XMM operand(s).
551 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
552 Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
554 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
555 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
556 [(set DstRC:$dst, (Int SrcRC:$src))]>;
557 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
558 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
559 [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
562 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
563 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
564 PatFrag ld_frag, string asm, bit Is2Addr = 1> {
565 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
567 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
568 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
569 [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
570 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
571 (ins DstRC:$src1, x86memop:$src2),
573 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
574 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
575 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
578 let isAsmParserOnly = 0 in {
579 defm Int_VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
580 f32mem, load, "cvtss2si">, XS, VEX;
581 defm Int_VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
582 int_x86_sse_cvtss2si64, f32mem, load, "cvtss2si">,
584 defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
585 f128mem, load, "cvtsd2si">, XD, VEX;
586 defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
587 int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
590 // FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_
591 // Get rid of this hack or rename the intrinsics; there are several
592 // instructions that only match with the intrinsic form, so why create duplicates
593 // just to let them be recognized by the assembler?
594 defm VCVTSD2SI_alt : sse12_cvt_s_np<0x2D, FR64, GR32, f64mem,
595 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
596 defm VCVTSD2SI64 : sse12_cvt_s_np<0x2D, FR64, GR64, f64mem,
597 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
599 defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
600 f32mem, load, "cvtss2si">, XS;
601 defm Int_CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
602 f32mem, load, "cvtss2si{q}">, XS, REX_W;
603 defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
604 f128mem, load, "cvtsd2si{l}">, XD;
605 defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
606 f128mem, load, "cvtsd2si{q}">, XD, REX_W;
609 let isAsmParserOnly = 0 in {
610 defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
611 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
612 defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
613 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
615 defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
616 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
617 defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
618 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
622 let Constraints = "$src1 = $dst" in {
623 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
624 int_x86_sse_cvtsi2ss, i32mem, loadi32,
626 defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
627 int_x86_sse_cvtsi642ss, i64mem, loadi64,
628 "cvtsi2ss{q}">, XS, REX_W;
629 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
630 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
632 defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
633 int_x86_sse2_cvtsi642sd, i64mem, loadi64,
634 "cvtsi2sd">, XD, REX_W;
639 // Aliases for intrinsics
640 let isAsmParserOnly = 0 in {
641 defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
642 f32mem, load, "cvttss2si">, XS, VEX;
643 defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
644 int_x86_sse_cvttss2si64, f32mem, load,
645 "cvttss2si">, XS, VEX, VEX_W;
646 defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
647 f128mem, load, "cvttsd2si">, XD, VEX;
648 defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
649 int_x86_sse2_cvttsd2si64, f128mem, load,
650 "cvttsd2si">, XD, VEX, VEX_W;
652 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
653 f32mem, load, "cvttss2si">, XS;
654 defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
655 int_x86_sse_cvttss2si64, f32mem, load,
656 "cvttss2si{q}">, XS, REX_W;
657 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
658 f128mem, load, "cvttsd2si">, XD;
659 defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
660 int_x86_sse2_cvttsd2si64, f128mem, load,
661 "cvttsd2si{q}">, XD, REX_W;
663 let isAsmParserOnly = 0, Pattern = []<dag> in {
664 defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
665 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
666 defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
667 "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
669 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
670 "cvtdq2ps\t{$src, $dst|$dst, $src}",
671 SSEPackedSingle>, TB, VEX;
672 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
673 "cvtdq2ps\t{$src, $dst|$dst, $src}",
674 SSEPackedSingle>, TB, VEX;
676 let Pattern = []<dag> in {
677 defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
678 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
679 defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
680 "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
681 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
682 "cvtdq2ps\t{$src, $dst|$dst, $src}",
683 SSEPackedSingle>, TB; /* PD SSE3 form is available */
688 // Convert scalar double to scalar single
689 let isAsmParserOnly = 0 in {
690 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
691 (ins FR64:$src1, FR64:$src2),
692 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
694 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
695 (ins FR64:$src1, f64mem:$src2),
696 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
697 []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
699 def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
702 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
703 "cvtsd2ss\t{$src, $dst|$dst, $src}",
704 [(set FR32:$dst, (fround FR64:$src))]>;
705 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
706 "cvtsd2ss\t{$src, $dst|$dst, $src}",
707 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
708 Requires<[HasSSE2, OptForSize]>;
710 let isAsmParserOnly = 0 in
711 defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
712 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
714 let Constraints = "$src1 = $dst" in
715 defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
716 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;
718 // Convert scalar single to scalar double
719 let isAsmParserOnly = 0 in { // SSE2 instructions with XS prefix
720 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
721 (ins FR32:$src1, FR32:$src2),
722 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
723 []>, XS, Requires<[HasAVX]>, VEX_4V;
724 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
725 (ins FR32:$src1, f32mem:$src2),
726 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
727 []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;
729 def : Pat<(f64 (fextend FR32:$src)), (VCVTSS2SDrr FR32:$src, FR32:$src)>,
732 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
733 "cvtss2sd\t{$src, $dst|$dst, $src}",
734 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
736 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
737 "cvtss2sd\t{$src, $dst|$dst, $src}",
738 [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
739 Requires<[HasSSE2, OptForSize]>;
741 let isAsmParserOnly = 0 in {
742 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
743 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
744 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
745 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
746 VR128:$src2))]>, XS, VEX_4V,
748 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
749 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
750 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
751 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
752 (load addr:$src2)))]>, XS, VEX_4V,
755 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
756 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
757 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
758 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
759 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
762 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
763 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
764 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
765 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
766 (load addr:$src2)))]>, XS,
770 def : Pat<(extloadf32 addr:$src),
771 (CVTSS2SDrr (MOVSSrm addr:$src))>,
772 Requires<[HasSSE2, OptForSpeed]>;
774 // Convert doubleword to packed single/double fp
775 let isAsmParserOnly = 0 in { // SSE2 instructions without OpSize prefix
776 def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
777 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
778 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
779 TB, VEX, Requires<[HasAVX]>;
780 def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
781 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
782 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
783 (bitconvert (memopv2i64 addr:$src))))]>,
784 TB, VEX, Requires<[HasAVX]>;
786 def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
787 "cvtdq2ps\t{$src, $dst|$dst, $src}",
788 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
789 TB, Requires<[HasSSE2]>;
790 def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
791 "cvtdq2ps\t{$src, $dst|$dst, $src}",
792 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
793 (bitconvert (memopv2i64 addr:$src))))]>,
794 TB, Requires<[HasSSE2]>;
796 // FIXME: why is the non-intrinsic version described as SSE3?
797 let isAsmParserOnly = 0 in { // SSE2 instructions with XS prefix
798 def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
799 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
800 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
801 XS, VEX, Requires<[HasAVX]>;
802 def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
803 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
804 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
805 (bitconvert (memopv2i64 addr:$src))))]>,
806 XS, VEX, Requires<[HasAVX]>;
808 def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
809 "cvtdq2pd\t{$src, $dst|$dst, $src}",
810 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
811 XS, Requires<[HasSSE2]>;
812 def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
813 "cvtdq2pd\t{$src, $dst|$dst, $src}",
814 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
815 (bitconvert (memopv2i64 addr:$src))))]>,
816 XS, Requires<[HasSSE2]>;
819 // Convert packed single/double fp to doubleword
820 let isAsmParserOnly = 0 in {
821 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
822 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
823 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
824 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
825 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
826 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
827 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
828 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
830 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
831 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
832 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
833 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
835 let isAsmParserOnly = 0 in {
836 def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
837 "cvtps2dq\t{$src, $dst|$dst, $src}",
838 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
840 def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
842 "cvtps2dq\t{$src, $dst|$dst, $src}",
843 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
844 (memop addr:$src)))]>, VEX;
846 def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
847 "cvtps2dq\t{$src, $dst|$dst, $src}",
848 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
849 def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
850 "cvtps2dq\t{$src, $dst|$dst, $src}",
851 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
852 (memop addr:$src)))]>;
854 let isAsmParserOnly = 0 in { // SSE2 packed instructions with XD prefix
855 def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
856 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
857 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
858 XD, VEX, Requires<[HasAVX]>;
859 def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
860 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
861 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
862 (memop addr:$src)))]>,
863 XD, VEX, Requires<[HasAVX]>;
865 def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
866 "cvtpd2dq\t{$src, $dst|$dst, $src}",
867 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
868 XD, Requires<[HasSSE2]>;
869 def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
870 "cvtpd2dq\t{$src, $dst|$dst, $src}",
871 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
872 (memop addr:$src)))]>,
873 XD, Requires<[HasSSE2]>;
876 // Convert packed single/double fp to doubleword, with truncation
877 let isAsmParserOnly = 0 in { // SSE2 packed instructions with XS prefix
878 def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
879 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
880 def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
881 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
882 def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
883 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
884 def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
885 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
887 def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
888 "cvttps2dq\t{$src, $dst|$dst, $src}",
890 (int_x86_sse2_cvttps2dq VR128:$src))]>;
891 def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
892 "cvttps2dq\t{$src, $dst|$dst, $src}",
894 (int_x86_sse2_cvttps2dq (memop addr:$src)))]>;
897 let isAsmParserOnly = 0 in {
898 def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
899 "vcvttps2dq\t{$src, $dst|$dst, $src}",
901 (int_x86_sse2_cvttps2dq VR128:$src))]>,
902 XS, VEX, Requires<[HasAVX]>;
903 def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
904 "vcvttps2dq\t{$src, $dst|$dst, $src}",
905 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
906 (memop addr:$src)))]>,
907 XS, VEX, Requires<[HasAVX]>;
910 let isAsmParserOnly = 0 in {
911 def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
913 "cvttpd2dq\t{$src, $dst|$dst, $src}",
914 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
916 def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
918 "cvttpd2dq\t{$src, $dst|$dst, $src}",
919 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
920 (memop addr:$src)))]>, VEX;
922 def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
923 "cvttpd2dq\t{$src, $dst|$dst, $src}",
924 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
925 def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
926 "cvttpd2dq\t{$src, $dst|$dst, $src}",
927 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
928 (memop addr:$src)))]>;
930 let isAsmParserOnly = 0 in {
931 // The assembler can recognize rr 256-bit instructions by seeing a ymm
932 // register, but the same isn't true when using memory operands instead.
933 // Provide other assembly rr and rm forms to address this explicitly.
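// (e.g. "cvttpd2dqx" converts from a 128-bit memory operand while
// "cvttpd2dqy" converts from a 256-bit one; without the suffix the memory
// form would be ambiguous.)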
934 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
935 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
936 def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
937 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
940 def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
941 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
942 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
943 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
946 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
947 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
948 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
949 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
952 // Convert packed single to packed double
953 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
954 // SSE2 instructions without OpSize prefix
955 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
956 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
957 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
958 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
959 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
960 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
961 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
962 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
964 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
965 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
966 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
967 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
969 let isAsmParserOnly = 0 in {
970 def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
971 "vcvtps2pd\t{$src, $dst|$dst, $src}",
972 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
973 VEX, Requires<[HasAVX]>;
974 def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
975 "vcvtps2pd\t{$src, $dst|$dst, $src}",
976 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
977 (load addr:$src)))]>,
978 VEX, Requires<[HasAVX]>;
980 def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
981 "cvtps2pd\t{$src, $dst|$dst, $src}",
982 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
983 TB, Requires<[HasSSE2]>;
984 def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
985 "cvtps2pd\t{$src, $dst|$dst, $src}",
986 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
987 (load addr:$src)))]>,
988 TB, Requires<[HasSSE2]>;
990 // Convert packed double to packed single
991 let isAsmParserOnly = 0 in {
992 // The assembler can recognize rr 256-bit instructions by seeing a ymm
993 // register, but the same isn't true when using memory operands instead.
994 // Provide other assembly rr and rm forms to address this explicitly.
995 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
996 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
997 def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
998 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1001 def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1002 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1003 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1004 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1007 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1008 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
1009 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1010 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
1012 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1013 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1014 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1015 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1018 let isAsmParserOnly = 0 in {
1019 def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1020 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1021 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1022 def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
1024 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1025 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1026 (memop addr:$src)))]>;
1028 def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1029 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1030 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1031 def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1032 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1033 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1034 (memop addr:$src)))]>;
1036 // AVX 256-bit register conversion intrinsics
1037 // FIXME: Migrate SSE conversion intrinsic matching to use patterns like the
1038 // ones below whenever possible, to avoid declaring two versions of each one.
1039 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
1040 (VCVTDQ2PSYrr VR256:$src)>;
1041 def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
1042 (VCVTDQ2PSYrm addr:$src)>;
1044 def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
1045 (VCVTPD2PSYrr VR256:$src)>;
1046 def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
1047 (VCVTPD2PSYrm addr:$src)>;
1049 def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
1050 (VCVTPS2DQYrr VR256:$src)>;
1051 def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
1052 (VCVTPS2DQYrm addr:$src)>;
1054 def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
1055 (VCVTPS2PDYrr VR128:$src)>;
1056 def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
1057 (VCVTPS2PDYrm addr:$src)>;
1059 def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
1060 (VCVTTPD2DQYrr VR256:$src)>;
1061 def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
1062 (VCVTTPD2DQYrm addr:$src)>;
1064 def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
1065 (VCVTTPS2DQYrr VR256:$src)>;
1066 def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
1067 (VCVTTPS2DQYrm addr:$src)>;
1069 //===----------------------------------------------------------------------===//
1070 // SSE 1 & 2 - Compare Instructions
1071 //===----------------------------------------------------------------------===//
1073 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
1074 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
1075 string asm, string asm_alt> {
1076 let isAsmParserOnly = 1 in {
1077 def rr : SIi8<0xC2, MRMSrcReg,
1078 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
1081 def rm : SIi8<0xC2, MRMSrcMem,
1082 (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
1086 // Accept explicit immediate argument form instead of comparison code.
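// (e.g. "cmpss $3, %xmm1, %xmm0" instead of "cmpunordss %xmm1, %xmm0";
// immediate 3 selects the "unordered" predicate.)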
1087 def rr_alt : SIi8<0xC2, MRMSrcReg,
1088 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1091 def rm_alt : SIi8<0xC2, MRMSrcMem,
1092 (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
1096 let neverHasSideEffects = 1, isAsmParserOnly = 0 in {
1097 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
1098 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1099 "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1101 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
1102 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1103 "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1107 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1108 defm CMPSS : sse12_cmp_scalar<FR32, f32mem,
1109 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
1110 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}">, XS;
1111 defm CMPSD : sse12_cmp_scalar<FR64, f64mem,
1112 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
1113 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}">, XD;
1116 multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
1117 Intrinsic Int, string asm> {
1118 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
1119 (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
1120 [(set VR128:$dst, (Int VR128:$src1,
1121 VR128:$src, imm:$cc))]>;
1122 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
1123 (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
1124 [(set VR128:$dst, (Int VR128:$src1,
1125 (load addr:$src), imm:$cc))]>;
1128 // Aliases to match intrinsics which expect XMM operand(s).
1129 let isAsmParserOnly = 0 in {
1130 defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1131 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
1133 defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1134 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
1137 let Constraints = "$src1 = $dst" in {
1138 defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1139 "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
1140 defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1141 "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
1145 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
1146 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
1147 ValueType vt, X86MemOperand x86memop,
1148 PatFrag ld_frag, string OpcodeStr, Domain d> {
1149 def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
1150 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1151 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
1152 def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
1153 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1154 [(set EFLAGS, (OpNode (vt RC:$src1),
1155 (ld_frag addr:$src2)))], d>;
1158 let Defs = [EFLAGS] in {
1159 let isAsmParserOnly = 0 in {
1160 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1161 "ucomiss", SSEPackedSingle>, VEX;
1162 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1163 "ucomisd", SSEPackedDouble>, OpSize, VEX;
1164 let Pattern = []<dag> in {
1165 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1166 "comiss", SSEPackedSingle>, VEX;
1167 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1168 "comisd", SSEPackedDouble>, OpSize, VEX;
1171 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1172 load, "ucomiss", SSEPackedSingle>, VEX;
1173 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1174 load, "ucomisd", SSEPackedDouble>, OpSize, VEX;
1176 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
1177 load, "comiss", SSEPackedSingle>, VEX;
1178 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
1179 load, "comisd", SSEPackedDouble>, OpSize, VEX;
1181 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1182 "ucomiss", SSEPackedSingle>, TB;
1183 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1184 "ucomisd", SSEPackedDouble>, TB, OpSize;
1186 let Pattern = []<dag> in {
1187 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1188 "comiss", SSEPackedSingle>, TB;
1189 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1190 "comisd", SSEPackedDouble>, TB, OpSize;
1193 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1194 load, "ucomiss", SSEPackedSingle>, TB;
1195 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1196 load, "ucomisd", SSEPackedDouble>, TB, OpSize;
1198 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
1199 "comiss", SSEPackedSingle>, TB;
1200 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
1201 "comisd", SSEPackedDouble>, TB, OpSize;
1202 } // Defs = [EFLAGS]
1204 // sse12_cmp_packed - sse 1 & 2 compare packed instructions
1205 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
1206 Intrinsic Int, string asm, string asm_alt,
1208 let isAsmParserOnly = 1 in {
1209 def rri : PIi8<0xC2, MRMSrcReg,
1210 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
1211 [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
1212 def rmi : PIi8<0xC2, MRMSrcMem,
1213 (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
1214 [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
1217 // Accept explicit immediate argument form instead of comparison code.
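// (e.g. "cmpps $2, %xmm1, %xmm0" is the immediate form of
// "cmpleps %xmm1, %xmm0"; immediate 2 selects the "less-or-equal" predicate.)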
1218 def rri_alt : PIi8<0xC2, MRMSrcReg,
1219 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1221 def rmi_alt : PIi8<0xC2, MRMSrcMem,
1222 (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
1226 let isAsmParserOnly = 0 in {
1227 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1228 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1229 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1230 SSEPackedSingle>, VEX_4V;
1231 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1232 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1233 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1234 SSEPackedDouble>, OpSize, VEX_4V;
1235 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
1236 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1237 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1238 SSEPackedSingle>, VEX_4V;
1239 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
1240 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1241 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1242 SSEPackedDouble>, OpSize, VEX_4V;
1244 let Constraints = "$src1 = $dst" in {
1245 defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1246 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
1247 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
1248 SSEPackedSingle>, TB;
1249 defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1250 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1251 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
1252 SSEPackedDouble>, TB, OpSize;
1255 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1256 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1257 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1258 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1259 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1260 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1261 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1262 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1264 //===----------------------------------------------------------------------===//
1265 // SSE 1 & 2 - Shuffle Instructions
1266 //===----------------------------------------------------------------------===//
1268 /// sse12_shuffle - sse 1 & 2 shuffle instructions
1269 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
1270 ValueType vt, string asm, PatFrag mem_frag,
1271 Domain d, bit IsConvertibleToThreeAddress = 0> {
1272 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
1273 (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
1274 [(set RC:$dst, (vt (shufp:$src3
1275 RC:$src1, (mem_frag addr:$src2))))], d>;
1276 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
1277 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
1278 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
1280 (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
1283 let isAsmParserOnly = 0 in {
1284 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1285 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1286 memopv4f32, SSEPackedSingle>, TB, VEX_4V;
1287 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
1288 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1289 memopv8f32, SSEPackedSingle>, TB, VEX_4V;
1290 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1291 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1292 memopv2f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
1293 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
1294 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1295 memopv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
1298 let Constraints = "$src1 = $dst" in {
1299 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1300 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1301 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
1303 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1304 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1305 memopv2f64, SSEPackedDouble>, TB, OpSize;
1308 //===----------------------------------------------------------------------===//
1309 // SSE 1 & 2 - Unpack Instructions
1310 //===----------------------------------------------------------------------===//
1312 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
1313 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
1314 PatFrag mem_frag, RegisterClass RC,
1315 X86MemOperand x86memop, string asm,
1317 def rr : PI<opc, MRMSrcReg,
1318 (outs RC:$dst), (ins RC:$src1, RC:$src2),
1320 (vt (OpNode RC:$src1, RC:$src2)))], d>;
1321 def rm : PI<opc, MRMSrcMem,
1322 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
1324 (vt (OpNode RC:$src1,
1325 (mem_frag addr:$src2))))], d>;
1328 let AddedComplexity = 10 in {
1329 let isAsmParserOnly = 0 in {
1330 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1331 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1332 SSEPackedSingle>, VEX_4V;
1333 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1334 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1335 SSEPackedDouble>, OpSize, VEX_4V;
1336 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1337 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1338 SSEPackedSingle>, VEX_4V;
1339 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1340 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1341 SSEPackedDouble>, OpSize, VEX_4V;
1343 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
1344 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1345 SSEPackedSingle>, VEX_4V;
1346 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
1347 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1348 SSEPackedDouble>, OpSize, VEX_4V;
1349 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
1350 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1351 SSEPackedSingle>, VEX_4V;
1352 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
1353 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1354 SSEPackedDouble>, OpSize, VEX_4V;
1357 let Constraints = "$src1 = $dst" in {
1358 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1359 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
1360 SSEPackedSingle>, TB;
1361 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1362 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
1363 SSEPackedDouble>, TB, OpSize;
1364 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1365 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
1366 SSEPackedSingle>, TB;
1367 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1368 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
1369 SSEPackedDouble>, TB, OpSize;
1370 } // Constraints = "$src1 = $dst"
1371 } // AddedComplexity
1373 //===----------------------------------------------------------------------===//
1374 // SSE 1 & 2 - Extract Floating-Point Sign mask
1375 //===----------------------------------------------------------------------===//
1377 /// sse12_extr_sign_mask - sse 1 & 2 packed FP sign mask extraction
1378 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
1380 def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
1381 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1382 [(set GR32:$dst, (Int RC:$src))], d>;
1383 def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
1384 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>, REX_W;
1388 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
1389 SSEPackedSingle>, TB;
1390 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
1391 SSEPackedDouble>, TB, OpSize;
1393 let isAsmParserOnly = 0 in {
1394 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
1395 "movmskps", SSEPackedSingle>, VEX;
1396 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
1397 "movmskpd", SSEPackedDouble>, OpSize,
1399 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
1400 "movmskps", SSEPackedSingle>, VEX;
1401 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
1402 "movmskpd", SSEPackedDouble>, OpSize,
1406 def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1407 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1408 def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1409 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1411 def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1412 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1413 def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1414 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1418 //===----------------------------------------------------------------------===//
1419 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
1420 //===----------------------------------------------------------------------===//
1422 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
1423 // names that start with 'Fs'.
1425 // Alias instructions that map fld0 to pxor for sse.
1426 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
1427 canFoldAsLoad = 1 in {
1428 // FIXME: Set encoding to pseudo!
1429 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1430 [(set FR32:$dst, fp32imm0)]>,
1431 Requires<[HasSSE1]>, TB, OpSize;
1432 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1433 [(set FR64:$dst, fpimm0)]>,
1434 Requires<[HasSSE2]>, TB, OpSize;
1435 def VFsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1436 [(set FR32:$dst, fp32imm0)]>,
1437 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
1438 def VFsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1439 [(set FR64:$dst, fpimm0)]>,
1440 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
1443 // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
1444 // bits are disregarded.
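// For example (a sketch): an FR32-to-FR32 copy selected to FsMOVAPSrr is
// emitted as the full-width "movaps %xmm1, %xmm0"; only the low 32 bits are
// meaningful for the f32 value, so clobbering the upper bits is harmless.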
1445 let neverHasSideEffects = 1 in {
1446 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1447 "movaps\t{$src, $dst|$dst, $src}", []>;
1448 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1449 "movapd\t{$src, $dst|$dst, $src}", []>;
1452 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
1453 // bits are disregarded.
1454 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1455 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1456 "movaps\t{$src, $dst|$dst, $src}",
1457 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
1458 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1459 "movapd\t{$src, $dst|$dst, $src}",
1460 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
1463 //===----------------------------------------------------------------------===//
1464 // SSE 1 & 2 - Logical Instructions
1465 //===----------------------------------------------------------------------===//
1467 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
1469 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
1471 let isAsmParserOnly = 0 in {
1472 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
1473 FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, VEX_4V;
1475 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
1476 FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, OpSize, VEX_4V;
1479 let Constraints = "$src1 = $dst" in {
1480 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
1481 f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
1483 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
1484 f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
1488 // Alias bitwise logical operations using SSE logical ops on packed FP values.
1489 let mayLoad = 0 in {
1490 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
1491 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
1492 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
1495 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
1496 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
1498 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
1500 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
1501 SDNode OpNode, int HasPat = 0,
1502 list<list<dag>> Pattern = []> {
1503 let isAsmParserOnly = 0, Pattern = []<dag> in {
1504 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1505 !strconcat(OpcodeStr, "ps"), f128mem,
1506 !if(HasPat, Pattern[0], // rr
1507 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1509 !if(HasPat, Pattern[2], // rm
1510 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1511 (memopv2i64 addr:$src2)))]), 0>,
1514 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1515 !strconcat(OpcodeStr, "pd"), f128mem,
1516 !if(HasPat, Pattern[1], // rr
1517 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1520 !if(HasPat, Pattern[3], // rm
1521 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1522 (memopv2i64 addr:$src2)))]), 0>,
1525 let Constraints = "$src1 = $dst" in {
1526 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1527 !strconcat(OpcodeStr, "ps"), f128mem,
1528 !if(HasPat, Pattern[0], // rr
1529 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1531 !if(HasPat, Pattern[2], // rm
1532 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1533 (memopv2i64 addr:$src2)))])>, TB;
1535 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1536 !strconcat(OpcodeStr, "pd"), f128mem,
1537 !if(HasPat, Pattern[1], // rr
1538 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1541 !if(HasPat, Pattern[3], // rm
1542 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1543 (memopv2i64 addr:$src2)))])>,
1548 /// sse12_fp_packed_logical_y - AVX 256-bit forms of SSE 1 & 2 packed FP logical ops
1550 let isAsmParserOnly = 0 in {
1551 multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr> {
1552 defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
1553 !strconcat(OpcodeStr, "ps"), f256mem, [], [], 0>, VEX_4V;
1555 defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
1556 !strconcat(OpcodeStr, "pd"), f256mem, [], [], 0>, OpSize, VEX_4V;
1560 // AVX 256-bit packed logical ops forms
1561 defm VAND : sse12_fp_packed_logical_y<0x54, "and">;
1562 defm VOR : sse12_fp_packed_logical_y<0x56, "or">;
1563 defm VXOR : sse12_fp_packed_logical_y<0x57, "xor">;
1564 let isCommutable = 0 in
1565 defm VANDN : sse12_fp_packed_logical_y<0x55, "andn">;
1567 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
1568 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
1569 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
1570 let isCommutable = 0 in
1571 defm ANDN : sse12_fp_packed_logical<0x55, "andn", undef /* dummy */, 1, [
1573 [(set VR128:$dst, (X86pandn VR128:$src1, VR128:$src2))],
1577 [(set VR128:$dst, (X86pandn VR128:$src1, (memopv2i64 addr:$src2)))],
1581 //===----------------------------------------------------------------------===//
1582 // SSE 1 & 2 - Arithmetic Instructions
1583 //===----------------------------------------------------------------------===//
1585 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and vector forms.
1588 /// In addition, we also have a special variant of the scalar form here to
1589 /// represent the associated intrinsic operation. This form is unlike the
1590 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
1591 /// and leaves the top elements unmodified (therefore these cannot be commuted).
1593 /// These three forms can each be reg+reg or reg+mem.
1596 /// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
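// For example (informal sketch of the ADDSS intrinsic form): given two v4f32
// operands A and B,
//   result[0]    = A[0] + B[0]
//   result[1..3] = A[1..3]          // upper elements taken from $src1 only
// so swapping A and B changes the upper elements, which is why this form is
// not commutable, unlike the plain scalar (FR32) form.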
1598 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
1600 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
1601 OpNode, FR32, f32mem, Is2Addr>, XS;
1602 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
1603 OpNode, FR64, f64mem, Is2Addr>, XD;
1606 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
1608 let mayLoad = 0 in {
1609 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
1610 v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
1611 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
1612 v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
1616 multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
1618 let mayLoad = 0 in {
1619 defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
1620 v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
1621 defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
1622 v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
1626 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
1628 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1629 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
1630 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1631 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
1634 multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
1636 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1637 !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
1638 SSEPackedSingle, Is2Addr>, TB;
1640 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1641 !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
1642 SSEPackedDouble, Is2Addr>, TB, OpSize;
1645 multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
1646 defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1647 !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
1648 SSEPackedSingle, 0>, TB;
1650 defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1651 !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
1652 SSEPackedDouble, 0>, TB, OpSize;
1655 // Binary Arithmetic instructions
1656 let isAsmParserOnly = 0 in {
1657 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
1658 basic_sse12_fp_binop_s_int<0x58, "add", 0>,
1659 basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
1660 basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
1661 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
1662 basic_sse12_fp_binop_s_int<0x59, "mul", 0>,
1663 basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
1664 basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
1666 let isCommutable = 0 in {
1667 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
1668 basic_sse12_fp_binop_s_int<0x5C, "sub", 0>,
1669 basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
1670 basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
1671 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
1672 basic_sse12_fp_binop_s_int<0x5E, "div", 0>,
1673 basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
1674 basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
1675 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
1676 basic_sse12_fp_binop_s_int<0x5F, "max", 0>,
1677 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
1678 basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
1679 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
1680 basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
1681 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
1682 basic_sse12_fp_binop_s_int<0x5D, "min", 0>,
1683 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
1684 basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
1685 basic_sse12_fp_binop_p_y_int<0x5D, "min">,
1686 basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
1690 let Constraints = "$src1 = $dst" in {
1691 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
1692 basic_sse12_fp_binop_p<0x58, "add", fadd>,
1693 basic_sse12_fp_binop_s_int<0x58, "add">;
1694 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
1695 basic_sse12_fp_binop_p<0x59, "mul", fmul>,
1696 basic_sse12_fp_binop_s_int<0x59, "mul">;
1698 let isCommutable = 0 in {
1699 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
1700 basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
1701 basic_sse12_fp_binop_s_int<0x5C, "sub">;
1702 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
1703 basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
1704 basic_sse12_fp_binop_s_int<0x5E, "div">;
1705 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
1706 basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
1707 basic_sse12_fp_binop_s_int<0x5F, "max">,
1708 basic_sse12_fp_binop_p_int<0x5F, "max">;
1709 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
1710 basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
1711 basic_sse12_fp_binop_s_int<0x5D, "min">,
1712 basic_sse12_fp_binop_p_int<0x5D, "min">;
1717 /// In addition, we also have a special variant of the scalar form here to
1718 /// represent the associated intrinsic operation. This form is unlike the
1719 /// plain scalar form, in that it takes an entire vector (instead of a
1720 /// scalar) and leaves the top elements undefined.
1722 /// And, we have a special variant form for a full-vector intrinsic form.
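// For example (informal sketch of the SQRTSS intrinsic form): given a v4f32
// operand V,
//   result[0] = sqrt(V[0])
// and the upper three elements are not part of the computation (the hardware
// simply carries them along from the vector operand), which is why this form
// operates on a whole VR128 rather than an FR32.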
1724 /// sse1_fp_unop_s - SSE1 unops in scalar form.
1725 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
1726 SDNode OpNode, Intrinsic F32Int> {
1727 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1728 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1729 [(set FR32:$dst, (OpNode FR32:$src))]>;
1730 // For scalar unary operations, fold a load into the operation
1731 // only in OptForSize mode. It eliminates an instruction, but it also
1732 // eliminates a whole-register clobber (the load), so it introduces a
1733 // partial register update condition.
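// For example (a sketch of the two code sequences, using sqrtss):
//   movss  (%rdi), %xmm0        // the load fully defines %xmm0
//   sqrtss %xmm0, %xmm0
// versus the folded form
//   sqrtss (%rdi), %xmm0        // writes only the low 32 bits, so it carries
//                               // a false dependency on the old %xmm0 value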
1734 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
1735 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1736 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
1737 Requires<[HasSSE1, OptForSize]>;
1738 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1739 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1740 [(set VR128:$dst, (F32Int VR128:$src))]>;
1741 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1742 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1743 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1746 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
1747 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1748 SDNode OpNode, Intrinsic F32Int> {
1749 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
1750 !strconcat(OpcodeStr,
1751 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1752 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
1753 !strconcat(OpcodeStr,
1754 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1755 []>, XS, Requires<[HasAVX, OptForSize]>;
1756 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1757 !strconcat(OpcodeStr,
1758 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1759 [(set VR128:$dst, (F32Int VR128:$src))]>;
1760 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1761 !strconcat(OpcodeStr,
1762 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1763 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1766 /// sse1_fp_unop_p - SSE1 unops in packed form.
1767 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1768 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1769 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1770 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
1771 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1772 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1773 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
1776 /// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
1777 multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1778 def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1779 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1780 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
1781 def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1782 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1783 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
1786 /// sse1_fp_unop_p_int - SSE1 intrinsic unops in packed forms.
1787 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1788 Intrinsic V4F32Int> {
1789 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1790 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1791 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
1792 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1793 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1794 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
1797 /// sse1_fp_unop_p_y_int - AVX 256-bit intrinsic unops in packed forms.
1798 multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1799 Intrinsic V4F32Int> {
1800 def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1801 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1802 [(set VR256:$dst, (V4F32Int VR256:$src))]>;
1803 def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1804 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1805 [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
1808 /// sse2_fp_unop_s - SSE2 unops in scalar form.
1809 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
1810 SDNode OpNode, Intrinsic F64Int> {
1811 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1812 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1813 [(set FR64:$dst, (OpNode FR64:$src))]>;
1814 // See the comments in sse1_fp_unop_s for why this is OptForSize.
1815 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
1816 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1817 [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
1818 Requires<[HasSSE2, OptForSize]>;
1819 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1820 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1821 [(set VR128:$dst, (F64Int VR128:$src))]>;
1822 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1823 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1824 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1827 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
1828 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1829 SDNode OpNode, Intrinsic F64Int> {
1830 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
1831 !strconcat(OpcodeStr,
1832 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1833 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
1834 (ins FR64:$src1, f64mem:$src2),
1835 !strconcat(OpcodeStr,
1836 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1837 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1838 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1839 [(set VR128:$dst, (F64Int VR128:$src))]>;
1840 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1841 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1842 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1845 /// sse2_fp_unop_p - SSE2 unops in vector forms.
1846 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
1848 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1849 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1850 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
1851 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1852 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1853 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
1856 /// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
1857 multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1858 def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1859 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1860 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
1861 def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1862 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1863 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
1866 /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
1867 multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1868 Intrinsic V2F64Int> {
1869 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1870 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1871 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
1872 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1873 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1874 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
1877 /// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
1878 multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1879 Intrinsic V2F64Int> {
1880 def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1881 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1882 [(set VR256:$dst, (V2F64Int VR256:$src))]>;
1883 def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1884 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1885 [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
1888 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
1890 defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse_sqrt_ss>,
1891 sse2_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1894 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
1895 sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
1896 sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1897 sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1898 sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
1899 sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
1900 sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
1901 sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
1904 // Reciprocal approximations. Note that these typically require refinement
1905 // in order to obtain suitable precision.
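// For example, a single Newton-Raphson step (a standard refinement that the
// compiler may emit around these instructions; the patterns here do not do it
// by themselves) is:
//   rsqrt:  x1 = x0 * (1.5 - 0.5 * a * x0 * x0)
//   rcp:    x1 = x0 * (2.0 - a * x0)
// where x0 is the hardware estimate of 1/sqrt(a) or 1/a respectively.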
1906 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt", X86frsqrt,
1907 int_x86_sse_rsqrt_ss>, VEX_4V;
1908 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
1909 sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
1910 sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
1911 sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;
1913 defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp", X86frcp, int_x86_sse_rcp_ss>,
1915 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
1916 sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
1917 sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
1918 sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
1922 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
1923 sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
1924 sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
1925 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1926 sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
1927 sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
1929 // Reciprocal approximations. Note that these typically require refinement
1930 // in order to obtain suitable precision.
1931 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
1932 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
1933 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
1934 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
1935 sse1_fp_unop_p<0x53, "rcp", X86frcp>,
1936 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
1938 // There is no f64 version of the reciprocal approximation instructions.
1940 //===----------------------------------------------------------------------===//
1941 // SSE 1 & 2 - Non-temporal stores
1942 //===----------------------------------------------------------------------===//
1944 let isAsmParserOnly = 0 in {
1945 def VMOVNTPSmr_Int : VPSI<0x2B, MRMDestMem, (outs),
1946 (ins i128mem:$dst, VR128:$src),
1947 "movntps\t{$src, $dst|$dst, $src}",
1948 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>, VEX;
1949 def VMOVNTPDmr_Int : VPDI<0x2B, MRMDestMem, (outs),
1950 (ins i128mem:$dst, VR128:$src),
1951 "movntpd\t{$src, $dst|$dst, $src}",
1952 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>, VEX;
1954 let ExeDomain = SSEPackedInt in
1955 def VMOVNTDQmr_Int : VPDI<0xE7, MRMDestMem, (outs),
1956 (ins f128mem:$dst, VR128:$src),
1957 "movntdq\t{$src, $dst|$dst, $src}",
1958 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>, VEX;
1960 let AddedComplexity = 400 in { // Prefer non-temporal versions
1961 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
1962 (ins f128mem:$dst, VR128:$src),
1963 "movntps\t{$src, $dst|$dst, $src}",
1964 [(alignednontemporalstore (v4f32 VR128:$src),
1966 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
1967 (ins f128mem:$dst, VR128:$src),
1968 "movntpd\t{$src, $dst|$dst, $src}",
1969 [(alignednontemporalstore (v2f64 VR128:$src),
1971 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
1972 (ins f128mem:$dst, VR128:$src),
1973 "movntdq\t{$src, $dst|$dst, $src}",
1974 [(alignednontemporalstore (v2f64 VR128:$src),
1976 let ExeDomain = SSEPackedInt in
1977 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
1978 (ins f128mem:$dst, VR128:$src),
1979 "movntdq\t{$src, $dst|$dst, $src}",
1980 [(alignednontemporalstore (v4f32 VR128:$src),
1983 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
1984 (ins f256mem:$dst, VR256:$src),
1985 "movntps\t{$src, $dst|$dst, $src}",
1986 [(alignednontemporalstore (v8f32 VR256:$src),
1988 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
1989 (ins f256mem:$dst, VR256:$src),
1990 "movntpd\t{$src, $dst|$dst, $src}",
1991 [(alignednontemporalstore (v4f64 VR256:$src),
1993 def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
1994 (ins f256mem:$dst, VR256:$src),
1995 "movntdq\t{$src, $dst|$dst, $src}",
1996 [(alignednontemporalstore (v4f64 VR256:$src),
1998 let ExeDomain = SSEPackedInt in
1999 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
2000 (ins f256mem:$dst, VR256:$src),
2001 "movntdq\t{$src, $dst|$dst, $src}",
2002 [(alignednontemporalstore (v8f32 VR256:$src),
2007 def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
2008 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
2009 def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
2010 (VMOVNTPDYmr addr:$dst, VR256:$src)>;
2011 def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
2012 (VMOVNTPSYmr addr:$dst, VR256:$src)>;
2014 def MOVNTPSmr_Int : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2015 "movntps\t{$src, $dst|$dst, $src}",
2016 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
2017 def MOVNTPDmr_Int : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2018 "movntpd\t{$src, $dst|$dst, $src}",
2019 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
2021 let ExeDomain = SSEPackedInt in
2022 def MOVNTDQmr_Int : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2023 "movntdq\t{$src, $dst|$dst, $src}",
2024 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
2026 let AddedComplexity = 400 in { // Prefer non-temporal versions
2027 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2028 "movntps\t{$src, $dst|$dst, $src}",
2029 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2030 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2031 "movntpd\t{$src, $dst|$dst, $src}",
2032 [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
2034 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2035 "movntdq\t{$src, $dst|$dst, $src}",
2036 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2038 let ExeDomain = SSEPackedInt in
2039 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2040 "movntdq\t{$src, $dst|$dst, $src}",
2041 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2043 // There is no AVX form for instructions below this point
2044 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2045 "movnti\t{$src, $dst|$dst, $src}",
2046 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
2047 TB, Requires<[HasSSE2]>;
2049 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2050 "movnti\t{$src, $dst|$dst, $src}",
2051 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
2052 TB, Requires<[HasSSE2]>;
2055 def MOVNTImr_Int : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2056 "movnti\t{$src, $dst|$dst, $src}",
2057 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
2058 TB, Requires<[HasSSE2]>;
2060 //===----------------------------------------------------------------------===//
2061 // SSE 1 & 2 - Misc Instructions (No AVX form)
2062 //===----------------------------------------------------------------------===//
2064 // Prefetch intrinsic.
2065 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
2066 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
2067 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
2068 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
2069 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
2070 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
2071 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
2072 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
2074 // Load, store, and memory fence
2075 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
2076 TB, Requires<[HasSSE1]>;
2077 def : Pat<(X86SFence), (SFENCE)>;
2079 // Alias instructions that map zero vector to pxor / xorp* for sse.
2080 // We set canFoldAsLoad because this can be converted to a constant-pool
2081 // load of an all-zeros value if folding it would be beneficial.
2082 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2083 // JIT implementation, which does not expand the instructions below like
2084 // X86MCInstLower does.
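// For example (a sketch): V_SET0PS is normally emitted as the
// dependency-breaking
//   xorps %xmm0, %xmm0
// but, because canFoldAsLoad is set, the zero may instead be rematerialized
// as a load of an all-zeros constant-pool entry when folding it into a user
// is profitable.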
2085 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2086 isCodeGenOnly = 1 in {
2087 def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2088 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
2089 def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2090 [(set VR128:$dst, (v2f64 immAllZerosV))]>;
2091 let ExeDomain = SSEPackedInt in
2092 def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2093 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2096 // The same as above, but for AVX. The 128-bit versions are the same, but
2097 // re-encoded. There is no 256-bit PI (packed integer) version.
2098 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2099 // JIT implementation, which does not expand the instructions below like
2100 // X86MCInstLower does.
2101 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2102 isCodeGenOnly = 1, Predicates = [HasAVX] in {
2103 def AVX_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2104 [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V;
2105 def AVX_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2106 [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V;
2107 def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2108 [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
2109 def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2110 [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
2111 let ExeDomain = SSEPackedInt in
2112 def AVX_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2113 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2116 def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
2117 def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
2118 def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
2120 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2121 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
2123 //===----------------------------------------------------------------------===//
2124 // SSE 1 & 2 - Load/Store XCSR register
2125 //===----------------------------------------------------------------------===//
2127 let isAsmParserOnly = 0 in {
2128 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2129 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
2130 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2131 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
2134 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2135 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
2136 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2137 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
2139 //===---------------------------------------------------------------------===//
2140 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
2141 //===---------------------------------------------------------------------===//
2143 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2145 let isAsmParserOnly = 0 in {
2146 let neverHasSideEffects = 1 in {
2147 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2148 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2149 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2150 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2152 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2153 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2154 def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2155 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2157 let canFoldAsLoad = 1, mayLoad = 1 in {
2158 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2159 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2160 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2161 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2162 let Predicates = [HasAVX] in {
2163 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2164 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2165 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2166 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2170 let mayStore = 1 in {
2171 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
2172 (ins i128mem:$dst, VR128:$src),
2173 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2174 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
2175 (ins i256mem:$dst, VR256:$src),
2176 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2177 let Predicates = [HasAVX] in {
2178 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2179 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2180 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
2181 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2186 let neverHasSideEffects = 1 in
2187 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2188 "movdqa\t{$src, $dst|$dst, $src}", []>;
2190 def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2191 "movdqu\t{$src, $dst|$dst, $src}",
2192 []>, XS, Requires<[HasSSE2]>;
2194 let canFoldAsLoad = 1, mayLoad = 1 in {
2195 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2196 "movdqa\t{$src, $dst|$dst, $src}",
2197 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
2198 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2199 "movdqu\t{$src, $dst|$dst, $src}",
2200 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2201 XS, Requires<[HasSSE2]>;
2204 let mayStore = 1 in {
2205 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2206 "movdqa\t{$src, $dst|$dst, $src}",
2207 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
2208 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2209 "movdqu\t{$src, $dst|$dst, $src}",
2210 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2211 XS, Requires<[HasSSE2]>;
2214 // Intrinsic forms of MOVDQU load and store
2215 let isAsmParserOnly = 0 in {
2216 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2217 "vmovdqu\t{$src, $dst|$dst, $src}",
2218 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2219 XS, VEX, Requires<[HasAVX]>;
2222 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2223 "movdqu\t{$src, $dst|$dst, $src}",
2224 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2225 XS, Requires<[HasSSE2]>;
2227 } // ExeDomain = SSEPackedInt
2229 def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
2230 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
2231 (VMOVDQUYmr addr:$dst, VR256:$src)>;
2233 //===---------------------------------------------------------------------===//
2234 // SSE2 - Packed Integer Arithmetic Instructions
2235 //===---------------------------------------------------------------------===//
2237 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2239 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
2240 bit IsCommutable = 0, bit Is2Addr = 1> {
2241 let isCommutable = IsCommutable in
2242 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2243 (ins VR128:$src1, VR128:$src2),
2245 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2246 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2247 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2248 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2249 (ins VR128:$src1, i128mem:$src2),
2251 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2252 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2253 [(set VR128:$dst, (IntId VR128:$src1,
2254 (bitconvert (memopv2i64 addr:$src2))))]>;
2257 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
2258 string OpcodeStr, Intrinsic IntId,
2259 Intrinsic IntId2, bit Is2Addr = 1> {
2260 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2261 (ins VR128:$src1, VR128:$src2),
2263 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2264 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2265 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2266 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2267 (ins VR128:$src1, i128mem:$src2),
2269 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2270 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2271 [(set VR128:$dst, (IntId VR128:$src1,
2272 (bitconvert (memopv2i64 addr:$src2))))]>;
2273 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
2274 (ins VR128:$src1, i32i8imm:$src2),
2276 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2277 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2278 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
2281 /// PDI_binop_rm - Simple SSE2 binary operator.
2282 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2283 ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
2284 let isCommutable = IsCommutable in
2285 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2286 (ins VR128:$src1, VR128:$src2),
2288 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2289 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2290 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
2291 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2292 (ins VR128:$src1, i128mem:$src2),
2294 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2295 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2296 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
2297 (bitconvert (memopv2i64 addr:$src2)))))]>;
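// As an illustration (a rough sketch; asm strings elided), the PADDB
// instantiation below expands to a register-register and a register-memory
// form along these lines:
//
//   def PADDBrr : PDI<0xFC, MRMSrcReg, (outs VR128:$dst),
//                     (ins VR128:$src1, VR128:$src2), "paddb\t{...}",
//                     [(set VR128:$dst, (v16i8 (add VR128:$src1, VR128:$src2)))]>;
//   def PADDBrm : PDI<0xFC, MRMSrcMem, (outs VR128:$dst),
//                     (ins VR128:$src1, i128mem:$src2), "paddb\t{...}",
//                     [(set VR128:$dst, (v16i8 (add VR128:$src1,
//                                        (bitconvert (memopv2i64 addr:$src2)))))]>;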
2300 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
2302 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
2303 /// to collapse (bitconvert VT to VT) into its operand.
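// For instance, using PDI_binop_rm with OpVT = v2i64 would give the memory
// pattern (a sketch)
//   (set VR128:$dst, (v2i64 (OpNode VR128:$src1,
//                            (bitconvert (memopv2i64 addr:$src2)))))
// where the bitconvert is a v2i64-to-v2i64 no-op that tblgen does not
// currently collapse, hence this separate v2i64 multiclass.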
2305 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
2306 bit IsCommutable = 0, bit Is2Addr = 1> {
2307 let isCommutable = IsCommutable in
2308 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2309 (ins VR128:$src1, VR128:$src2),
2311 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2312 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2313 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
2314 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2315 (ins VR128:$src1, i128mem:$src2),
2317 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2318 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2319 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
2322 } // ExeDomain = SSEPackedInt
2324 // 128-bit Integer Arithmetic
2326 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
2327 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
2328 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
2329 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
2330 defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
2331 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
2332 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
2333 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
2334 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
2335 defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
2338 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
2340 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
2342 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
2344 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
2346 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
2348 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
2350 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
2352 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
2354 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
2356 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
2358 defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
2360 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
2362 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
2364 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
2366 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
2368 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
2370 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
2372 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
2374 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
2378 let Constraints = "$src1 = $dst" in {
2379 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
2380 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
2381 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
2382 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
2383 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
2384 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
2385 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
2386 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
2387 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
2390 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
2391 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
2392 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
2393 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
2394 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
2395 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
2396 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
2397 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
2398 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
2399 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
2400 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
2401 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
2402 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
2403 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
2404 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
2405 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
2406 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
2407 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
2408 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
2410 } // Constraints = "$src1 = $dst"
2412 //===---------------------------------------------------------------------===//
2413 // SSE2 - Packed Integer Logical Instructions
2414 //===---------------------------------------------------------------------===//
2416 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
2417 defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
2418 int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
2420 defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
2421 int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
2423 defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
2424 int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
2427 defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
2428 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
2430 defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
2431 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
2433 defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
2434 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
2437 defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
2438 int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
2440 defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
2441 int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
2444 defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
2445 defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
2446 defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
2448 let ExeDomain = SSEPackedInt in {
2449 let neverHasSideEffects = 1 in {
2450 // 128-bit logical shifts.
2451 def VPSLLDQri : PDIi8<0x73, MRM7r,
2452 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2453 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2455 def VPSRLDQri : PDIi8<0x73, MRM3r,
2456 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2457 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2459 // PSRADQri doesn't exist in SSE[1-3].
2461 def VPANDNrr : PDI<0xDF, MRMSrcReg,
2462 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2463 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2464 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2465 VR128:$src2)))]>, VEX_4V;
2467 def VPANDNrm : PDI<0xDF, MRMSrcMem,
2468 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2469 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2470 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2471 (memopv2i64 addr:$src2))))]>,
2476 let Constraints = "$src1 = $dst" in {
2477 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
2478 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
2479 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
2480 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
2481 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
2482 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
2484 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
2485 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
2486 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
2487 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
2488 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
2489 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
2491 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
2492 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
2493 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
2494 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
2496 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
2497 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
2498 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
2500 let ExeDomain = SSEPackedInt in {
2501 let neverHasSideEffects = 1 in {
2502 // 128-bit logical shifts.
2503 def PSLLDQri : PDIi8<0x73, MRM7r,
2504 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2505 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
2506 def PSRLDQri : PDIi8<0x73, MRM3r,
2507 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2508 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
2509 // PSRADQri doesn't exist in SSE[1-3].
2511 def PANDNrr : PDI<0xDF, MRMSrcReg,
2512 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2513 "pandn\t{$src2, $dst|$dst, $src2}", []>;
2515 def PANDNrm : PDI<0xDF, MRMSrcMem,
2516 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2517 "pandn\t{$src2, $dst|$dst, $src2}", []>;
2519 } // Constraints = "$src1 = $dst"
2521 let Predicates = [HasAVX] in {
2522 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2523 (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2524 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2525 (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2526 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2527 (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
2528 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2529 (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
2530 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2531 (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2533 // Shift up / down and insert zeros.
2534 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2535 (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2536 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2537 (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2540 let Predicates = [HasSSE2] in {
2541 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2542 (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2543 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2544 (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2545 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2546 (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
2547 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2548 (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
2549 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2550 (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2552 // Shift up / down and insert zeros.
2553 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2554 (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2555 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2556 (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2559 //===---------------------------------------------------------------------===//
2560 // SSE2 - Packed Integer Comparison Instructions
2561 //===---------------------------------------------------------------------===//
2563 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
2564 defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
2566 defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
2568 defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
2570 defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
2572 defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
2574 defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
2578 let Constraints = "$src1 = $dst" in {
2579 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
2580 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
2581 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
2582 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
2583 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
2584 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
2585 } // Constraints = "$src1 = $dst"
2587 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
2588 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
2589 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
2590 (PCMPEQBrm VR128:$src1, addr:$src2)>;
2591 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
2592 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
2593 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
2594 (PCMPEQWrm VR128:$src1, addr:$src2)>;
2595 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
2596 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
2597 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
2598 (PCMPEQDrm VR128:$src1, addr:$src2)>;
2600 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
2601 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
2602 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
2603 (PCMPGTBrm VR128:$src1, addr:$src2)>;
2604 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
2605 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
2606 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
2607 (PCMPGTWrm VR128:$src1, addr:$src2)>;
2608 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
2609 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
2610 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
2611 (PCMPGTDrm VR128:$src1, addr:$src2)>;
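// The Pat entries above are needed because the PDI_binop_rm_int
// instantiations only match the pcmpeq*/pcmpgt* intrinsics; the
// X86pcmpeq*/X86pcmpgt* nodes (presumably produced when lowering vector
// compares) are mapped onto the same instructions here.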
2613 //===---------------------------------------------------------------------===//
2614 // SSE2 - Packed Integer Pack Instructions
2615 //===---------------------------------------------------------------------===//
2617 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
2618 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
2620 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
2622 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
2626 let Constraints = "$src1 = $dst" in {
2627 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
2628 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
2629 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2630 } // Constraints = "$src1 = $dst"
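// packsswb/packssdw narrow each signed element of both sources with signed
// saturation (e.g. the word 512 becomes the byte 127); packuswb narrows
// signed words with unsigned saturation (512 becomes 255).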
2632 //===---------------------------------------------------------------------===//
2633 // SSE2 - Packed Integer Shuffle Instructions
2634 //===---------------------------------------------------------------------===//
2636 let ExeDomain = SSEPackedInt in {
2637 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
2639 def ri : Ii8<0x70, MRMSrcReg,
2640 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
2641 !strconcat(OpcodeStr,
2642 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2643 [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
2645 def mi : Ii8<0x70, MRMSrcMem,
2646 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
2647 !strconcat(OpcodeStr,
2648 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2649 [(set VR128:$dst, (vt (pshuf_frag:$src2
2650 (bc_frag (memopv2i64 addr:$src1)),
2653 } // ExeDomain = SSEPackedInt
2655 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
2656 let AddedComplexity = 5 in
2657 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
2660 // SSE2 with ImmT == Imm8 and XS prefix.
2661 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
2664 // SSE2 with ImmT == Imm8 and XD prefix.
2665 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
2669 let Predicates = [HasSSE2] in {
2670 let AddedComplexity = 5 in
2671 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
2673 // SSE2 with ImmT == Imm8 and XS prefix.
2674 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
2676 // SSE2 with ImmT == Imm8 and XD prefix.
2677 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
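// The shuffle immediate holds one 2-bit source index per destination element,
// e.g. pshufd $0x1b, %xmm0, %xmm1 reverses the four dwords; pshufhw/pshuflw
// permute only the high/low four words and pass the other half through.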
2680 //===---------------------------------------------------------------------===//
2681 // SSE2 - Packed Integer Unpack Instructions
2682 //===---------------------------------------------------------------------===//
2684 let ExeDomain = SSEPackedInt in {
2685 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
2686 PatFrag unp_frag, PatFrag bc_frag, bit Is2Addr = 1> {
2687 def rr : PDI<opc, MRMSrcReg,
2688 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2690 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2691 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2692 [(set VR128:$dst, (vt (unp_frag VR128:$src1, VR128:$src2)))]>;
2693 def rm : PDI<opc, MRMSrcMem,
2694 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2696 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2697 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2698 [(set VR128:$dst, (unp_frag VR128:$src1,
2699 (bc_frag (memopv2i64
2703 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
2704 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, unpckl, bc_v16i8,
2706 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, unpckl, bc_v8i16,
2708 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, unpckl, bc_v4i32,
2711 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2712 /// knew to collapse (bitconvert VT to VT) into its operand.
2713 def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2714 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2715 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2717 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>, VEX_4V;
2718 def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2719 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2720 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2722 (v2i64 (unpckl VR128:$src1,
2723 (memopv2i64 addr:$src2))))]>, VEX_4V;
2725 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, unpckh, bc_v16i8,
2727 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, unpckh, bc_v8i16,
2729 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, unpckh, bc_v4i32,
2732 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2733 /// knew to collapse (bitconvert VT to VT) into its operand.
2734 def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2735 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2736 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2738 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>, VEX_4V;
2739 def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2740 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2741 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2743 (v2i64 (unpckh VR128:$src1,
2744 (memopv2i64 addr:$src2))))]>, VEX_4V;
2747 let Constraints = "$src1 = $dst" in {
2748 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, unpckl, bc_v16i8>;
2749 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, unpckl, bc_v8i16>;
2750 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, unpckl, bc_v4i32>;
2752 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2753 /// knew to collapse (bitconvert VT to VT) into its operand.
2754 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2755 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2756 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2758 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>;
2759 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2760 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2761 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2763 (v2i64 (unpckl VR128:$src1,
2764 (memopv2i64 addr:$src2))))]>;
2766 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, unpckh, bc_v16i8>;
2767 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, unpckh, bc_v8i16>;
2768 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, unpckh, bc_v4i32>;
2770 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2771 /// knew to collapse (bitconvert VT to VT) into its operand.
2772 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2773 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2774 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2776 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>;
2777 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2778 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2779 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2781 (v2i64 (unpckh VR128:$src1,
2782 (memopv2i64 addr:$src2))))]>;
2785 } // ExeDomain = SSEPackedInt
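// punpckl* interleave the low halves of the two sources element by element
// (e.g. punpcklbw produces dst0, src0, dst1, src1, ...); punpckh* do the
// same with the high halves.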
2787 //===---------------------------------------------------------------------===//
2788 // SSE2 - Packed Integer Extract and Insert
2789 //===---------------------------------------------------------------------===//
2791 let ExeDomain = SSEPackedInt in {
2792 multiclass sse2_pinsrw<bit Is2Addr = 1> {
2793 def rri : Ii8<0xC4, MRMSrcReg,
2794 (outs VR128:$dst), (ins VR128:$src1,
2795 GR32:$src2, i32i8imm:$src3),
2797 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2798 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2800 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
2801 def rmi : Ii8<0xC4, MRMSrcMem,
2802 (outs VR128:$dst), (ins VR128:$src1,
2803 i16mem:$src2, i32i8imm:$src3),
2805 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2806 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2808 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
2813 let isAsmParserOnly = 0, Predicates = [HasAVX] in
2814 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
2815 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2816 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2817 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2818 imm:$src2))]>, OpSize, VEX;
2819 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
2820 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2821 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2822 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2826 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
2827 defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
2828 def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
2829 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
2830 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2831 []>, OpSize, VEX_4V;
2834 let Constraints = "$src1 = $dst" in
2835 defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;
2837 } // ExeDomain = SSEPackedInt
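// pinsrw replaces the destination word selected by the immediate with the
// low 16 bits of the source (register or i16 load); pextrw extracts the
// selected word and zero-extends it into the 32-bit destination.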
2839 //===---------------------------------------------------------------------===//
2840 // SSE2 - Packed Mask Creation
2841 //===---------------------------------------------------------------------===//
2843 let ExeDomain = SSEPackedInt in {
2845 let isAsmParserOnly = 0 in {
2846 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2847 "pmovmskb\t{$src, $dst|$dst, $src}",
2848 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
2849 def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
2850 "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
2852 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2853 "pmovmskb\t{$src, $dst|$dst, $src}",
2854 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2856 } // ExeDomain = SSEPackedInt
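// pmovmskb gathers the sign bit of each of the 16 source bytes into the low
// 16 bits of the destination GPR (upper bits zeroed); it is typically used
// after a pcmpeq*/pcmpgt* to get a scalar mask that can be tested.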
2858 //===---------------------------------------------------------------------===//
2859 // SSE2 - Conditional Store
2860 //===---------------------------------------------------------------------===//
2862 let ExeDomain = SSEPackedInt in {
2864 let isAsmParserOnly = 0 in {
2866 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
2867 (ins VR128:$src, VR128:$mask),
2868 "maskmovdqu\t{$mask, $src|$src, $mask}",
2869 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
2871 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
2872 (ins VR128:$src, VR128:$mask),
2873 "maskmovdqu\t{$mask, $src|$src, $mask}",
2874 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
2878 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2879 "maskmovdqu\t{$mask, $src|$src, $mask}",
2880 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
2882 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2883 "maskmovdqu\t{$mask, $src|$src, $mask}",
2884 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
2886 } // ExeDomain = SSEPackedInt
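// maskmovdqu stores only the bytes of $src whose corresponding byte in
// $mask has its most significant bit set, to the address held implicitly in
// EDI/RDI (hence the implicit register operands above), with a non-temporal
// hint.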
2888 //===---------------------------------------------------------------------===//
2889 // SSE2 - Move Doubleword
2890 //===---------------------------------------------------------------------===//
2892 // Move Int Doubleword to Packed Double Int
2893 let isAsmParserOnly = 0 in {
2894 def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2895 "movd\t{$src, $dst|$dst, $src}",
2897 (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
2898 def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2899 "movd\t{$src, $dst|$dst, $src}",
2901 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
2904 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2905 "movd\t{$src, $dst|$dst, $src}",
2907 (v4i32 (scalar_to_vector GR32:$src)))]>;
2908 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2909 "movd\t{$src, $dst|$dst, $src}",
2911 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
2912 def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
2913 "mov{d|q}\t{$src, $dst|$dst, $src}",
2915 (v2i64 (scalar_to_vector GR64:$src)))]>;
2916 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
2917 "mov{d|q}\t{$src, $dst|$dst, $src}",
2918 [(set FR64:$dst, (bitconvert GR64:$src))]>;
2921 // Move Int Doubleword to Single Scalar
2922 let isAsmParserOnly = 0 in {
2923 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2924 "movd\t{$src, $dst|$dst, $src}",
2925 [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
2927 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2928 "movd\t{$src, $dst|$dst, $src}",
2929 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
2932 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2933 "movd\t{$src, $dst|$dst, $src}",
2934 [(set FR32:$dst, (bitconvert GR32:$src))]>;
2936 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2937 "movd\t{$src, $dst|$dst, $src}",
2938 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
2940 // Move Packed Doubleword Int to Doubleword Int
2941 let isAsmParserOnly = 0 in {
2942 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2943 "movd\t{$src, $dst|$dst, $src}",
2944 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2946 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
2947 (ins i32mem:$dst, VR128:$src),
2948 "movd\t{$src, $dst|$dst, $src}",
2949 [(store (i32 (vector_extract (v4i32 VR128:$src),
2950 (iPTR 0))), addr:$dst)]>, VEX;
2952 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2953 "movd\t{$src, $dst|$dst, $src}",
2954 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2956 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
2957 "movd\t{$src, $dst|$dst, $src}",
2958 [(store (i32 (vector_extract (v4i32 VR128:$src),
2959 (iPTR 0))), addr:$dst)]>;
2961 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
2962 "mov{d|q}\t{$src, $dst|$dst, $src}",
2963 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
2965 def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
2966 "movq\t{$src, $dst|$dst, $src}",
2967 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
2969 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
2970 "mov{d|q}\t{$src, $dst|$dst, $src}",
2971 [(set GR64:$dst, (bitconvert FR64:$src))]>;
2972 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
2973 "movq\t{$src, $dst|$dst, $src}",
2974 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
2976 // Move Scalar Single to Double Int
2977 let isAsmParserOnly = 0 in {
2978 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
2979 "movd\t{$src, $dst|$dst, $src}",
2980 [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
2981 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
2982 "movd\t{$src, $dst|$dst, $src}",
2983 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
2985 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
2986 "movd\t{$src, $dst|$dst, $src}",
2987 [(set GR32:$dst, (bitconvert FR32:$src))]>;
2988 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
2989 "movd\t{$src, $dst|$dst, $src}",
2990 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
2992 // movd / movq to XMM register zero-extends
2993 let AddedComplexity = 15, isAsmParserOnly = 0 in {
2994 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2995 "movd\t{$src, $dst|$dst, $src}",
2996 [(set VR128:$dst, (v4i32 (X86vzmovl
2997 (v4i32 (scalar_to_vector GR32:$src)))))]>,
2999 def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3000 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3001 [(set VR128:$dst, (v2i64 (X86vzmovl
3002 (v2i64 (scalar_to_vector GR64:$src)))))]>,
3005 let AddedComplexity = 15 in {
3006 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3007 "movd\t{$src, $dst|$dst, $src}",
3008 [(set VR128:$dst, (v4i32 (X86vzmovl
3009 (v4i32 (scalar_to_vector GR32:$src)))))]>;
3010 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3011 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3012 [(set VR128:$dst, (v2i64 (X86vzmovl
3013 (v2i64 (scalar_to_vector GR64:$src)))))]>;
3016 let AddedComplexity = 20 in {
3017 let isAsmParserOnly = 0 in
3018 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3019 "movd\t{$src, $dst|$dst, $src}",
3021 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3022 (loadi32 addr:$src))))))]>,
3024 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3025 "movd\t{$src, $dst|$dst, $src}",
3027 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3028 (loadi32 addr:$src))))))]>;
3030 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
3031 (MOVZDI2PDIrm addr:$src)>;
3032 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
3033 (MOVZDI2PDIrm addr:$src)>;
3034 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
3035 (MOVZDI2PDIrm addr:$src)>;
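// X86vzmovl keeps only the low element and zeroes the rest, so the patterns
// above fold an insert-into-zero-vector of a loaded i32 into a single movd
// load; the raised AddedComplexity lets them win over the plain
// scalar_to_vector patterns when the zeroing is visible in the DAG.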
3038 //===---------------------------------------------------------------------===//
3039 // SSE2 - Move Quadword
3040 //===---------------------------------------------------------------------===//
3042 // Move Quadword Int to Packed Quadword Int
3043 let isAsmParserOnly = 0 in
3044 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3045 "vmovq\t{$src, $dst|$dst, $src}",
3047 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3048 VEX, Requires<[HasAVX]>;
3049 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3050 "movq\t{$src, $dst|$dst, $src}",
3052 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3053 Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
3055 // Move Packed Quadword Int to Quadword Int
3056 let isAsmParserOnly = 0 in
3057 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3058 "movq\t{$src, $dst|$dst, $src}",
3059 [(store (i64 (vector_extract (v2i64 VR128:$src),
3060 (iPTR 0))), addr:$dst)]>, VEX;
3061 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3062 "movq\t{$src, $dst|$dst, $src}",
3063 [(store (i64 (vector_extract (v2i64 VR128:$src),
3064 (iPTR 0))), addr:$dst)]>;
3066 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
3067 (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
3069 // Store / copy the lower 64 bits of an XMM register.
3070 let isAsmParserOnly = 0 in
3071 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3072 "movq\t{$src, $dst|$dst, $src}",
3073 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
3074 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3075 "movq\t{$src, $dst|$dst, $src}",
3076 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
3078 let AddedComplexity = 20, isAsmParserOnly = 0 in
3079 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3080 "vmovq\t{$src, $dst|$dst, $src}",
3082 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3083 (loadi64 addr:$src))))))]>,
3084 XS, VEX, Requires<[HasAVX]>;
3086 let AddedComplexity = 20 in {
3087 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3088 "movq\t{$src, $dst|$dst, $src}",
3090 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3091 (loadi64 addr:$src))))))]>,
3092 XS, Requires<[HasSSE2]>;
3094 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
3095 (MOVZQI2PQIrm addr:$src)>;
3096 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
3097 (MOVZQI2PQIrm addr:$src)>;
3098 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
3101 // Move from XMM to XMM and clear the upper 64 bits. Note: there is a bug in
3102 // the IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
3103 let isAsmParserOnly = 0, AddedComplexity = 15 in
3104 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3105 "vmovq\t{$src, $dst|$dst, $src}",
3106 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3107 XS, VEX, Requires<[HasAVX]>;
3108 let AddedComplexity = 15 in
3109 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3110 "movq\t{$src, $dst|$dst, $src}",
3111 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3112 XS, Requires<[HasSSE2]>;
3114 let AddedComplexity = 20, isAsmParserOnly = 0 in
3115 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3116 "vmovq\t{$src, $dst|$dst, $src}",
3117 [(set VR128:$dst, (v2i64 (X86vzmovl
3118 (loadv2i64 addr:$src))))]>,
3119 XS, VEX, Requires<[HasAVX]>;
3120 let AddedComplexity = 20 in {
3121 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3122 "movq\t{$src, $dst|$dst, $src}",
3123 [(set VR128:$dst, (v2i64 (X86vzmovl
3124 (loadv2i64 addr:$src))))]>,
3125 XS, Requires<[HasSSE2]>;
3127 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
3128 (MOVZPQILo2PQIrm addr:$src)>;
3131 // Instructions to match in the assembler
3132 let isAsmParserOnly = 0 in {
3133 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3134 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3135 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3136 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3137 // Recognize "movd" with GR64 destination, but encode as a "movq"
3138 def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3139 "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3142 // Instructions for the disassembler
3143 // xr = XMM register
3146 let isAsmParserOnly = 0, Predicates = [HasAVX] in
3147 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3148 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
3149 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3150 "movq\t{$src, $dst|$dst, $src}", []>, XS;
3152 //===---------------------------------------------------------------------===//
3153 // SSE2 - Misc Instructions
3154 //===---------------------------------------------------------------------===//
3157 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3158 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3159 TB, Requires<[HasSSE2]>;
3161 // Load, store, and memory fence
3162 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3163 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
3164 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3165 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
3166 def : Pat<(X86LFence), (LFENCE)>;
3167 def : Pat<(X86MFence), (MFENCE)>;
3170 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3171 // was introduced with SSE2, it's backward compatible.
3172 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
3174 // Alias instruction that maps an all-ones vector to pcmpeqd for SSE.
3175 // We set canFoldAsLoad because this can be converted to a constant-pool
3176 // load of an all-ones value if folding it would be beneficial.
3177 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3178 isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
3179 // FIXME: Change encoding to pseudo.
3180 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3181 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
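// The all-ones value is materialized as pcmpeqd of a register with itself
// (every element compares equal, setting all bits), which is what the
// 0x76/MRMInitReg encoding above produces.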
3183 //===---------------------------------------------------------------------===//
3184 // SSE3 - Conversion Instructions
3185 //===---------------------------------------------------------------------===//
3187 // Convert Packed Double FP to Packed DW Integers
3188 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
3189 // The assembler can recognize rr 256-bit instructions by seeing a ymm
3190 // register, but it cannot when a memory operand is used instead.
3191 // Provide explicitly suffixed (x/y) rr and rm forms to address this.
3192 def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3193 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3194 def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3195 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3198 def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3199 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3200 def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3201 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3204 def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3205 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
3206 def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
3207 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
3210 def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3211 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3212 def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3213 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3215 // Convert Packed DW Integers to Packed Double FP
3216 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
3217 def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3218 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3219 def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3220 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3221 def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
3222 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3223 def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
3224 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3227 def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3228 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3229 def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3230 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3232 // AVX 256-bit register conversion intrinsics
3233 def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
3234 (VCVTDQ2PDYrr VR128:$src)>;
3235 def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
3236 (VCVTDQ2PDYrm addr:$src)>;
3238 def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
3239 (VCVTPD2DQYrr VR256:$src)>;
3240 def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
3241 (VCVTPD2DQYrm addr:$src)>;
3243 //===---------------------------------------------------------------------===//
3244 // SSE3 - Move Instructions
3245 //===---------------------------------------------------------------------===//
3247 // Replicate Single FP
3248 multiclass sse3_replicate_sfp<bits<8> op, PatFrag rep_frag, string OpcodeStr> {
3249 def rr : S3SI<op, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3250 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3251 [(set VR128:$dst, (v4f32 (rep_frag
3252 VR128:$src, (undef))))]>;
3253 def rm : S3SI<op, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3254 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3255 [(set VR128:$dst, (rep_frag
3256 (memopv4f32 addr:$src), (undef)))]>;
3259 multiclass sse3_replicate_sfp_y<bits<8> op, PatFrag rep_frag,
3261 def rr : S3SI<op, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3262 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
3263 def rm : S3SI<op, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3264 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
3267 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
3268 // FIXME: Merge above classes when we have patterns for the ymm version
3269 defm VMOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "vmovshdup">, VEX;
3270 defm VMOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "vmovsldup">, VEX;
3271 defm VMOVSHDUPY : sse3_replicate_sfp_y<0x16, movshdup, "vmovshdup">, VEX;
3272 defm VMOVSLDUPY : sse3_replicate_sfp_y<0x12, movsldup, "vmovsldup">, VEX;
3274 defm MOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "movshdup">;
3275 defm MOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "movsldup">;
3277 // Replicate Double FP
3278 multiclass sse3_replicate_dfp<string OpcodeStr> {
3279 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3280 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3281 [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
3282 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
3283 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3285 (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
3289 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
3290 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3291 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3293 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3294 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3298 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
3299 // FIXME: Merge above classes when we have patterns for the ymm version
3300 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
3301 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
3303 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
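// movddup replicates the low double of the source, [a, b] -> [a, a]; the
// memory form reads only 64 bits, which is why it takes f64mem.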
3305 // Move Unaligned Integer
3306 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
3307 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3308 "vlddqu\t{$src, $dst|$dst, $src}",
3309 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
3310 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3311 "vlddqu\t{$src, $dst|$dst, $src}",
3312 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
3314 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3315 "lddqu\t{$src, $dst|$dst, $src}",
3316 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
3318 def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
3320 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3322 // Additional MOVDDUP patterns that fold the load.
3323 let AddedComplexity = 5 in {
3324 def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
3325 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3326 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3327 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3328 def : Pat<(movddup (memopv2i64 addr:$src), (undef)),
3329 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3330 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
3331 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3334 // vector_shuffle v1, <undef> <1, 1, 3, 3>
3335 let AddedComplexity = 15 in
3336 def : Pat<(v4i32 (movshdup VR128:$src, (undef))),
3337 (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3338 let AddedComplexity = 20 in
3339 def : Pat<(v4i32 (movshdup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3340 (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
3342 // vector_shuffle v1, <undef> <0, 0, 2, 2>
3343 let AddedComplexity = 15 in
3344 def : Pat<(v4i32 (movsldup VR128:$src, (undef))),
3345 (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3346 let AddedComplexity = 20 in
3347 def : Pat<(v4i32 (movsldup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3348 (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
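// movshdup duplicates the odd elements ([a0,a1,a2,a3] -> [a1,a1,a3,a3]) and
// movsldup the even ones (-> [a0,a0,a2,a2]), matching the shuffle masks in
// the comments above.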
3350 //===---------------------------------------------------------------------===//
3351 // SSE3 - Arithmetic
3352 //===---------------------------------------------------------------------===//
3354 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
3355 X86MemOperand x86memop, bit Is2Addr = 1> {
3356 def rr : I<0xD0, MRMSrcReg,
3357 (outs RC:$dst), (ins RC:$src1, RC:$src2),
3359 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3360 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3361 [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
3362 def rm : I<0xD0, MRMSrcMem,
3363 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3365 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3366 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3367 [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
3370 let isAsmParserOnly = 0, Predicates = [HasAVX],
3371 ExeDomain = SSEPackedDouble in {
3372 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
3373 f128mem, 0>, TB, XD, VEX_4V;
3374 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
3375 f128mem, 0>, TB, OpSize, VEX_4V;
3376 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
3377 f256mem, 0>, TB, XD, VEX_4V;
3378 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
3379 f256mem, 0>, TB, OpSize, VEX_4V;
3381 let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
3382 ExeDomain = SSEPackedDouble in {
3383 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
3385 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
3386 f128mem>, TB, OpSize;
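// addsubps/addsubpd subtract in the even lanes and add in the odd ones,
// e.g. addsubps gives [a0-b0, a1+b1, a2-b2, a3+b3].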
3389 //===---------------------------------------------------------------------===//
3390 // SSE3 Instructions
3391 //===---------------------------------------------------------------------===//
3394 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3395 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3396 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3398 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3399 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3400 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3402 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3404 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3405 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3406 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3408 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3409 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3410 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3412 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3413 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3414 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3416 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3418 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3419 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3420 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3423 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
3424 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
3425 int_x86_sse3_hadd_ps, 0>, VEX_4V;
3426 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
3427 int_x86_sse3_hadd_pd, 0>, VEX_4V;
3428 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
3429 int_x86_sse3_hsub_ps, 0>, VEX_4V;
3430 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
3431 int_x86_sse3_hsub_pd, 0>, VEX_4V;
3432 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
3433 int_x86_avx_hadd_ps_256, 0>, VEX_4V;
3434 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
3435 int_x86_avx_hadd_pd_256, 0>, VEX_4V;
3436 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
3437 int_x86_avx_hsub_ps_256, 0>, VEX_4V;
3438 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
3439 int_x86_avx_hsub_pd_256, 0>, VEX_4V;
3442 let Constraints = "$src1 = $dst" in {
3443 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
3444 int_x86_sse3_hadd_ps>;
3445 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
3446 int_x86_sse3_hadd_pd>;
3447 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
3448 int_x86_sse3_hsub_ps>;
3449 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
3450 int_x86_sse3_hsub_pd>;
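// The horizontal ops reduce adjacent pairs within each source, e.g.
// haddps a, b = [a0+a1, a2+a3, b0+b1, b2+b3] and hsubpd a, b = [a0-a1, b0-b1].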
3453 //===---------------------------------------------------------------------===//
3454 // SSSE3 - Packed Absolute Instructions
3455 //===---------------------------------------------------------------------===//
3458 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
3459 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
3460 PatFrag mem_frag128, Intrinsic IntId128> {
3461 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3463 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3464 [(set VR128:$dst, (IntId128 VR128:$src))]>,
3467 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3469 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3472 (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
3475 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
3476 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
3477 int_x86_ssse3_pabs_b_128>, VEX;
3478 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
3479 int_x86_ssse3_pabs_w_128>, VEX;
3480 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
3481 int_x86_ssse3_pabs_d_128>, VEX;
3484 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
3485 int_x86_ssse3_pabs_b_128>;
3486 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
3487 int_x86_ssse3_pabs_w_128>;
3488 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
3489 int_x86_ssse3_pabs_d_128>;
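// pabsb/pabsw/pabsd compute the per-element absolute value,
// e.g. [-3, 5, -7, 9] -> [3, 5, 7, 9].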
3491 //===---------------------------------------------------------------------===//
3492 // SSSE3 - Packed Binary Operator Instructions
3493 //===---------------------------------------------------------------------===//
3495 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
3496 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
3497 PatFrag mem_frag128, Intrinsic IntId128,
3499 let isCommutable = 1 in
3500 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3501 (ins VR128:$src1, VR128:$src2),
3503 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3504 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3505 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3507 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3508 (ins VR128:$src1, i128mem:$src2),
3510 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3511 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3513 (IntId128 VR128:$src1,
3514 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
3517 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
3518 let isCommutable = 0 in {
3519 defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
3520 int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
3521 defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
3522 int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
3523 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
3524 int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
3525 defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
3526 int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
3527 defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
3528 int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
3529 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
3530 int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
3531 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
3532 int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
3533 defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
3534 int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
3535 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
3536 int_x86_ssse3_psign_b_128, 0>, VEX_4V;
3537 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
3538 int_x86_ssse3_psign_w_128, 0>, VEX_4V;
3539 defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
3540 int_x86_ssse3_psign_d_128, 0>, VEX_4V;
3542 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
3543 int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
3546 // None of these have i8 immediate fields.
3547 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
3548 let isCommutable = 0 in {
3549 defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
3550 int_x86_ssse3_phadd_w_128>;
3551 defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
3552 int_x86_ssse3_phadd_d_128>;
3553 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
3554 int_x86_ssse3_phadd_sw_128>;
3555 defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
3556 int_x86_ssse3_phsub_w_128>;
3557 defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
3558 int_x86_ssse3_phsub_d_128>;
3559 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
3560 int_x86_ssse3_phsub_sw_128>;
3561 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
3562 int_x86_ssse3_pmadd_ub_sw_128>;
3563 defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv16i8,
3564 int_x86_ssse3_pshuf_b_128>;
3565 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
3566 int_x86_ssse3_psign_b_128>;
3567 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
3568 int_x86_ssse3_psign_w_128>;
3569 defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
3570 int_x86_ssse3_psign_d_128>;
3572 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
3573 int_x86_ssse3_pmul_hr_sw_128>;
3576 def : Pat<(X86pshufb VR128:$src, VR128:$mask),
3577 (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
3578 def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
3579 (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
3581 def : Pat<(X86psignb VR128:$src1, VR128:$src2),
3582 (PSIGNBrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
3583 def : Pat<(X86psignw VR128:$src1, VR128:$src2),
3584 (PSIGNWrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
3585 def : Pat<(X86psignd VR128:$src1, VR128:$src2),
3586 (PSIGNDrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
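// pshufb selects each destination byte by the low four bits of the matching
// mask byte, or zeroes it when the mask byte's MSB is set; psignb/w/d negate,
// keep or zero each element of the first source depending on whether the
// matching element of the second is negative, positive or zero. The Pat
// entries above map the X86-specific nodes onto the intrinsic-based defs.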
3588 //===---------------------------------------------------------------------===//
3589 // SSSE3 - Packed Align Instruction Patterns
3590 //===---------------------------------------------------------------------===//
3592 multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
3593 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
3594 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
3596 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3598 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3600 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
3601 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
3603 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3605 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3609 let isAsmParserOnly = 0, Predicates = [HasAVX] in
3610 defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
3611 let Constraints = "$src1 = $dst" in
3612 defm PALIGN : ssse3_palign<"palignr">;
3614 let AddedComplexity = 5 in {
3615 def : Pat<(v4i32 (palign:$src3 VR128:$src1, VR128:$src2)),
3616 (PALIGNR128rr VR128:$src2, VR128:$src1,
3617 (SHUFFLE_get_palign_imm VR128:$src3))>,
3618 Requires<[HasSSSE3]>;
3619 def : Pat<(v4f32 (palign:$src3 VR128:$src1, VR128:$src2)),
3620 (PALIGNR128rr VR128:$src2, VR128:$src1,
3621 (SHUFFLE_get_palign_imm VR128:$src3))>,
3622 Requires<[HasSSSE3]>;
3623 def : Pat<(v8i16 (palign:$src3 VR128:$src1, VR128:$src2)),
3624 (PALIGNR128rr VR128:$src2, VR128:$src1,
3625 (SHUFFLE_get_palign_imm VR128:$src3))>,
3626 Requires<[HasSSSE3]>;
3627 def : Pat<(v16i8 (palign:$src3 VR128:$src1, VR128:$src2)),
3628 (PALIGNR128rr VR128:$src2, VR128:$src1,
3629 (SHUFFLE_get_palign_imm VR128:$src3))>,
3630 Requires<[HasSSSE3]>;
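// palignr concatenates {$src1, $src2} with $src1 in the high half, shifts
// the 32-byte value right by the immediate (in bytes) and keeps the low 16
// bytes; that is why the patterns above swap the shuffle operands and derive
// the immediate with SHUFFLE_get_palign_imm.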
3633 //===---------------------------------------------------------------------===//
3634 // SSSE3 Misc Instructions
3635 //===---------------------------------------------------------------------===//
3637 // Thread synchronization
3638 let usesCustomInserter = 1 in {
3639 def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
3640 [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>;
3641 def MWAIT : PseudoI<(outs), (ins GR32:$src1, GR32:$src2),
3642 [(int_x86_sse3_mwait GR32:$src1, GR32:$src2)]>;
3645 let Uses = [EAX, ECX, EDX] in
3646 def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", []>, TB,
3647 Requires<[HasSSE3]>;
3648 let Uses = [ECX, EAX] in
3649 def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait", []>, TB,
3650 Requires<[HasSSE3]>;
3652 def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>;
3653 def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>;
3655 def : InstAlias<"monitor %eax, %ecx, %edx", (MONITORrrr)>,
3656 Requires<[In32BitMode]>;
3657 def : InstAlias<"monitor %rax, %rcx, %rdx", (MONITORrrr)>,
3658 Requires<[In64BitMode]>;
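// monitor takes its address in EAX/RAX with extensions in ECX and hints in
// EDX; mwait takes hints in EAX and extensions in ECX, which is what the
// Uses lists above encode. The aliases accept the assembly forms that spell
// out these implicit registers.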
3660 //===---------------------------------------------------------------------===//
3661 // Non-Instruction Patterns
3662 //===---------------------------------------------------------------------===//
3664 // extload f32 -> f64. This matches load+fextend because we have a hack in
3665 // the isel (PreprocessForFPConvert) that can introduce loads after dag
3666 // combine.
3667 // Since these loads aren't folded into the fextend, we have to match it
3668 // explicitly here.
3669 let Predicates = [HasSSE2] in
3670 def : Pat<(fextend (loadf32 addr:$src)),
3671 (CVTSS2SDrm addr:$src)>;
3674 let Predicates = [HasXMMInt] in {
3675 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
3676 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
3677 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
3678 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
3679 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
3680 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
3681 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
3682 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
3683 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
3684 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
3685 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
3686 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
3687 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
3688 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
3689 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
3690 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
3691 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
3692 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
3693 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
3694 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
3695 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
3696 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
3697 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
3698 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
3699 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
3700 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
3701 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
3702 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
3703 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
3704 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
3707 let Predicates = [HasAVX] in {
3708 def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
3711 // Move scalar to XMM zero-extended
3712 // movd to XMM register zero-extends
3713 let AddedComplexity = 15 in {
3714 // Zero a VR128, then do a MOVS{S|D} to the lower bits.
3715 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
3716 (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
3717 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
3718 (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
3719 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
3720 (MOVSSrr (v4f32 (V_SET0PS)),
3721 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
3722 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
3723 (MOVSSrr (v4i32 (V_SET0PI)),
3724 (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
3727 // Splat v2f64 / v2i64
3728 let AddedComplexity = 10 in {
3729 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
3730 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3731 def : Pat<(unpckh (v2f64 VR128:$src), (undef)),
3732 (UNPCKHPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3733 def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
3734 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3735 def : Pat<(unpckh (v2i64 VR128:$src), (undef)),
3736 (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3739 // Special unary SHUFPSrri case.
3740 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
3741 (SHUFPSrri VR128:$src1, VR128:$src1,
3742 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3743 let AddedComplexity = 5 in
3744 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3745 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3746 Requires<[HasSSE2]>;
3747 // Special unary SHUFPDrri case.
3748 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
3749 (SHUFPDrri VR128:$src1, VR128:$src1,
3750 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3751 Requires<[HasSSE2]>;
3752 // Special unary SHUFPDrri case.
3753 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
3754 (SHUFPDrri VR128:$src1, VR128:$src1,
3755 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3756 Requires<[HasSSE2]>;
3757 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
3758 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3759 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3760 Requires<[HasSSE2]>;
3762 // Special binary v4i32 shuffle cases with SHUFPS.
3763 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
3764 (SHUFPSrri VR128:$src1, VR128:$src2,
3765 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3766 Requires<[HasSSE2]>;
3767 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)))),
3768 (SHUFPSrmi VR128:$src1, addr:$src2,
3769 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3770 Requires<[HasSSE2]>;
3771 // Special binary v2i64 shuffle cases using SHUFPDrri.
3772 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
3773 (SHUFPDrri VR128:$src1, VR128:$src2,
3774 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3775 Requires<[HasSSE2]>;
3777 // vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
3778 let AddedComplexity = 15 in {
3779 def : Pat<(v4i32 (unpckl_undef:$src2 VR128:$src, (undef))),
3780 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3781 Requires<[OptForSpeed, HasSSE2]>;
3782 def : Pat<(v4f32 (unpckl_undef:$src2 VR128:$src, (undef))),
3783 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3784 Requires<[OptForSpeed, HasSSE2]>;
3786 let AddedComplexity = 10 in {
3787 def : Pat<(v4f32 (unpckl_undef VR128:$src, (undef))),
3788 (UNPCKLPSrr VR128:$src, VR128:$src)>;
3789 def : Pat<(v16i8 (unpckl_undef VR128:$src, (undef))),
3790 (PUNPCKLBWrr VR128:$src, VR128:$src)>;
3791 def : Pat<(v8i16 (unpckl_undef VR128:$src, (undef))),
3792 (PUNPCKLWDrr VR128:$src, VR128:$src)>;
3793 def : Pat<(v4i32 (unpckl_undef VR128:$src, (undef))),
3794 (PUNPCKLDQrr VR128:$src, VR128:$src)>;
3797 // vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
3798 let AddedComplexity = 15 in {
3799 def : Pat<(v4i32 (unpckh_undef:$src2 VR128:$src, (undef))),
3800 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3801 Requires<[OptForSpeed, HasSSE2]>;
3802 def : Pat<(v4f32 (unpckh_undef:$src2 VR128:$src, (undef))),
3803 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3804 Requires<[OptForSpeed, HasSSE2]>;
3806 let AddedComplexity = 10 in {
3807 def : Pat<(v4f32 (unpckh_undef VR128:$src, (undef))),
3808 (UNPCKHPSrr VR128:$src, VR128:$src)>;
3809 def : Pat<(v16i8 (unpckh_undef VR128:$src, (undef))),
3810 (PUNPCKHBWrr VR128:$src, VR128:$src)>;
3811 def : Pat<(v8i16 (unpckh_undef VR128:$src, (undef))),
3812 (PUNPCKHWDrr VR128:$src, VR128:$src)>;
3813 def : Pat<(v4i32 (unpckh_undef VR128:$src, (undef))),
3814 (PUNPCKHDQrr VR128:$src, VR128:$src)>;
3817 let AddedComplexity = 20 in {
3818 // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
3819 def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
3820 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
3822 // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
3823 def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
3824 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
3826 // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
3827 def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
3828 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3829 def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
3830 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3833 let AddedComplexity = 20 in {
3834 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
3835 def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
3836 (MOVLPSrm VR128:$src1, addr:$src2)>;
3837 def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
3838 (MOVLPDrm VR128:$src1, addr:$src2)>;
3839 def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
3840 (MOVLPSrm VR128:$src1, addr:$src2)>;
3841 def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
3842 (MOVLPDrm VR128:$src1, addr:$src2)>;
3845 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
3846 def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3847 (MOVLPSmr addr:$src1, VR128:$src2)>;
3848 def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3849 (MOVLPDmr addr:$src1, VR128:$src2)>;
3850 def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
3852 (MOVLPSmr addr:$src1, VR128:$src2)>;
3853 def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3854 (MOVLPDmr addr:$src1, VR128:$src2)>;
3856 let AddedComplexity = 15 in {
3857 // Setting the lowest element in the vector.
3858 def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
3859 (MOVSSrr (v4i32 VR128:$src1),
3860 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
3861 def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
3862 (MOVSDrr (v2i64 VR128:$src1),
3863 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
3865 // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
3866 def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
3867 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3868 Requires<[HasSSE2]>;
3869 def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
3870 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3871 Requires<[HasSSE2]>;
3874 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
3875 // fall back to this for SSE1)
3876 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
3877 (SHUFPSrri VR128:$src2, VR128:$src1,
3878 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3880 // Set lowest element and zero upper elements.
3881 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
3882 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
3884 // vector -> vector casts
3885 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
3886 (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
3887 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
3888 (CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
3890 // Use movaps / movups for SSE integer load / store (one byte shorter).
3891 let Predicates = [HasSSE1] in {
3892 def : Pat<(alignedloadv4i32 addr:$src),
3893 (MOVAPSrm addr:$src)>;
3894 def : Pat<(loadv4i32 addr:$src),
3895 (MOVUPSrm addr:$src)>;
3896 def : Pat<(alignedloadv2i64 addr:$src),
3897 (MOVAPSrm addr:$src)>;
3898 def : Pat<(loadv2i64 addr:$src),
3899 (MOVUPSrm addr:$src)>;
3901 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3902 (MOVAPSmr addr:$dst, VR128:$src)>;
3903 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3904 (MOVAPSmr addr:$dst, VR128:$src)>;
3905 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3906 (MOVAPSmr addr:$dst, VR128:$src)>;
3907 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3908 (MOVAPSmr addr:$dst, VR128:$src)>;
3909 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3910 (MOVUPSmr addr:$dst, VR128:$src)>;
3911 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3912 (MOVUPSmr addr:$dst, VR128:$src)>;
3913 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3914 (MOVUPSmr addr:$dst, VR128:$src)>;
3915 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3916 (MOVUPSmr addr:$dst, VR128:$src)>;
3919 // Use vmovaps/vmovups for AVX 128-bit integer load/store (one byte shorter).
3920 let Predicates = [HasAVX] in {
3921 def : Pat<(alignedloadv4i32 addr:$src),
3922 (VMOVAPSrm addr:$src)>;
3923 def : Pat<(loadv4i32 addr:$src),
3924 (VMOVUPSrm addr:$src)>;
3925 def : Pat<(alignedloadv2i64 addr:$src),
3926 (VMOVAPSrm addr:$src)>;
3927 def : Pat<(loadv2i64 addr:$src),
3928 (VMOVUPSrm addr:$src)>;
3930 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3931 (VMOVAPSmr addr:$dst, VR128:$src)>;
3932 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3933 (VMOVAPSmr addr:$dst, VR128:$src)>;
3934 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3935 (VMOVAPSmr addr:$dst, VR128:$src)>;
3936 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3937 (VMOVAPSmr addr:$dst, VR128:$src)>;
3938 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3939 (VMOVUPSmr addr:$dst, VR128:$src)>;
3940 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3941 (VMOVUPSmr addr:$dst, VR128:$src)>;
3942 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3943 (VMOVUPSmr addr:$dst, VR128:$src)>;
3944 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3945 (VMOVUPSmr addr:$dst, VR128:$src)>;
3948 //===----------------------------------------------------------------------===//
3949 // SSE4.1 - Packed Move with Sign/Zero Extend
3950 //===----------------------------------------------------------------------===//
3952 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3953 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3954 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3955 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3957 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3958 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3960 (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
3964 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
3965 defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
3967 defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
3969 defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
3971 defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
3973 defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
3975 defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
3979 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
3980 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
3981 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
3982 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
3983 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
3984 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
3986 // Common patterns involving scalar load.
3987 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
3988 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
3989 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
3990 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
3992 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
3993 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
3994 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
3995 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
3997 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
3998 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
3999 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
4000 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4002 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
4003 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4004 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
4005 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4007 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
4008 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4009 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
4010 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4012 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
4013 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4014 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
4015 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4018 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4019 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4020 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4021 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4023 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4024 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4026 (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
4030 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
4031 defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
4033 defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
4035 defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
4037 defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
4041 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
4042 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
4043 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
4044 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
4046 // Common patterns involving scalar load
4047 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
4048 (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
4049 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
4050 (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;
4052 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
4053 (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
4054 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
4055 (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
4058 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4059 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4060 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4061 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4063   // Expecting an i16 load any-extended to an i32 value.
4064 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
4065 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4066 [(set VR128:$dst, (IntId (bitconvert
4067 (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
4071 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
4072 defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
4074 defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
4077 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
4078 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
4080 // Common patterns involving scalar load
4081 def : Pat<(int_x86_sse41_pmovsxbq
4082 (bitconvert (v4i32 (X86vzmovl
4083 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4084 (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;
4086 def : Pat<(int_x86_sse41_pmovzxbq
4087 (bitconvert (v4i32 (X86vzmovl
4088 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4089 (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
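//
// Illustration only (not part of these definitions): the scalar-load patterns
// above are what allow a user-level sequence like the one below, written with
// the standard <smmintrin.h> SSE4.1 intrinsics, to fold the 64-bit load
// straight into the memory form (e.g. PMOVSXBWrm).  A minimal C sketch:
//
//   #include <smmintrin.h>
//   __m128i widen_signed_bytes(const void *p) {
//     __m128i lo = _mm_loadl_epi64((const __m128i *)p); // 64-bit scalar load
//     return _mm_cvtepi8_epi16(lo);                     // pmovsxbw xmm, m64
//   }
//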
4091 //===----------------------------------------------------------------------===//
4092 // SSE4.1 - Extract Instructions
4093 //===----------------------------------------------------------------------===//
4095 /// SS41I_extract8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
4096 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
4097 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4098 (ins VR128:$src1, i32i8imm:$src2),
4099 !strconcat(OpcodeStr,
4100 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4101 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
4103 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4104 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
4105 !strconcat(OpcodeStr,
4106 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4109 // There's an AssertZext in the way of writing the store pattern
4110 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4113 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
4114 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
4115 def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
4116 (ins VR128:$src1, i32i8imm:$src2),
4117 "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
4120 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
4123 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
4124 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
4125 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4126 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
4127 !strconcat(OpcodeStr,
4128 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4131 // There's an AssertZext in the way of writing the store pattern
4132   // (store (i16 (trunc (X86pextrw (v8i16 VR128:$src1), imm:$src2))), addr:$dst)
4135 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4136 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
4138 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
4141 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4142 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
4143 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4144 (ins VR128:$src1, i32i8imm:$src2),
4145 !strconcat(OpcodeStr,
4146 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4148 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
4149 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4150 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
4151 !strconcat(OpcodeStr,
4152 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4153 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
4154 addr:$dst)]>, OpSize;
4157 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4158 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
4160 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
4162 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
4163 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
4164 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
4165 (ins VR128:$src1, i32i8imm:$src2),
4166 !strconcat(OpcodeStr,
4167 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4169 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
4170 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4171 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
4172 !strconcat(OpcodeStr,
4173 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4174 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
4175 addr:$dst)]>, OpSize, REX_W;
4178 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4179 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
4181 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
4183 /// SS41I_extractf32 - SSE 4.1 extract a 32-bit fp value to int reg or memory
4185 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
4186 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4187 (ins VR128:$src1, i32i8imm:$src2),
4188 !strconcat(OpcodeStr,
4189 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4191 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
4193 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4194 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
4195 !strconcat(OpcodeStr,
4196 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4197 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
4198 addr:$dst)]>, OpSize;
4201 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
4202 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
4203 def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
4204 (ins VR128:$src1, i32i8imm:$src2),
4205 "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
4208 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
4210 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
4211 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
4214 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
4215 Requires<[HasSSE41]>;
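//
// Illustration only: a user-level view of the extract forms defined above,
// assuming the standard <smmintrin.h> intrinsics.  The *mr variants above let
// the extracted element be stored directly to memory.
//
//   #include <smmintrin.h>
//   int get_byte (__m128i v) { return _mm_extract_epi8(v, 5);  } // pextrb
//   int get_dword(__m128i v) { return _mm_extract_epi32(v, 2); } // pextrd
//   // extractps: returns the raw 32-bit pattern of fp lane 1.
//   int get_lane_bits(__m128 v) { return _mm_extract_ps(v, 1); }
//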
4217 //===----------------------------------------------------------------------===//
4218 // SSE4.1 - Insert Instructions
4219 //===----------------------------------------------------------------------===//
4221 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
4222 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4223 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4225 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4227 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4229 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
4230 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4231 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
4233 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4235 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4237 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
4238 imm:$src3))]>, OpSize;
4241 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4242 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
4243 let Constraints = "$src1 = $dst" in
4244 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
4246 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
4247 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4248 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4250 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4252 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4254 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
4256 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4257 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
4259 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4261 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4263 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
4264 imm:$src3)))]>, OpSize;
4267 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4268 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
4269 let Constraints = "$src1 = $dst" in
4270 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
4272 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
4273 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4274 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
4276 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4278 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4280 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
4282 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4283 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
4285 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4287 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4289 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
4290 imm:$src3)))]>, OpSize;
4293 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4294 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
4295 let Constraints = "$src1 = $dst" in
4296 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
4298 // insertps has a few different modes; the first two below are optimized
4299 // inserts that won't zero arbitrary elements in the destination vector.
4300 // The next one matches the intrinsic and may zero arbitrary elements in
4301 // the target vector.
4302 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
4303 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4304 (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4306 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4308 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4310 (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
4312 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4313 (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
4315 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4317 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4319 (X86insrtps VR128:$src1,
4320 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
4321 imm:$src3))]>, OpSize;
4324 let Constraints = "$src1 = $dst" in
4325 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
4326 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4327 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
4329 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4330 (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4332 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4333 (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4334 Requires<[HasSSE41]>;
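//
// Illustration only: the insertps immediate as seen from user code, via the
// standard <smmintrin.h> intrinsic.  In the register form imm[7:6] selects
// the source lane, imm[5:4] selects the destination lane, and imm[3:0] is a
// zero mask applied to the result.
//
//   #include <smmintrin.h>
//   // Copy lane 3 of b into lane 0 of a, then zero lane 2 of the result.
//   __m128 insert_example(__m128 a, __m128 b) {
//     return _mm_insert_ps(a, b, (3 << 6) | (0 << 4) | 0x4);
//   }
//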
4336 //===----------------------------------------------------------------------===//
4337 // SSE4.1 - Round Instructions
4338 //===----------------------------------------------------------------------===//
4340 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
4341 X86MemOperand x86memop, RegisterClass RC,
4342 PatFrag mem_frag32, PatFrag mem_frag64,
4343 Intrinsic V4F32Int, Intrinsic V2F64Int> {
4345   // Vector intrinsic operation, reg
4346 def PSr : SS4AIi8<opcps, MRMSrcReg,
4347 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4348 !strconcat(OpcodeStr,
4349 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4350 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
4353 // Vector intrinsic operation, mem
4354 def PSm : Ii8<opcps, MRMSrcMem,
4355 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4356 !strconcat(OpcodeStr,
4357 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4359 (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
4361 Requires<[HasSSE41]>;
4363 // Vector intrinsic operation, reg
4364 def PDr : SS4AIi8<opcpd, MRMSrcReg,
4365 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4366 !strconcat(OpcodeStr,
4367 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4368 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
4371 // Vector intrinsic operation, mem
4372 def PDm : SS4AIi8<opcpd, MRMSrcMem,
4373 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4374 !strconcat(OpcodeStr,
4375 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4377 (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
4381 multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
4382 RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
4384   // Vector intrinsic operation, reg
4385 def PSr_AVX : SS4AIi8<opcps, MRMSrcReg,
4386 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4387 !strconcat(OpcodeStr,
4388 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4391 // Vector intrinsic operation, mem
4392 def PSm_AVX : Ii8<opcps, MRMSrcMem,
4393 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4394 !strconcat(OpcodeStr,
4395 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4396 []>, TA, OpSize, Requires<[HasSSE41]>;
4398 // Vector intrinsic operation, reg
4399 def PDr_AVX : SS4AIi8<opcpd, MRMSrcReg,
4400 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4401 !strconcat(OpcodeStr,
4402 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4405 // Vector intrinsic operation, mem
4406 def PDm_AVX : SS4AIi8<opcpd, MRMSrcMem,
4407 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4408 !strconcat(OpcodeStr,
4409 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4413 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
4416 Intrinsic F64Int, bit Is2Addr = 1> {
4417 // Intrinsic operation, reg.
4418 def SSr : SS4AIi8<opcss, MRMSrcReg,
4419 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4421 !strconcat(OpcodeStr,
4422 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4423 !strconcat(OpcodeStr,
4424 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4425 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4428 // Intrinsic operation, mem.
4429 def SSm : SS4AIi8<opcss, MRMSrcMem,
4430 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4432 !strconcat(OpcodeStr,
4433 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4434 !strconcat(OpcodeStr,
4435 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4437 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
4440 // Intrinsic operation, reg.
4441 def SDr : SS4AIi8<opcsd, MRMSrcReg,
4442 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4444 !strconcat(OpcodeStr,
4445 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4446 !strconcat(OpcodeStr,
4447 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4448 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4451 // Intrinsic operation, mem.
4452 def SDm : SS4AIi8<opcsd, MRMSrcMem,
4453 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4455 !strconcat(OpcodeStr,
4456 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4457 !strconcat(OpcodeStr,
4458 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4460 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
4464 multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
4466 // Intrinsic operation, reg.
4467 def SSr_AVX : SS4AIi8<opcss, MRMSrcReg,
4468 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4469 !strconcat(OpcodeStr,
4470 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4473 // Intrinsic operation, mem.
4474 def SSm_AVX : SS4AIi8<opcss, MRMSrcMem,
4475 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4476 !strconcat(OpcodeStr,
4477 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4480 // Intrinsic operation, reg.
4481 def SDr_AVX : SS4AIi8<opcsd, MRMSrcReg,
4482 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4483 !strconcat(OpcodeStr,
4484 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4487 // Intrinsic operation, mem.
4488 def SDm_AVX : SS4AIi8<opcsd, MRMSrcMem,
4489 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4490 !strconcat(OpcodeStr,
4491 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4495 // FP round - roundss, roundps, roundsd, roundpd
4496 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
4498 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
4499 memopv4f32, memopv2f64,
4500 int_x86_sse41_round_ps,
4501 int_x86_sse41_round_pd>, VEX;
4502 defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
4503 memopv8f32, memopv4f64,
4504 int_x86_avx_round_ps_256,
4505 int_x86_avx_round_pd_256>, VEX;
4506 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
4507 int_x86_sse41_round_ss,
4508 int_x86_sse41_round_sd, 0>, VEX_4V;
4510 // Instructions for the assembler
4511 defm VROUND : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
4513 defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
4515 defm VROUND : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
4518 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
4519 memopv4f32, memopv2f64,
4520 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
4521 let Constraints = "$src1 = $dst" in
4522 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
4523 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
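//
// Illustration only: the rounding-control immediate these instructions take,
// shown with the standard <smmintrin.h> intrinsics.
//
//   #include <smmintrin.h>
//   __m128 round_nearest(__m128 x) {
//     return _mm_round_ps(x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
//   }
//   __m128d floor2(__m128d x) { return _mm_floor_pd(x); } // roundpd, toward -inf
//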
4525 //===----------------------------------------------------------------------===//
4526 // SSE4.1 - Packed Bit Test
4527 //===----------------------------------------------------------------------===//
4529 // The ptest instruction: we lower to it in X86ISelLowering, primarily from
4530 // the Intel intrinsic that corresponds to it.
4531 let Defs = [EFLAGS], isAsmParserOnly = 0, Predicates = [HasAVX] in {
4532 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4533 "vptest\t{$src2, $src1|$src1, $src2}",
4534 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4536 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4537 "vptest\t{$src2, $src1|$src1, $src2}",
4538 [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4541 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
4542 "vptest\t{$src2, $src1|$src1, $src2}",
4543 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
4545 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
4546 "vptest\t{$src2, $src1|$src1, $src2}",
4547 [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
4551 let Defs = [EFLAGS] in {
4552 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4553 "ptest \t{$src2, $src1|$src1, $src2}",
4554 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4556 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4557 "ptest \t{$src2, $src1|$src1, $src2}",
4558 [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4562 // The bit test instructions below are AVX only
4563 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
4564 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
4565 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
4566 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4567 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
4568 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
4569 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4570 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
4574 let Defs = [EFLAGS], isAsmParserOnly = 0, Predicates = [HasAVX] in {
4575 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
4576 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
4577 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
4578 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
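//
// Illustration only: how these EFLAGS-defining tests are reached from user
// code, assuming the standard <smmintrin.h> intrinsics.  The intrinsics lower
// to the X86ptest/X86testp nodes above, and the result is read out of EFLAGS.
//
//   #include <smmintrin.h>
//   int all_zero(__m128i v)            { return _mm_testz_si128(v, v); } // ZF
//   int contains(__m128i v, __m128i m) { return _mm_testc_si128(v, m); } // CF
//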
4581 //===----------------------------------------------------------------------===//
4582 // SSE4.1 - Misc Instructions
4583 //===----------------------------------------------------------------------===//
4585 def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
4586 "popcnt{w}\t{$src, $dst|$dst, $src}",
4587 [(set GR16:$dst, (ctpop GR16:$src))]>, OpSize, XS;
4588 def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
4589 "popcnt{w}\t{$src, $dst|$dst, $src}",
4590 [(set GR16:$dst, (ctpop (loadi16 addr:$src)))]>, OpSize, XS;
4592 def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
4593 "popcnt{l}\t{$src, $dst|$dst, $src}",
4594 [(set GR32:$dst, (ctpop GR32:$src))]>, XS;
4595 def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
4596 "popcnt{l}\t{$src, $dst|$dst, $src}",
4597 [(set GR32:$dst, (ctpop (loadi32 addr:$src)))]>, XS;
4599 def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
4600 "popcnt{q}\t{$src, $dst|$dst, $src}",
4601 [(set GR64:$dst, (ctpop GR64:$src))]>, XS;
4602 def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
4603 "popcnt{q}\t{$src, $dst|$dst, $src}",
4604 [(set GR64:$dst, (ctpop (loadi64 addr:$src)))]>, XS;
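//
// Illustration only: the user-level form of the POPCNT definitions above,
// assuming the standard <nmmintrin.h> intrinsics.
//
//   #include <nmmintrin.h>
//   int       bits32(unsigned x)           { return _mm_popcnt_u32(x); } // popcnt{l}
//   long long bits64(unsigned long long x) { return _mm_popcnt_u64(x); } // popcnt{q}
//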
4608 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
4609 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
4610 Intrinsic IntId128> {
4611 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4613 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4614 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
4615 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4617 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4620 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
4623 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4624 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
4625 int_x86_sse41_phminposuw>, VEX;
4626 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
4627 int_x86_sse41_phminposuw>;
4629 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
4630 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
4631 Intrinsic IntId128, bit Is2Addr = 1> {
4632 let isCommutable = 1 in
4633 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4634 (ins VR128:$src1, VR128:$src2),
4636 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4637 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4638 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
4639 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4640 (ins VR128:$src1, i128mem:$src2),
4642 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4643 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4645 (IntId128 VR128:$src1,
4646 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4649 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
4650 let isCommutable = 0 in
4651 defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
4653 defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
4655 defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
4657 defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
4659 defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
4661 defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
4663 defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
4665 defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
4667 defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
4669 defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
4671 defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
4675 let Constraints = "$src1 = $dst" in {
4676 let isCommutable = 0 in
4677 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
4678 defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
4679 defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
4680 defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
4681 defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
4682 defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
4683 defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
4684 defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
4685 defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
4686 defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
4687 defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
4690 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
4691 (PCMPEQQrr VR128:$src1, VR128:$src2)>;
4692 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
4693 (PCMPEQQrm VR128:$src1, addr:$src2)>;
4695 /// SS48I_binop_rm - Simple SSE41 binary operator.
4696 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4697 ValueType OpVT, bit Is2Addr = 1> {
4698 let isCommutable = 1 in
4699 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4700 (ins VR128:$src1, VR128:$src2),
4702 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4703 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4704 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
4706 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4707 (ins VR128:$src1, i128mem:$src2),
4709 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4710 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4711 [(set VR128:$dst, (OpNode VR128:$src1,
4712 (bc_v4i32 (memopv2i64 addr:$src2))))]>,
4716 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4717 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
4718 let Constraints = "$src1 = $dst" in
4719 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
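//
// Illustration only: user-level equivalents of the simple binary operators
// above, assuming the standard <smmintrin.h> intrinsics.
//
//   #include <smmintrin.h>
//   __m128i min8 (__m128i a, __m128i b) { return _mm_min_epi8(a, b);    } // pminsb
//   __m128i eq64 (__m128i a, __m128i b) { return _mm_cmpeq_epi64(a, b); } // pcmpeqq
//   __m128i mul32(__m128i a, __m128i b) { return _mm_mullo_epi32(a, b); } // pmulld
//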
4721 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
4722 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
4723 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
4724 X86MemOperand x86memop, bit Is2Addr = 1> {
4725 let isCommutable = 1 in
4726 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
4727 (ins RC:$src1, RC:$src2, i32i8imm:$src3),
4729 !strconcat(OpcodeStr,
4730 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4731 !strconcat(OpcodeStr,
4732 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4733 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
4735 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
4736 (ins RC:$src1, x86memop:$src2, i32i8imm:$src3),
4738 !strconcat(OpcodeStr,
4739 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4740 !strconcat(OpcodeStr,
4741 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4744 (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
4748 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
4749 let isCommutable = 0 in {
4750 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
4751 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4752 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
4753 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4754 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
4755 int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4756 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
4757 int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4758 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
4759 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4760 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
4761 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4763 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
4764 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4765 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
4766 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4767 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
4768 VR256, memopv32i8, i256mem, 0>, VEX_4V;
4771 let Constraints = "$src1 = $dst" in {
4772 let isCommutable = 0 in {
4773 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
4774 VR128, memopv16i8, i128mem>;
4775 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
4776 VR128, memopv16i8, i128mem>;
4777 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
4778 VR128, memopv16i8, i128mem>;
4779 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
4780 VR128, memopv16i8, i128mem>;
4782 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
4783 VR128, memopv16i8, i128mem>;
4784 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
4785 VR128, memopv16i8, i128mem>;
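//
// Illustration only: the 8-bit immediate forms above from user code, assuming
// the standard <smmintrin.h> intrinsics.  For blendps the immediate is a
// per-lane select mask; for dpps the high nibble selects the lanes that are
// multiplied and the low nibble selects the lanes that receive the sum.
//
//   #include <smmintrin.h>
//   __m128 mix (__m128 a, __m128 b) { return _mm_blend_ps(a, b, 0x5); } // blendps $5
//   __m128 dot4(__m128 a, __m128 b) { return _mm_dp_ps(a, b, 0xF1);   } // dpps $0xF1
//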
4788 /// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operands
4789 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
4790 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
4791 RegisterClass RC, X86MemOperand x86memop,
4792 PatFrag mem_frag, Intrinsic IntId> {
4793 def rr : I<opc, MRMSrcReg, (outs RC:$dst),
4794 (ins RC:$src1, RC:$src2, RC:$src3),
4795 !strconcat(OpcodeStr,
4796 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4797 [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
4798 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4800 def rm : I<opc, MRMSrcMem, (outs RC:$dst),
4801 (ins RC:$src1, x86memop:$src2, RC:$src3),
4802 !strconcat(OpcodeStr,
4803 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4805 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
4807 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4811 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
4812 memopv16i8, int_x86_sse41_blendvpd>;
4813 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
4814 memopv16i8, int_x86_sse41_blendvps>;
4815 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
4816 memopv16i8, int_x86_sse41_pblendvb>;
4817 defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
4818 memopv32i8, int_x86_avx_blendv_pd_256>;
4819 defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
4820 memopv32i8, int_x86_avx_blendv_ps_256>;
4822 /// SS41I_ternary_int - SSE 4.1 ternary operator
4823 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
4824 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4825 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4826 (ins VR128:$src1, VR128:$src2),
4827 !strconcat(OpcodeStr,
4828 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4829 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
4832 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4833 (ins VR128:$src1, i128mem:$src2),
4834 !strconcat(OpcodeStr,
4835 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4838 (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
4842 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
4843 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
4844 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
4846 def : Pat<(X86pblendv VR128:$src1, VR128:$src2, XMM0),
4847 (PBLENDVBrr0 VR128:$src1, VR128:$src2)>;
4849 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4850 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4851 "vmovntdqa\t{$src, $dst|$dst, $src}",
4852 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4854 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4855 "movntdqa\t{$src, $dst|$dst, $src}",
4856 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4859 //===----------------------------------------------------------------------===//
4860 // SSE4.2 - Compare Instructions
4861 //===----------------------------------------------------------------------===//
4863 /// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
4864 multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
4865 Intrinsic IntId128, bit Is2Addr = 1> {
4866 def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
4867 (ins VR128:$src1, VR128:$src2),
4869 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4870 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4871 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
4873 def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
4874 (ins VR128:$src1, i128mem:$src2),
4876 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4877 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4879 (IntId128 VR128:$src1,
4880 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4883 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4884 defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
4886 let Constraints = "$src1 = $dst" in
4887 defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
4889 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
4890 (PCMPGTQrr VR128:$src1, VR128:$src2)>;
4891 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
4892 (PCMPGTQrm VR128:$src1, addr:$src2)>;
4894 //===----------------------------------------------------------------------===//
4895 // SSE4.2 - String/text Processing Instructions
4896 //===----------------------------------------------------------------------===//
4898 // Packed Compare Implicit Length Strings, Return Mask
4899 multiclass pseudo_pcmpistrm<string asm> {
4900 def REG : PseudoI<(outs VR128:$dst),
4901 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4902 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
4904 def MEM : PseudoI<(outs VR128:$dst),
4905 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4906 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
4907 VR128:$src1, (load addr:$src2), imm:$src3))]>;
4910 let Defs = [EFLAGS], usesCustomInserter = 1 in {
4911 defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
4912 defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
4915 let Defs = [XMM0, EFLAGS], isAsmParserOnly = 0,
4916 Predicates = [HasAVX] in {
4917 def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4918 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4919 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4920 def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4921 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4922 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4925 let Defs = [XMM0, EFLAGS] in {
4926 def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4927 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4928 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4929 def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4930 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4931 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4934 // Packed Compare Explicit Length Strings, Return Mask
4935 multiclass pseudo_pcmpestrm<string asm> {
4936 def REG : PseudoI<(outs VR128:$dst),
4937 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4938 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
4939 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
4940 def MEM : PseudoI<(outs VR128:$dst),
4941 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4942 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
4943 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
4946 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
4947 defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
4948 defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
4951 let isAsmParserOnly = 0, Predicates = [HasAVX],
4952 Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4953 def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4954 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4955 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4956 def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4957 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4958 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4961 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4962 def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4963 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4964 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4965 def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4966 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4967 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4970 // Packed Compare Implicit Length Strings, Return Index
4971 let Defs = [ECX, EFLAGS] in {
4972 multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
4973 def rr : SS42AI<0x63, MRMSrcReg, (outs),
4974 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4975 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4976 [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
4977 (implicit EFLAGS)]>, OpSize;
4978 def rm : SS42AI<0x63, MRMSrcMem, (outs),
4979 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4980 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4981 [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
4982 (implicit EFLAGS)]>, OpSize;
4986 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
4987 defm VPCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
4989 defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
4991 defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
4993 defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
4995 defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
4997 defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
5001 defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
5002 defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
5003 defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
5004 defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
5005 defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
5006 defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
5008 // Packed Compare Explicit Length Strings, Return Index
5009 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
5010 multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
5011 def rr : SS42AI<0x61, MRMSrcReg, (outs),
5012 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5013 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5014 [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
5015 (implicit EFLAGS)]>, OpSize;
5016 def rm : SS42AI<0x61, MRMSrcMem, (outs),
5017 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5018 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5020 (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
5021 (implicit EFLAGS)]>, OpSize;
5025 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
5026 defm VPCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
5028 defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
5030 defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
5032 defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
5034 defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
5036 defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
5040 defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
5041 defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
5042 defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
5043 defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
5044 defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
5045 defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
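//
// Illustration only: the implicit-length, return-index form from user code,
// assuming the standard <nmmintrin.h> intrinsics.  The index comes back in
// ECX and the summary flags in EFLAGS, which is why the defs above list both
// as implicit defs.
//
//   #include <nmmintrin.h>
//   int find_any(__m128i needles, __m128i haystack) {
//     return _mm_cmpistri(needles, haystack,
//                         _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY); // pcmpistri
//   }
//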
5047 //===----------------------------------------------------------------------===//
5048 // SSE4.2 - CRC Instructions
5049 //===----------------------------------------------------------------------===//
5051 // No CRC instructions have AVX equivalents
5053 // CRC intrinsic instructions.
5054 // This set of instructions comes only in rr/rm forms; the only difference
5055 // between them is the size of the register and memory operand.
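//
// Illustration only: how the different widths chain together in user code,
// assuming the standard <nmmintrin.h> intrinsics.  The running CRC value is
// the tied $src1 operand of the definitions below.
//
//   #include <nmmintrin.h>
//   unsigned crc_step(unsigned crc, unsigned word, unsigned char tail) {
//     crc = _mm_crc32_u32(crc, word); // crc32{l}
//     crc = _mm_crc32_u8(crc, tail);  // crc32{b}
//     return crc;
//   }
//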
5056 let Constraints = "$src1 = $dst" in {
5057 def CRC32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
5058 (ins GR32:$src1, i8mem:$src2),
5059 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5061 (int_x86_sse42_crc32_8 GR32:$src1,
5062 (load addr:$src2)))]>;
5063 def CRC32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
5064 (ins GR32:$src1, GR8:$src2),
5065 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5067 (int_x86_sse42_crc32_8 GR32:$src1, GR8:$src2))]>;
5068 def CRC32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5069 (ins GR32:$src1, i16mem:$src2),
5070 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5072 (int_x86_sse42_crc32_16 GR32:$src1,
5073 (load addr:$src2)))]>,
5075 def CRC32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5076 (ins GR32:$src1, GR16:$src2),
5077 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5079 (int_x86_sse42_crc32_16 GR32:$src1, GR16:$src2))]>,
5081 def CRC32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5082 (ins GR32:$src1, i32mem:$src2),
5083 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5085 (int_x86_sse42_crc32_32 GR32:$src1,
5086 (load addr:$src2)))]>;
5087 def CRC32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5088 (ins GR32:$src1, GR32:$src2),
5089 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5091 (int_x86_sse42_crc32_32 GR32:$src1, GR32:$src2))]>;
5092 def CRC64m8 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
5093 (ins GR64:$src1, i8mem:$src2),
5094 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5096 (int_x86_sse42_crc64_8 GR64:$src1,
5097 (load addr:$src2)))]>,
5099 def CRC64r8 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
5100 (ins GR64:$src1, GR8:$src2),
5101 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5103 (int_x86_sse42_crc64_8 GR64:$src1, GR8:$src2))]>,
5105 def CRC64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
5106 (ins GR64:$src1, i64mem:$src2),
5107 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5109 (int_x86_sse42_crc64_64 GR64:$src1,
5110 (load addr:$src2)))]>,
5112 def CRC64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
5113 (ins GR64:$src1, GR64:$src2),
5114 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5116 (int_x86_sse42_crc64_64 GR64:$src1, GR64:$src2))]>,
5120 //===----------------------------------------------------------------------===//
5121 // AES-NI Instructions
5122 //===----------------------------------------------------------------------===//
5124 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
5125 Intrinsic IntId128, bit Is2Addr = 1> {
5126 def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
5127 (ins VR128:$src1, VR128:$src2),
5129 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5130 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5131 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5133 def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
5134 (ins VR128:$src1, i128mem:$src2),
5136 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5137 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5139 (IntId128 VR128:$src1,
5140 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5143 // Perform One Round of an AES Encryption/Decryption Flow
5144 let isAsmParserOnly = 0, Predicates = [HasAVX, HasAES] in {
5145 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
5146 int_x86_aesni_aesenc, 0>, VEX_4V;
5147 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
5148 int_x86_aesni_aesenclast, 0>, VEX_4V;
5149 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
5150 int_x86_aesni_aesdec, 0>, VEX_4V;
5151 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
5152 int_x86_aesni_aesdeclast, 0>, VEX_4V;
5155 let Constraints = "$src1 = $dst" in {
5156 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
5157 int_x86_aesni_aesenc>;
5158 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
5159 int_x86_aesni_aesenclast>;
5160 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
5161 int_x86_aesni_aesdec>;
5162 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
5163 int_x86_aesni_aesdeclast>;
5166 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
5167 (AESENCrr VR128:$src1, VR128:$src2)>;
5168 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
5169 (AESENCrm VR128:$src1, addr:$src2)>;
5170 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
5171 (AESENCLASTrr VR128:$src1, VR128:$src2)>;
5172 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
5173 (AESENCLASTrm VR128:$src1, addr:$src2)>;
5174 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
5175 (AESDECrr VR128:$src1, VR128:$src2)>;
5176 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
5177 (AESDECrm VR128:$src1, addr:$src2)>;
5178 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
5179 (AESDECLASTrr VR128:$src1, VR128:$src2)>;
5180 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
5181 (AESDECLASTrm VR128:$src1, addr:$src2)>;
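//
// Illustration only: the "one round" flow these definitions implement, shown
// with the standard <wmmintrin.h> intrinsics.  A full AES-128 encryption is
// an initial whitening xor, nine aesenc rounds, and a final aesenclast.
//
//   #include <wmmintrin.h>
//   __m128i aes128_encrypt_block(__m128i block, const __m128i rk[11]) {
//     block = _mm_xor_si128(block, rk[0]);
//     for (int i = 1; i < 10; ++i)
//       block = _mm_aesenc_si128(block, rk[i]);   // aesenc
//     return _mm_aesenclast_si128(block, rk[10]); // aesenclast
//   }
//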
5183 // Perform the AES InvMixColumn Transformation
5184 let isAsmParserOnly = 0, Predicates = [HasAVX, HasAES] in {
5185 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5187 "vaesimc\t{$src1, $dst|$dst, $src1}",
5189 (int_x86_aesni_aesimc VR128:$src1))]>,
5191 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5192 (ins i128mem:$src1),
5193 "vaesimc\t{$src1, $dst|$dst, $src1}",
5195 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5198 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5200 "aesimc\t{$src1, $dst|$dst, $src1}",
5202 (int_x86_aesni_aesimc VR128:$src1))]>,
5204 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5205 (ins i128mem:$src1),
5206 "aesimc\t{$src1, $dst|$dst, $src1}",
5208 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5211 // AES Round Key Generation Assist
5212 let isAsmParserOnly = 0, Predicates = [HasAVX, HasAES] in {
5213 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5214 (ins VR128:$src1, i8imm:$src2),
5215 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5217 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5219 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5220 (ins i128mem:$src1, i8imm:$src2),
5221 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5223 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5227 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5228 (ins VR128:$src1, i8imm:$src2),
5229 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5231 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5233 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5234 (ins i128mem:$src1, i8imm:$src2),
5235 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5237 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5241 //===----------------------------------------------------------------------===//
5242 // CLMUL Instructions
5243 //===----------------------------------------------------------------------===//
5245 // Only the AVX versions of the CLMUL instructions are described here.
5247 // Carry-less Multiplication instructions
5248 let isAsmParserOnly = 0 in {
5249 def VPCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
5250 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5251 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5254 def VPCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
5255 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5256 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5260 multiclass avx_vpclmul<string asm> {
5261 def rr : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
5262 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5265 def rm : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
5266 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5269 defm VPCLMULHQHQDQ : avx_vpclmul<"vpclmulhqhqdq">;
5270 defm VPCLMULHQLQDQ : avx_vpclmul<"vpclmulhqlqdq">;
5271 defm VPCLMULLQHQDQ : avx_vpclmul<"vpclmullqhqdq">;
5272 defm VPCLMULLQLQDQ : avx_vpclmul<"vpclmullqlqdq">;
5274 } // isAsmParserOnly
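//
// Illustration only: the user-level carry-less multiply, assuming the
// standard <wmmintrin.h> intrinsic.  imm[0] selects the 64-bit half of the
// first operand and imm[4] the half of the second.
//
//   #include <wmmintrin.h>
//   __m128i clmul_lo(__m128i a, __m128i b) {
//     return _mm_clmulepi64_si128(a, b, 0x00); // pclmulqdq $0 (low x low)
//   }
//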
5276 //===----------------------------------------------------------------------===//
5277 // AVX Instructions
5278 //===----------------------------------------------------------------------===//
5280 let isAsmParserOnly = 0 in {
5282 // Load from memory and broadcast to all elements of the destination operand
5283 class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
5284 X86MemOperand x86memop, Intrinsic Int> :
5285 AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
5286 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5287 [(set RC:$dst, (Int addr:$src))]>, VEX;
5289 def VBROADCASTSS : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
5290 int_x86_avx_vbroadcastss>;
5291 def VBROADCASTSSY : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
5292 int_x86_avx_vbroadcastss_256>;
5293 def VBROADCASTSD : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
5294 int_x86_avx_vbroadcast_sd_256>;
5295 def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
5296 int_x86_avx_vbroadcastf128_pd_256>;
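// Illustration only: a rough C-level view of these broadcasts, assuming
// <immintrin.h> and an AVX target (helper names made up here):
//
//   #include <immintrin.h>
//   static __m256 splat8(const float *p) {
//     return _mm256_broadcast_ss(p);     // VBROADCASTSSY: one load, 8 copies
//   }
//   static __m256d splat4(const double *p) {
//     return _mm256_broadcast_sd(p);     // VBROADCASTSD: one load, 4 copies
//   }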
5298 // Insert packed floating-point values
5299 def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
5300 (ins VR256:$src1, VR128:$src2, i8imm:$src3),
5301 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5303 def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
5304 (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
5305 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5308 // Extract packed floating-point values
5309 def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
5310 (ins VR256:$src1, i8imm:$src2),
5311 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5313 def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
5314 (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
5315 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5318 // Conditional SIMD Packed Loads and Stores
5319 multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
5320 Intrinsic IntLd, Intrinsic IntLd256,
5321 Intrinsic IntSt, Intrinsic IntSt256,
5322 PatFrag pf128, PatFrag pf256> {
5323 def rm : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
5324 (ins VR128:$src1, f128mem:$src2),
5325 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5326 [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
5328 def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
5329 (ins VR256:$src1, f256mem:$src2),
5330 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5331 [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
5333 def mr : AVX8I<opc_mr, MRMDestMem, (outs),
5334 (ins f128mem:$dst, VR128:$src1, VR128:$src2),
5335 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5336 [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
5337 def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
5338 (ins f256mem:$dst, VR256:$src1, VR256:$src2),
5339 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5340 [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
5343 defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
5344 int_x86_avx_maskload_ps,
5345 int_x86_avx_maskload_ps_256,
5346 int_x86_avx_maskstore_ps,
5347 int_x86_avx_maskstore_ps_256,
5348 memopv4f32, memopv8f32>;
5349 defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
5350 int_x86_avx_maskload_pd,
5351 int_x86_avx_maskload_pd_256,
5352 int_x86_avx_maskstore_pd,
5353 int_x86_avx_maskstore_pd_256,
5354 memopv2f64, memopv4f64>;
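// Illustration only: for these masked loads/stores the sign bit of each mask
// element decides whether that element is transferred; masked-off load
// elements are zeroed. A rough C-level sketch assuming <immintrin.h> (the
// exact mask parameter type has varied between header versions):
//
//   #include <immintrin.h>
//   static __m256 masked_load(const float *p, __m256i mask) {
//     return _mm256_maskload_ps(p, mask);     // VMASKMOVPSYrm
//   }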
5356 // Permute Floating-Point Values
5357 multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
5358 RegisterClass RC, X86MemOperand x86memop_f,
5359 X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
5360 Intrinsic IntVar, Intrinsic IntImm> {
5361 def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
5362 (ins RC:$src1, RC:$src2),
5363 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5364 [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
5365 def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
5366 (ins RC:$src1, x86memop_i:$src2),
5367 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5368 [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;
5370 def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
5371 (ins RC:$src1, i8imm:$src2),
5372 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5373 [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
5374 def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
5375 (ins x86memop_f:$src1, i8imm:$src2),
5376 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5377 [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
5380 defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
5381 memopv4f32, memopv4i32,
5382 int_x86_avx_vpermilvar_ps,
5383 int_x86_avx_vpermil_ps>;
5384 defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
5385 memopv8f32, memopv8i32,
5386 int_x86_avx_vpermilvar_ps_256,
5387 int_x86_avx_vpermil_ps_256>;
5388 defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
5389 memopv2f64, memopv2i64,
5390 int_x86_avx_vpermilvar_pd,
5391 int_x86_avx_vpermil_pd>;
5392 defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
5393 memopv4f64, memopv4i64,
5394 int_x86_avx_vpermilvar_pd_256,
5395 int_x86_avx_vpermil_pd_256>;
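// Illustration only: VPERMILPS/PD permute elements within each 128-bit lane,
// either by immediate or by a per-element selector register. A rough C-level
// sketch assuming <immintrin.h> (helper names made up here):
//
//   #include <immintrin.h>
//   static __m256 swap_pairs(__m256 v) {
//     return _mm256_permute_ps(v, 0xB1);      // swap adjacent elements per lane
//   }
//   static __m256 permute_var(__m256 v, __m256i sel) {
//     return _mm256_permutevar_ps(v, sel);    // variable (register) selector
//   }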
5397 def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
5398 (ins VR256:$src1, VR256:$src2, i8imm:$src3),
5399 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5401 def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
5402 (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
5403 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5406 // Zero All YMM registers
5407 def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
5408 [(int_x86_avx_vzeroall)]>, VEX, VEX_L, Requires<[HasAVX]>;
5410 // Zero Upper bits of YMM registers
5411 def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
5412 [(int_x86_avx_vzeroupper)]>, VEX, Requires<[HasAVX]>;
5414 } // isAsmParserOnly
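// Illustration only: vzeroupper is typically used before returning to legacy
// SSE code to avoid AVX/SSE transition penalties. A rough C-level sketch
// assuming <immintrin.h> (helper name made up here):
//
//   #include <immintrin.h>
//   static void done_with_avx(void) {
//     _mm256_zeroupper();                     // VZEROUPPER
//   }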
5416 def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
5417 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5418 def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
5419 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5420 def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
5421 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5423 def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
5425 (VINSERTF128rr VR256:$src1, VR128:$src2,
5426 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5427 def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
5429 (VINSERTF128rr VR256:$src1, VR128:$src2,
5430 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5431 def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
5433 (VINSERTF128rr VR256:$src1, VR128:$src2,
5434 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5435 def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
5437 (VINSERTF128rr VR256:$src1, VR128:$src2,
5438 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5440 def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
5441 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5442 def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
5443 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5444 def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
5445 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5447 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5448 (v4f32 (VEXTRACTF128rr
5449 (v8f32 VR256:$src1),
5450 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5451 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5452 (v2f64 (VEXTRACTF128rr
5453 (v4f64 VR256:$src1),
5454 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5455 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5456 (v4i32 (VEXTRACTF128rr
5457 (v8i32 VR256:$src1),
5458 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5459 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5460 (v2i64 (VEXTRACTF128rr
5461 (v4i64 VR256:$src1),
5462 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5464 def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
5465 (VBROADCASTF128 addr:$src)>;
5467 def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
5468 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5469 def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
5470 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5471 def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
5472 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5474 def : Pat<(int_x86_avx_vperm2f128_ps_256
5475 VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
5476 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5477 def : Pat<(int_x86_avx_vperm2f128_pd_256
5478 VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
5479 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5480 def : Pat<(int_x86_avx_vperm2f128_si_256
5481 VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
5482 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5484 //===----------------------------------------------------------------------===//
5485 // SSE Shuffle pattern fragments
5486 //===----------------------------------------------------------------------===//
5488 // This is part of a "work in progress" refactoring. The idea is that all
5489 // vector shuffles are going to be translated into target-specific nodes and
5490 // directly matched by the patterns below (which can be changed along the way).
5491 // The AVX versions of some, but not all, of them are described here; more
5492 // should come in the near future.
5494 // Shuffle with the PSHUFD instruction, folding loads. The first two patterns
5495 // match SSE2 loads, which are always promoted to v2i64. The last one should
5496 // match the SSE1 case, where the only legal load is v4f32, but there is no
5497 // PSHUFD in SSE1, so how has this ever worked? Anyway, the pattern will remain
5498 // here until we investigate further.
5499 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
5501 (VPSHUFDmi addr:$src1, imm:$imm)>, Requires<[HasAVX]>;
5502 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
5504 (PSHUFDmi addr:$src1, imm:$imm)>;
5505 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
5507 (PSHUFDmi addr:$src1, imm:$imm)>; // FIXME: has this ever worked?
5509 // Shuffle with PSHUFD instruction.
5510 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5511 (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
5512 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5513 (PSHUFDri VR128:$src1, imm:$imm)>;
5515 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5516 (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
5517 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5518 (PSHUFDri VR128:$src1, imm:$imm)>;
5520 // Shuffle with SHUFPD instruction.
5521 def : Pat<(v2f64 (X86Shufps VR128:$src1,
5522 (memopv2f64 addr:$src2), (i8 imm:$imm))),
5523 (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5524 def : Pat<(v2f64 (X86Shufps VR128:$src1,
5525 (memopv2f64 addr:$src2), (i8 imm:$imm))),
5526 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
5528 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5529 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5530 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5531 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
5533 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5534 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5535 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5536 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
5538 // Shuffle with SHUFPS instruction.
5539 def : Pat<(v4f32 (X86Shufps VR128:$src1,
5540 (memopv4f32 addr:$src2), (i8 imm:$imm))),
5541 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5542 def : Pat<(v4f32 (X86Shufps VR128:$src1,
5543 (memopv4f32 addr:$src2), (i8 imm:$imm))),
5544 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
5546 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5547 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5548 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5549 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
5551 def : Pat<(v4i32 (X86Shufps VR128:$src1,
5552 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
5553 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5554 def : Pat<(v4i32 (X86Shufps VR128:$src1,
5555 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
5556 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
5558 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5559 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5560 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5561 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
5563 // Shuffle with MOVHLPS instruction
5564 def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
5565 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
5566 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
5567 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
5569 // Shuffle with MOVDDUP instruction
5570 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5571 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5572 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5573 (MOVDDUPrm addr:$src)>;
5575 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
5576 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5577 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
5578 (MOVDDUPrm addr:$src)>;
5580 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
5581 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5582 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
5583 (MOVDDUPrm addr:$src)>;
5585 def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
5586 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5587 def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
5588 (MOVDDUPrm addr:$src)>;
5590 def : Pat<(X86Movddup (bc_v2f64
5591 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5592 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5593 def : Pat<(X86Movddup (bc_v2f64
5594 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5595 (MOVDDUPrm addr:$src)>;
5598 // Shuffle with UNPCKLPS
5599 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
5600 (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5601 def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, (memopv8f32 addr:$src2))),
5602 (VUNPCKLPSYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
5603 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
5604 (UNPCKLPSrm VR128:$src1, addr:$src2)>;
5606 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
5607 (VUNPCKLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5608 def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
5609 (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
5610 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
5611 (UNPCKLPSrr VR128:$src1, VR128:$src2)>;
5613 // Shuffle with UNPCKHPS
5614 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
5615 (VUNPCKHPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5616 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
5617 (UNPCKHPSrm VR128:$src1, addr:$src2)>;
5619 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
5620 (VUNPCKHPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5621 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
5622 (UNPCKHPSrr VR128:$src1, VR128:$src2)>;
5624 // Shuffle with UNPCKLPD
5625 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
5626 (VUNPCKLPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5627 def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, (memopv4f64 addr:$src2))),
5628 (VUNPCKLPDYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
5629 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
5630 (UNPCKLPDrm VR128:$src1, addr:$src2)>;
5632 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
5633 (VUNPCKLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5634 def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
5635 (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
5636 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
5637 (UNPCKLPDrr VR128:$src1, VR128:$src2)>;
5639 // Shuffle with UNPCKHPD
5640 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
5641 (VUNPCKHPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5642 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
5643 (UNPCKHPDrm VR128:$src1, addr:$src2)>;
5645 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
5646 (VUNPCKHPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5647 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
5648 (UNPCKHPDrr VR128:$src1, VR128:$src2)>;
5650 // Shuffle with PUNPCKLBW
5651 def : Pat<(v16i8 (X86Punpcklbw VR128:$src1,
5652 (bc_v16i8 (memopv2i64 addr:$src2)))),
5653 (PUNPCKLBWrm VR128:$src1, addr:$src2)>;
5654 def : Pat<(v16i8 (X86Punpcklbw VR128:$src1, VR128:$src2)),
5655 (PUNPCKLBWrr VR128:$src1, VR128:$src2)>;
5657 // Shuffle with PUNPCKLWD
5658 def : Pat<(v8i16 (X86Punpcklwd VR128:$src1,
5659 (bc_v8i16 (memopv2i64 addr:$src2)))),
5660 (PUNPCKLWDrm VR128:$src1, addr:$src2)>;
5661 def : Pat<(v8i16 (X86Punpcklwd VR128:$src1, VR128:$src2)),
5662 (PUNPCKLWDrr VR128:$src1, VR128:$src2)>;
5664 // Shuffle with PUNPCKLDQ
5665 def : Pat<(v4i32 (X86Punpckldq VR128:$src1,
5666 (bc_v4i32 (memopv2i64 addr:$src2)))),
5667 (PUNPCKLDQrm VR128:$src1, addr:$src2)>;
5668 def : Pat<(v4i32 (X86Punpckldq VR128:$src1, VR128:$src2)),
5669 (PUNPCKLDQrr VR128:$src1, VR128:$src2)>;
5671 // Shuffle with PUNPCKLQDQ
5672 def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, (memopv2i64 addr:$src2))),
5673 (PUNPCKLQDQrm VR128:$src1, addr:$src2)>;
5674 def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)),
5675 (PUNPCKLQDQrr VR128:$src1, VR128:$src2)>;
5677 // Shuffle with PUNPCKHBW
5678 def : Pat<(v16i8 (X86Punpckhbw VR128:$src1,
5679 (bc_v16i8 (memopv2i64 addr:$src2)))),
5680 (PUNPCKHBWrm VR128:$src1, addr:$src2)>;
5681 def : Pat<(v16i8 (X86Punpckhbw VR128:$src1, VR128:$src2)),
5682 (PUNPCKHBWrr VR128:$src1, VR128:$src2)>;
5684 // Shuffle with PUNPCKHWD
5685 def : Pat<(v8i16 (X86Punpckhwd VR128:$src1,
5686 (bc_v8i16 (memopv2i64 addr:$src2)))),
5687 (PUNPCKHWDrm VR128:$src1, addr:$src2)>;
5688 def : Pat<(v8i16 (X86Punpckhwd VR128:$src1, VR128:$src2)),
5689 (PUNPCKHWDrr VR128:$src1, VR128:$src2)>;
5691 // Shuffle with PUNPCKHDQ
5692 def : Pat<(v4i32 (X86Punpckhdq VR128:$src1,
5693 (bc_v4i32 (memopv2i64 addr:$src2)))),
5694 (PUNPCKHDQrm VR128:$src1, addr:$src2)>;
5695 def : Pat<(v4i32 (X86Punpckhdq VR128:$src1, VR128:$src2)),
5696 (PUNPCKHDQrr VR128:$src1, VR128:$src2)>;
5698 // Shuffle with PUNPCKHQDQ
5699 def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, (memopv2i64 addr:$src2))),
5700 (PUNPCKHQDQrm VR128:$src1, addr:$src2)>;
5701 def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)),
5702 (PUNPCKHQDQrr VR128:$src1, VR128:$src2)>;
5704 // Shuffle with MOVLHPS
5705 def : Pat<(X86Movlhps VR128:$src1,
5706 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
5707 (MOVHPSrm VR128:$src1, addr:$src2)>;
5708 def : Pat<(X86Movlhps VR128:$src1,
5709 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
5710 (MOVHPSrm VR128:$src1, addr:$src2)>;
5711 def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
5712 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
5713 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
5714 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
5715 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
5716 (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
5718 // FIXME: Instead of X86Movddup, there should be an X86Unpcklpd here. The
5719 // problem is during lowering, where it is not possible to recognize the load
5720 // fold because it has two uses through a bitcast. One use disappears at isel
5721 // time and the fold opportunity reappears.
5722 def : Pat<(v2f64 (X86Movddup VR128:$src)),
5723 (UNPCKLPDrr VR128:$src, VR128:$src)>;
5725 // Shuffle with MOVLHPD
5726 def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
5727 (scalar_to_vector (loadf64 addr:$src2)))),
5728 (MOVHPDrm VR128:$src1, addr:$src2)>;
5730 // FIXME: Instead of X86Unpcklpd, there should be an X86Movlhpd here. The
5731 // problem is during lowering, where it is not possible to recognize the load
5732 // fold because it has two uses through a bitcast. One use disappears at isel
5733 // time and the fold opportunity reappears.
5734 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
5735 (scalar_to_vector (loadf64 addr:$src2)))),
5736 (MOVHPDrm VR128:$src1, addr:$src2)>;
5738 // Shuffle with MOVSS
5739 def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
5740 (MOVSSrr VR128:$src1, FR32:$src2)>;
5741 def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
5742 (MOVSSrr (v4i32 VR128:$src1),
5743 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
5744 def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
5745 (MOVSSrr (v4f32 VR128:$src1),
5746 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
5747 // FIXME: Instead of an X86Movss there should be an X86Movlps here. The
5748 // problem is during lowering, where it is not possible to recognize the load
5749 // fold because it has two uses through a bitcast. One use disappears at isel
5750 // time and the fold opportunity reappears.
5751 def : Pat<(X86Movss VR128:$src1,
5752 (bc_v4i32 (v2i64 (load addr:$src2)))),
5753 (MOVLPSrm VR128:$src1, addr:$src2)>;
5755 // Shuffle with MOVSD
5756 def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
5757 (MOVSDrr VR128:$src1, FR64:$src2)>;
5758 def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
5759 (MOVSDrr (v2i64 VR128:$src1),
5760 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
5761 def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
5762 (MOVSDrr (v2f64 VR128:$src1),
5763 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
5764 def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
5765 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
5766 def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
5767 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
5769 // Shuffle with MOVSHDUP
5770 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
5771 (MOVSHDUPrr VR128:$src)>;
5772 def : Pat<(X86Movshdup (bc_v4i32 (memopv2i64 addr:$src))),
5773 (MOVSHDUPrm addr:$src)>;
5775 def : Pat<(v4f32 (X86Movshdup VR128:$src)),
5776 (MOVSHDUPrr VR128:$src)>;
5777 def : Pat<(X86Movshdup (memopv4f32 addr:$src)),
5778 (MOVSHDUPrm addr:$src)>;
5780 // Shuffle with MOVSLDUP
5781 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
5782 (MOVSLDUPrr VR128:$src)>;
5783 def : Pat<(X86Movsldup (bc_v4i32 (memopv2i64 addr:$src))),
5784 (MOVSLDUPrm addr:$src)>;
5786 def : Pat<(v4f32 (X86Movsldup VR128:$src)),
5787 (MOVSLDUPrr VR128:$src)>;
5788 def : Pat<(X86Movsldup (memopv4f32 addr:$src)),
5789 (MOVSLDUPrm addr:$src)>;
5791 // Shuffle with PSHUFHW
5792 def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
5793 (PSHUFHWri VR128:$src, imm:$imm)>;
5794 def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
5795 (PSHUFHWmi addr:$src, imm:$imm)>;
5797 // Shuffle with PSHUFLW
5798 def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
5799 (PSHUFLWri VR128:$src, imm:$imm)>;
5800 def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
5801 (PSHUFLWmi addr:$src, imm:$imm)>;
5803 // Shuffle with PALIGN
5804 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5805 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5806 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5807 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5808 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5809 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5810 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5811 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
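// Illustration only: note that the patterns above swap $src1 and $src2 when
// selecting PALIGNR. At the C level, assuming <tmmintrin.h>, palignr places
// its second operand in the low half of the concatenation and shifts right by
// the byte immediate (helper name made up here):
//
//   #include <tmmintrin.h>
//   static __m128i take_window(__m128i hi, __m128i lo) {
//     return _mm_alignr_epi8(hi, lo, 4);      // bytes 4..19 of [hi:lo]
//   }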
5813 // Shuffle with MOVLPS
5814 def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
5815 (MOVLPSrm VR128:$src1, addr:$src2)>;
5816 def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
5817 (MOVLPSrm VR128:$src1, addr:$src2)>;
5818 def : Pat<(X86Movlps VR128:$src1,
5819 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
5820 (MOVLPSrm VR128:$src1, addr:$src2)>;
5821 // FIXME: Instead of an X86Movlps there should be an X86Movsd here. The
5822 // problem is during lowering, where it is not possible to recognize the load
5823 // fold because it has two uses through a bitcast. One use disappears at isel
5824 // time and the fold opportunity reappears.
5825 def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
5826 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
5828 def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
5829 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
5831 // Shuffle with MOVLPD
5832 def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
5833 (MOVLPDrm VR128:$src1, addr:$src2)>;
5834 def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
5835 (MOVLPDrm VR128:$src1, addr:$src2)>;
5836 def : Pat<(v2f64 (X86Movlpd VR128:$src1,
5837 (scalar_to_vector (loadf64 addr:$src2)))),
5838 (MOVLPDrm VR128:$src1, addr:$src2)>;
5840 // Extra patterns to match stores with MOVHPS/PD and MOVLPS/PD
5841 def : Pat<(store (f64 (vector_extract
5842 (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))),addr:$dst),
5843 (MOVHPSmr addr:$dst, VR128:$src)>;
5844 def : Pat<(store (f64 (vector_extract
5845 (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))),addr:$dst),
5846 (MOVHPDmr addr:$dst, VR128:$src)>;
5848 def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),addr:$src1),
5849 (MOVLPSmr addr:$src1, VR128:$src2)>;
5850 def : Pat<(store (v4i32 (X86Movlps
5851 (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
5852 (MOVLPSmr addr:$src1, VR128:$src2)>;
5854 def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
5855 (MOVLPDmr addr:$src1, VR128:$src2)>;
5856 def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
5857 (MOVLPDmr addr:$src1, VR128:$src2)>;