//===- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instruction Classes
//===----------------------------------------------------------------------===//

/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, X86MemOperand x86memop,
                           bit Is2Addr = 1> {
  let isCommutable = 1 in {
    def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
     !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
     [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
}

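// Illustrative only (kept in a comment, so not compiled here; the names and
// opcode are hypothetical). An instantiation such as
//   defm FADD : sse12_fp_scalar<0x58, "addss", fadd, FR32, f32mem>;
// would produce FADDrr/FADDrm using the two-operand "$src2, $dst" asm string,
// while passing Is2Addr = 0 selects the three-operand
// "$src2, $src1, $dst" string used by the VEX-encoded forms.
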
/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               Operand memopr, ComplexPattern mem_cpat,
                               bit Is2Addr = 1> {
  def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))]>;
  def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
                                          SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, mem_cpat:$src2))]>;
}

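// The !strconcat above builds the intrinsic name from its pieces; for example
// (operand strings assumed for illustration), OpcodeStr = "add", SSEVer = ""
// and FPSizeStr = "_ss" name int_x86_sse_add_ss, while SSEVer = "2" and
// FPSizeStr = "_sd" name int_x86_sse2_add_sd.
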
/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag,
                           Domain d, bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
     !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
     [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
}

/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
                                      string OpcodeStr, X86MemOperand x86memop,
                                      list<dag> pat_rr, list<dag> pat_rm,
                                      bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
     !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
     pat_rr, d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
     !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
     pat_rm, d>;
}

/// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               X86MemOperand x86memop, PatFrag mem_frag,
                               Domain d, bit Is2Addr = 1> {
  def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                   !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))], d>;
  def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst),
                  (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                   !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, (mem_frag addr:$src2)))], d>;
}

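// Unlike the scalar class above, the prefix here is just "int_x86_", so SSEVer
// carries the full feature string; e.g. (illustrative operands) SSEVer = "sse",
// OpcodeStr = "sqrt" and FPSizeStr = "_ps" name int_x86_sse_sqrt_ps.
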
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Instructions
//===----------------------------------------------------------------------===//

class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
      SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
      [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;

// Loading from memory automatically zeroes the upper bits.
class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
                    PatFrag mem_pat, string OpcodeStr> :
      SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
         [(set RC:$dst, (mem_pat addr:$src))]>;

// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
// is used instead. Register-to-register movss/movsd is not modeled as an
// INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
// in terms of a copy, and as just mentioned, we don't use movss/movsd for
// copies.
let isAsmParserOnly = 1 in {
  def VMOVSSrr : sse12_move_rr<FR32, v4f32,
                  "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
  def VMOVSDrr : sse12_move_rr<FR64, v2f64,
                  "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;

  let canFoldAsLoad = 1, isReMaterializable = 1 in {
    def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;

    let AddedComplexity = 20 in
      def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
  }
}

let Constraints = "$src1 = $dst" in {
  def MOVSSrr : sse12_move_rr<FR32, v4f32,
                          "movss\t{$src2, $dst|$dst, $src2}">, XS;
  def MOVSDrr : sse12_move_rr<FR64, v2f64,
                          "movsd\t{$src2, $dst|$dst, $src2}">, XD;
}

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;

  let AddedComplexity = 20 in
    def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
}

let AddedComplexity = 15 in {
// Extract the low 32-bit value from one vector and insert it into another.
def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4f32 VR128:$src1),
                   (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
// Extract the low 64-bit value from one vector and insert it into another.
def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2f64 VR128:$src1),
                   (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
}

// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;

let AddedComplexity = 20 in {
// MOVSSrm zeros the high parts of the register; represent this
// with SUBREG_TO_REG.
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
// MOVSDrm zeros the high parts of the register; represent this
// with SUBREG_TO_REG.
def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (X86vzload addr:$src)),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
}

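// SUBREG_TO_REG is used here (rather than INSERT_SUBREG) because its first
// operand, the (i32 0)/(i64 0) above, asserts that the producing instruction
// already zeroed the upper bits, so no extra zeroing copy needs to be emitted.
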
// Store scalar value to memory.
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>;
def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>;

let isAsmParserOnly = 1 in {
def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>, XS, VEX;
def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>, XD, VEX;
}

// Extract and store.
def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                 addr:$dst),
          (MOVSSmr addr:$dst,
                   (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                 addr:$dst),
          (MOVSDmr addr:$dst,
                   (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;

// Move Aligned/Unaligned floating point values
multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag ld_frag,
                            string asm, Domain d,
                            bit IsReMaterializable = 1> {
let neverHasSideEffects = 1 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (ld_frag addr:$src))], d>;
}

let isAsmParserOnly = 1 in {
defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                              "movaps", SSEPackedSingle>, VEX;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                              "movapd", SSEPackedDouble>, OpSize, VEX;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                              "movups", SSEPackedSingle>, VEX;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                              "movupd", SSEPackedDouble, 0>, OpSize, VEX;

defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
                              "movaps", SSEPackedSingle>, VEX;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
                              "movapd", SSEPackedDouble>, OpSize, VEX;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
                              "movups", SSEPackedSingle>, VEX;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
                              "movupd", SSEPackedDouble, 0>, OpSize, VEX;
}
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                              "movaps", SSEPackedSingle>, TB;
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                              "movapd", SSEPackedDouble>, TB, OpSize;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                              "movups", SSEPackedSingle>, TB;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                              "movupd", SSEPackedDouble, 0>, TB, OpSize;

let isAsmParserOnly = 1 in {
def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
}

def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
          (VMOVUPSYmr addr:$dst, VR256:$src)>;

def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
          (VMOVUPDYmr addr:$dst, VR256:$src)>;

def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>;

// Intrinsic forms of MOVUPS/D load and store
let isAsmParserOnly = 1 in {
  let canFoldAsLoad = 1, isReMaterializable = 1 in
  def VMOVUPSrm_Int : VPSI<0x10, MRMSrcMem, (outs VR128:$dst),
                           (ins f128mem:$src),
             "movups\t{$src, $dst|$dst, $src}",
             [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>, VEX;
  def VMOVUPDrm_Int : VPDI<0x10, MRMSrcMem, (outs VR128:$dst),
                           (ins f128mem:$src),
             "movupd\t{$src, $dst|$dst, $src}",
             [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>, VEX;
  def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
                           (ins f128mem:$dst, VR128:$src),
             "movups\t{$src, $dst|$dst, $src}",
             [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
  def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
                           (ins f128mem:$dst, VR128:$src),
             "movupd\t{$src, $dst|$dst, $src}",
             [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
}
let canFoldAsLoad = 1, isReMaterializable = 1 in
def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;

def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;

// Move Low/High packed floating point values
multiclass sse12_mov_hilo_packed<bits<8> opc, RegisterClass RC,
                                 PatFrag mov_frag, string base_opc,
                                 string asm_opr> {
  def PSrm : PI<opc, MRMSrcMem,
         (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
         !strconcat(base_opc, "s", asm_opr),
     [(set RC:$dst,
       (mov_frag RC:$src1,
        (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
              SSEPackedSingle>, TB;

  def PDrm : PI<opc, MRMSrcMem,
         (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
         !strconcat(base_opc, "d", asm_opr),
     [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
                            (scalar_to_vector (loadf64 addr:$src2)))))],
              SSEPackedDouble>, TB, OpSize;
}

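// Note on the expansion (illustrative): TableGen appends the def names to the
// defm name, so "defm MOVL : sse12_mov_hilo_packed<0x12, ...>" below yields
// MOVLPSrm and MOVLPDrm, and "defm VMOVL : ..." yields VMOVLPSrm/VMOVLPDrm.
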
let isAsmParserOnly = 1, AddedComplexity = 20 in {
  defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
  defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                     "\t{$src2, $dst|$dst, $src2}">;
  defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                     "\t{$src2, $dst|$dst, $src2}">;
}

let isAsmParserOnly = 1 in {
def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>, VEX;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>, VEX;
}
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>;

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0, so the non-store version isn't too horrible.
let isAsmParserOnly = 1 in {
def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                         (undef)), (iPTR 0))), addr:$dst)]>,
                   VEX;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (unpckh VR128:$src, (undef))),
                                 (iPTR 0))), addr:$dst)]>,
                   VEX;
}
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                         (undef)), (iPTR 0))), addr:$dst)]>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (unpckh VR128:$src, (undef))),
                                 (iPTR 0))), addr:$dst)]>;

let isAsmParserOnly = 1, AddedComplexity = 20 in {
  def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
                      VEX_4V;
  def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
                      VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
  def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
}

def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
          (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
let AddedComplexity = 20 in {
  def : Pat<(v4f32 (movddup VR128:$src, (undef))),
            (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
  def : Pat<(v2i64 (movddup VR128:$src, (undef))),
            (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Conversion Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                     SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                     string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
                        [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
                        [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
}

multiclass sse12_cvt_s_np<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          X86MemOperand x86memop, string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
              []>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
              []>;
}

multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                         SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                         string asm, Domain d> {
  def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
                        [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
  def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
                        [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
}

multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          X86MemOperand x86memop, string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
}

let isAsmParserOnly = 1 in {
defm VCVTTSS2SI   : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                      "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                      "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
                      VEX_W;
defm VCVTTSD2SI   : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
                      VEX, VEX_W;

// The assembler can recognize rr 64-bit instructions by seeing an rxx
// register, but the same isn't true when only using memory operands, so
// provide the alternate assembly "l" and "q" forms to address this explicitly
// where it is appropriate to do so.
defm VCVTSI2SS   : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
                                  VEX_4V;
defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
                                  VEX_4V, VEX_W;
defm VCVTSI2SD   : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
                                  VEX_4V;
defm VCVTSI2SDL  : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
                                  VEX_4V;
defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
                                  VEX_4V, VEX_W;
}

defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                      "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                      "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
                      "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
                      "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
                      "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
                      "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;

// Conversion Instructions Intrinsics - Match intrinsics which expect MM
// and/or XMM operand(s).

multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                         Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
                         string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (Int SrcRC:$src))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
}

multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
                    RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
                    PatFrag ld_frag, string asm, bit Is2Addr = 1> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
}

let isAsmParserOnly = 1 in {
  defm Int_VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
                        f32mem, load, "cvtss2si">, XS, VEX;
  defm Int_VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
                          int_x86_sse_cvtss2si64, f32mem, load, "cvtss2si">,
                          XS, VEX, VEX_W;
  defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                        f128mem, load, "cvtsd2si">, XD, VEX;
  defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
                          int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
                          XD, VEX, VEX_W;

  // FIXME: The asm matcher has a hack to ignore instructions with _Int and
  // Int_ prefixes. Get rid of this hack or rename the intrinsics; there are
  // several instructions that only match with the intrinsic form, so why
  // create duplicates just to let them be recognized by the assembler?
  defm VCVTSD2SI_alt : sse12_cvt_s_np<0x2D, FR64, GR32, f64mem,
                        "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
  defm VCVTSD2SI64 : sse12_cvt_s_np<0x2D, FR64, GR64, f64mem,
                        "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
}
defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
                      f32mem, load, "cvtss2si">, XS;
defm Int_CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
                      f32mem, load, "cvtss2si{q}">, XS, REX_W;
defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                      f128mem, load, "cvtsd2si{l}">, XD;
defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
                      f128mem, load, "cvtsd2si{q}">, XD, REX_W;

let isAsmParserOnly = 1 in {
  defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
            int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
  defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
            int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
            VEX_W;
  defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
            int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
  defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
            int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
            VEX_4V, VEX_W;
}

let Constraints = "$src1 = $dst" in {
  defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse_cvtsi2ss, i32mem, loadi32,
                        "cvtsi2ss">, XS;
  defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                        int_x86_sse_cvtsi642ss, i64mem, loadi64,
                        "cvtsi2ss{q}">, XS, REX_W;
  defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse2_cvtsi2sd, i32mem, loadi32,
                        "cvtsi2sd">, XD;
  defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                        int_x86_sse2_cvtsi642sd, i64mem, loadi64,
                        "cvtsi2sd">, XD, REX_W;
}

// Aliases for intrinsics
let isAsmParserOnly = 1 in {
defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                                    f32mem, load, "cvttss2si">, XS, VEX;
defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse_cvttss2si64, f32mem, load,
                                    "cvttss2si">, XS, VEX, VEX_W;
defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                                    f128mem, load, "cvttsd2si">, XD, VEX;
defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse2_cvttsd2si64, f128mem, load,
                                    "cvttsd2si">, XD, VEX, VEX_W;
}
defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                                    f32mem, load, "cvttss2si">, XS;
defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse_cvttss2si64, f32mem, load,
                                    "cvttss2si{q}">, XS, REX_W;
defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                                    f128mem, load, "cvttsd2si">, XD;
defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse2_cvttsd2si64, f128mem, load,
                                    "cvttsd2si{q}">, XD, REX_W;

let isAsmParserOnly = 1, Pattern = []<dag> in {
defm VCVTSS2SI   : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
                          "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
                          "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
                          VEX_W;
defm VCVTDQ2PS   : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
                          "cvtdq2ps\t{$src, $dst|$dst, $src}",
                          SSEPackedSingle>, TB, VEX;
defm VCVTDQ2PSY  : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
                          "cvtdq2ps\t{$src, $dst|$dst, $src}",
                          SSEPackedSingle>, TB, VEX;
}
let Pattern = []<dag> in {
defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
                          "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
                          "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
                          "cvtdq2ps\t{$src, $dst|$dst, $src}",
                          SSEPackedSingle>, TB; /* PD SSE3 form is available */
}

// Convert scalar double to scalar single
let isAsmParserOnly = 1 in {
def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
                       (ins FR64:$src1, FR64:$src2),
                      "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                      VEX_4V;
def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
                    (ins FR64:$src1, f64mem:$src2),
                    "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
}
def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
                     "cvtsd2ss\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (fround FR64:$src))]>;
def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
                   "cvtsd2ss\t{$src, $dst|$dst, $src}",
                   [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
                 Requires<[HasSSE2, OptForSize]>;

let isAsmParserOnly = 1 in
defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
                    int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
                    XS, VEX_4V;
let Constraints = "$src1 = $dst" in
defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
                    int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;

// Convert scalar single to scalar double
let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
                    (ins FR32:$src1, FR32:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, XS, Requires<[HasAVX]>, VEX_4V;
def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
                    (ins FR32:$src1, f32mem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;
}
def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (fextend FR32:$src))]>, XS,
                 Requires<[HasSSE2]>;
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
                 Requires<[HasSSE2, OptForSize]>;

let isAsmParserOnly = 1 in {
def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       VR128:$src2))]>, XS, VEX_4V,
                    Requires<[HasAVX]>;
def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       (load addr:$src2)))]>, XS, VEX_4V,
                    Requires<[HasAVX]>;
}
let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       VR128:$src2))]>, XS,
                    Requires<[HasSSE2]>;
def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                    "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       (load addr:$src2)))]>, XS,
                    Requires<[HasSSE2]>;
}

def : Pat<(extloadf32 addr:$src),
          (CVTSS2SDrr (MOVSSrm addr:$src))>,
      Requires<[HasSSE2, OptForSpeed]>;

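// In other words, when optimizing for speed the f32->f64 extending load is
// selected as a separate MOVSSrm followed by a register-to-register
// CVTSS2SDrr, while the folded CVTSS2SDrm form above is restricted to
// OptForSize.
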
// Convert doubleword to packed single/double fp
let isAsmParserOnly = 1 in { // SSE2 instructions without OpSize prefix
def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                     TB, VEX, Requires<[HasAVX]>;
def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                      "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     TB, VEX, Requires<[HasAVX]>;
}
def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                     TB, Requires<[HasSSE2]>;
def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                      "cvtdq2ps\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     TB, Requires<[HasSSE2]>;

// FIXME: why is the non-intrinsic version described as SSE3?
let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                     XS, VEX, Requires<[HasAVX]>;
def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     XS, VEX, Requires<[HasAVX]>;
}
def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                     XS, Requires<[HasSSE2]>;
def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     XS, Requires<[HasSSE2]>;

// Convert packed single/double fp to doubleword
let isAsmParserOnly = 1 in {
def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
}
def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>;

let isAsmParserOnly = 1 in {
def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
                        VEX;
def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
                         (ins f128mem:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                            (memop addr:$src)))]>, VEX;
}
def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                            (memop addr:$src)))]>;

let isAsmParserOnly = 1 in { // SSE2 packed instructions with XD prefix
def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                     XD, VEX, Requires<[HasAVX]>;
def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                          (memop addr:$src)))]>,
                     XD, VEX, Requires<[HasAVX]>;
}
def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                     XD, Requires<[HasSSE2]>;
def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                          (memop addr:$src)))]>,
                     XD, Requires<[HasSSE2]>;

// Convert packed single/double fp to doubleword, with truncation
let isAsmParserOnly = 1 in { // SSE2 packed instructions with XS prefix
def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
}
def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                            (int_x86_sse2_cvttps2dq VR128:$src))]>;
def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                            (int_x86_sse2_cvttps2dq (memop addr:$src)))]>;

let isAsmParserOnly = 1 in {
def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vcvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                              (int_x86_sse2_cvttps2dq VR128:$src))]>,
                      XS, VEX, Requires<[HasAVX]>;
def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "vcvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvttps2dq
                                           (memop addr:$src)))]>,
                      XS, VEX, Requires<[HasAVX]>;
}

let isAsmParserOnly = 1 in {
def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
                            (ins VR128:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
                       VEX;
def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
                            (ins f128mem:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                             (memop addr:$src)))]>, VEX;
}
def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                         (memop addr:$src)))]>;

let isAsmParserOnly = 1 in {
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                       "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;

// XMM only
def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;

// YMM only
def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                      "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                      "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
}

// Convert packed single to packed double
let isAsmParserOnly = 1, Predicates = [HasAVX] in {
                  // SSE2 instructions without OpSize prefix
def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                    "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
}
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;

let isAsmParserOnly = 1 in {
def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                     VEX, Requires<[HasAVX]>;
def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "vcvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                          (load addr:$src)))]>,
                     VEX, Requires<[HasAVX]>;
}
def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                     TB, Requires<[HasSSE2]>;
def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                          (load addr:$src)))]>,
                     TB, Requires<[HasSSE2]>;

// Convert packed double to packed single
let isAsmParserOnly = 1 in {
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;

// XMM only
def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;

// YMM only
def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                     "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                     "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
}
def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;

let isAsmParserOnly = 1 in {
def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
                           (ins f128mem:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
                                            (memop addr:$src)))]>;
}
def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
                                            (memop addr:$src)))]>;

// AVX 256-bit register conversion intrinsics
// FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
// whenever possible to avoid declaring two versions of each one.
def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
          (VCVTDQ2PSYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
          (VCVTDQ2PSYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
          (VCVTPD2PSYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
          (VCVTPD2PSYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
          (VCVTPS2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
          (VCVTPS2DQYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
          (VCVTPS2PDYrr VR128:$src)>;
def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
          (VCVTPS2PDYrm addr:$src)>;

def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
          (VCVTTPD2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
          (VCVTTPD2DQYrm addr:$src)>;

def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
          (VCVTTPS2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
          (VCVTTPS2DQYrm addr:$src)>;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Compare Instructions
//===----------------------------------------------------------------------===//

// sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
                            string asm, string asm_alt> {
  def rr : SIi8<0xC2, MRMSrcReg,
                (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
                asm, []>;

  def rm : SIi8<0xC2, MRMSrcMem,
                (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
                asm, []>;
  // Accept explicit immediate argument form instead of comparison code.
  let isAsmParserOnly = 1 in {
    def rr_alt : SIi8<0xC2, MRMSrcReg,
                  (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
                  asm_alt, []>;

    def rm_alt : SIi8<0xC2, MRMSrcMem,
                  (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
                  asm_alt, []>;
  }
}

let neverHasSideEffects = 1, isAsmParserOnly = 1 in {
  defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
                 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
                 "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
                 XS, VEX_4V;
  defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
                 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
                 "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
                 XD, VEX_4V;
}

let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
  defm CMPSS : sse12_cmp_scalar<FR32, f32mem,
                    "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                    "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}">, XS;
  defm CMPSD : sse12_cmp_scalar<FR64, f64mem,
                    "cmp${cc}sd\t{$src, $dst|$dst, $src}",
                    "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}">, XD;
}

multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
                                Intrinsic Int, string asm> {
  def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
                (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
                [(set VR128:$dst, (Int VR128:$src1,
                                       VR128:$src, imm:$cc))]>;
  def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
                (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
                [(set VR128:$dst, (Int VR128:$src1,
                                       (load addr:$src), imm:$cc))]>;
}

// Aliases to match intrinsics which expect XMM operand(s).
let isAsmParserOnly = 1 in {
  defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
                       "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
                       XS, VEX_4V;
  defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
                       "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
                       XD, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
  defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
                       "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
  defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
                       "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
}

// sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
                         ValueType vt, X86MemOperand x86memop,
                         PatFrag ld_frag, string OpcodeStr, Domain d> {
  def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
             [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
  def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
             [(set EFLAGS, (OpNode (vt RC:$src1),
                                   (ld_frag addr:$src2)))], d>;
}

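// Hardware-behavior note: the ordered COMISS/COMISD forms raise an
// invalid-operation exception for QNaN or SNaN operands, while the unordered
// UCOMISS/UCOMISD forms raise it only for SNaN; both families set ZF/PF/CF
// identically (all three set on an unordered result).
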
let Defs = [EFLAGS] in {
let isAsmParserOnly = 1 in {
  defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
                                "ucomiss", SSEPackedSingle>, VEX;
  defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
                                "ucomisd", SSEPackedDouble>, OpSize, VEX;
  let Pattern = []<dag> in {
    defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
                                 "comiss", SSEPackedSingle>, VEX;
    defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
                                 "comisd", SSEPackedDouble>, OpSize, VEX;
  }

  defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
                            load, "ucomiss", SSEPackedSingle>, VEX;
  defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
                            load, "ucomisd", SSEPackedDouble>, OpSize, VEX;

  defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
                            load, "comiss", SSEPackedSingle>, VEX;
  defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
                            load, "comisd", SSEPackedDouble>, OpSize, VEX;
}
defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
                             "ucomiss", SSEPackedSingle>, TB;
defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
                             "ucomisd", SSEPackedDouble>, TB, OpSize;

let Pattern = []<dag> in {
  defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
                              "comiss", SSEPackedSingle>, TB;
  defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
                              "comisd", SSEPackedDouble>, TB, OpSize;
}

defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
                            load, "ucomiss", SSEPackedSingle>, TB;
defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
                            load, "ucomisd", SSEPackedDouble>, TB, OpSize;

defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
                                "comiss", SSEPackedSingle>, TB;
defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
                                "comisd", SSEPackedDouble>, TB, OpSize;
} // Defs = [EFLAGS]

// sse12_cmp_packed - sse 1 & 2 compare packed instructions
multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
                            Intrinsic Int, string asm, string asm_alt,
                            Domain d> {
  def rri : PIi8<0xC2, MRMSrcReg,
             (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
             [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
  def rmi : PIi8<0xC2, MRMSrcMem,
             (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
             [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
  // Accept explicit immediate argument form instead of comparison code.
  let isAsmParserOnly = 1 in {
    def rri_alt : PIi8<0xC2, MRMSrcReg,
               (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
               asm_alt, [], d>;
    def rmi_alt : PIi8<0xC2, MRMSrcMem,
               (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
               asm_alt, [], d>;
  }
}

let isAsmParserOnly = 1 in {
  defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
                 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
                 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
                 SSEPackedSingle>, VEX_4V;
  defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
                 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
                 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
                 SSEPackedDouble>, OpSize, VEX_4V;
  defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
                 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
                 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
                 SSEPackedSingle>, VEX_4V;
  defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
                 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
                 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
                 SSEPackedDouble>, OpSize, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
  defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
                 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
                 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
                 SSEPackedSingle>, TB;
  defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
                 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
                 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
                 SSEPackedDouble>, TB, OpSize;
}

def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
          (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
          (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
          (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
          (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;

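// For reference, the SSE comparison immediates carried by $cc/imm:$cc above
// encode: 0 = eq, 1 = lt, 2 = le, 3 = unord, 4 = neq, 5 = nlt, 6 = nle,
// 7 = ord.
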
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Shuffle Instructions
//===----------------------------------------------------------------------===//

/// sse12_shuffle - sse 1 & 2 shuffle instructions
multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
                         ValueType vt, string asm, PatFrag mem_frag,
                         Domain d, bit IsConvertibleToThreeAddress = 0> {
  def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
                   (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
                   [(set RC:$dst, (vt (shufp:$src3
                            RC:$src1, (mem_frag addr:$src2))))], d>;
  let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
    def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
                     (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
                     [(set RC:$dst,
                             (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
}

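// Immediate semantics (shufps case): bits [1:0] and [3:2] of $src3 select
// two elements from $src1 for the low half of the result, while bits [5:4]
// and [7:6] select two elements from $src2 (the memory operand in the rmi
// form) for the high half.
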
1285 let isAsmParserOnly = 1 in {
1286 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1287 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1288 memopv4f32, SSEPackedSingle>, VEX_4V;
1289 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
1290 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1291 memopv8f32, SSEPackedSingle>, VEX_4V;
1292 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1293 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1294 memopv2f64, SSEPackedDouble>, OpSize, VEX_4V;
1295 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
1296 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1297 memopv4f64, SSEPackedDouble>, OpSize, VEX_4V;
1298 }
1300 let Constraints = "$src1 = $dst" in {
1301 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1302 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1303 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
1304 TB;
1305 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1306 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1307 memopv2f64, SSEPackedDouble>, TB, OpSize;
1308 }
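// Note: the shufps immediate picks each destination element two bits at a
// time: dst[0] = src1[imm[1:0]], dst[1] = src1[imm[3:2]],
// dst[2] = src2[imm[5:4]], dst[3] = src2[imm[7:6]]. shufpd uses only
// imm[1:0], one bit per f64 destination element.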
1310 //===----------------------------------------------------------------------===//
1311 // SSE 1 & 2 - Unpack Instructions
1312 //===----------------------------------------------------------------------===//
1314 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
1315 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
1316 PatFrag mem_frag, RegisterClass RC,
1317 X86MemOperand x86memop, string asm,
1318 Domain d> {
1319 def rr : PI<opc, MRMSrcReg,
1320 (outs RC:$dst), (ins RC:$src1, RC:$src2),
1321 asm, [(set RC:$dst,
1322 (vt (OpNode RC:$src1, RC:$src2)))], d>;
1323 def rm : PI<opc, MRMSrcMem,
1324 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
1325 asm, [(set RC:$dst,
1326 (vt (OpNode RC:$src1,
1327 (mem_frag addr:$src2))))], d>;
1328 }
1330 let AddedComplexity = 10 in {
1331 let isAsmParserOnly = 1 in {
1332 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1333 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1334 SSEPackedSingle>, VEX_4V;
1335 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1336 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1337 SSEPackedDouble>, OpSize, VEX_4V;
1338 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1339 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1340 SSEPackedSingle>, VEX_4V;
1341 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1342 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1343 SSEPackedDouble>, OpSize, VEX_4V;
1345 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
1346 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1347 SSEPackedSingle>, VEX_4V;
1348 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
1349 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1350 SSEPackedDouble>, OpSize, VEX_4V;
1351 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
1352 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1353 SSEPackedSingle>, VEX_4V;
1354 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
1355 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1356 SSEPackedDouble>, OpSize, VEX_4V;
1357 }
1359 let Constraints = "$src1 = $dst" in {
1360 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1361 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
1362 SSEPackedSingle>, TB;
1363 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1364 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
1365 SSEPackedDouble>, TB, OpSize;
1366 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1367 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
1368 SSEPackedSingle>, TB;
1369 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1370 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
1371 SSEPackedDouble>, TB, OpSize;
1372 } // Constraints = "$src1 = $dst"
1373 } // AddedComplexity
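// Note: for v4f32, unpcklps yields { src1[0], src2[0], src1[1], src2[1] }
// and unpckhps yields { src1[2], src2[2], src1[3], src2[3] }; the pd forms
// interleave the low or high f64 element of each source the same way.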
1375 //===----------------------------------------------------------------------===//
1376 // SSE 1 & 2 - Extract Floating-Point Sign mask
1377 //===----------------------------------------------------------------------===//
1379 /// sse12_extr_sign_mask - sse 1 & 2 sign mask extraction
1380 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
1381 Domain d> {
1382 def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
1383 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1384 [(set GR32:$dst, (Int RC:$src))], d>;
1385 def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
1386 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>, REX_W;
1387 }
1390 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
1391 SSEPackedSingle>, TB;
1392 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
1393 SSEPackedDouble>, TB, OpSize;
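// Note: movmskps copies the sign bit of each packed single into bits 3:0
// of the destination GR32 (movmskpd fills bits 1:0). For example, an input
// of { -1.0, 2.0, -3.0, 4.0 } produces 0b0101 = 5.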
1395 let isAsmParserOnly = 1 in {
1396 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
1397 "movmskps", SSEPackedSingle>, VEX;
1398 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
1399 "movmskpd", SSEPackedDouble>, OpSize,
1401 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
1402 "movmskps", SSEPackedSingle>, VEX;
1403 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
1404 "movmskpd", SSEPackedDouble>, OpSize,
1408 def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1409 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1410 def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1411 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1413 def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1414 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1415 def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1416 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1420 //===----------------------------------------------------------------------===//
1421 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
1422 //===----------------------------------------------------------------------===//
1424 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
1425 // names that start with 'Fs'.
1427 // Alias instructions that map fld0 to pxor for sse.
1428 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
1429 canFoldAsLoad = 1 in {
1430 // FIXME: Set encoding to pseudo!
1431 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1432 [(set FR32:$dst, fp32imm0)]>,
1433 Requires<[HasSSE1]>, TB, OpSize;
1434 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1435 [(set FR64:$dst, fpimm0)]>,
1436 Requires<[HasSSE2]>, TB, OpSize;
1437 }
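// Note: xorps/pxor of a register with itself is the canonical x86 zeroing
// idiom; it has no input dependence, which is why these defs can safely be
// marked isReMaterializable and isAsCheapAsAMove.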
1439 // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
1440 // bits are disregarded.
1441 let neverHasSideEffects = 1 in {
1442 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1443 "movaps\t{$src, $dst|$dst, $src}", []>;
1444 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1445 "movapd\t{$src, $dst|$dst, $src}", []>;
1448 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
1449 // bits are disregarded.
1450 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1451 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1452 "movaps\t{$src, $dst|$dst, $src}",
1453 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
1454 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1455 "movapd\t{$src, $dst|$dst, $src}",
1456 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
1457 }
1459 //===----------------------------------------------------------------------===//
1460 // SSE 1 & 2 - Logical Instructions
1461 //===----------------------------------------------------------------------===//
1463 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
1465 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
1466 SDNode OpNode> {
1467 let isAsmParserOnly = 1 in {
1468 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
1469 FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, VEX_4V;
1471 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
1472 FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, OpSize, VEX_4V;
1473 }
1475 let Constraints = "$src1 = $dst" in {
1476 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
1477 f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
1479 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
1480 f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
1481 }
1482 }
1484 // Alias bitwise logical operations using SSE logical ops on packed FP values.
1485 let mayLoad = 0 in {
1486 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
1487 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
1488 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
1489 }
1491 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
1492 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
1494 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
1496 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
1497 SDNode OpNode, int HasPat = 0,
1498 list<list<dag>> Pattern = []> {
1499 let isAsmParserOnly = 1, Pattern = []<dag> in {
1500 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1501 !strconcat(OpcodeStr, "ps"), f128mem,
1502 !if(HasPat, Pattern[0], // rr
1503 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1504 VR128:$src2)))]),
1505 !if(HasPat, Pattern[2], // rm
1506 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1507 (memopv2i64 addr:$src2)))]), 0>,
1508 VEX_4V;
1510 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1511 !strconcat(OpcodeStr, "pd"), f128mem,
1512 !if(HasPat, Pattern[1], // rr
1513 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1514 (bc_v2i64 (v2f64 VR128:$src2))))]),
1516 !if(HasPat, Pattern[3], // rm
1517 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1518 (memopv2i64 addr:$src2)))]), 0>,
1519 OpSize, VEX_4V;
1520 }
1521 let Constraints = "$src1 = $dst" in {
1522 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1523 !strconcat(OpcodeStr, "ps"), f128mem,
1524 !if(HasPat, Pattern[0], // rr
1525 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1526 VR128:$src2)))]),
1527 !if(HasPat, Pattern[2], // rm
1528 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1529 (memopv2i64 addr:$src2)))])>, TB;
1531 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1532 !strconcat(OpcodeStr, "pd"), f128mem,
1533 !if(HasPat, Pattern[1], // rr
1534 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1535 (bc_v2i64 (v2f64 VR128:$src2))))]),
1537 !if(HasPat, Pattern[3], // rm
1538 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1539 (memopv2i64 addr:$src2)))])>,
1540 TB, OpSize;
1541 }
1542 }
1544 /// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical ops forms
1546 let isAsmParserOnly = 1 in {
1547 multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr> {
1548 defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
1549 !strconcat(OpcodeStr, "ps"), f256mem, [], [], 0>, VEX_4V;
1551 defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
1552 !strconcat(OpcodeStr, "pd"), f256mem, [], [], 0>, OpSize, VEX_4V;
1553 }
1554 }
1556 // AVX 256-bit packed logical ops forms
1557 defm VAND : sse12_fp_packed_logical_y<0x54, "and">;
1558 defm VOR : sse12_fp_packed_logical_y<0x56, "or">;
1559 defm VXOR : sse12_fp_packed_logical_y<0x57, "xor">;
1560 let isCommutable = 0 in
1561 defm VANDN : sse12_fp_packed_logical_y<0x55, "andn">;
1563 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
1564 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
1565 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
1566 let isCommutable = 0 in
1567 defm ANDN : sse12_fp_packed_logical<0x55, "andn", undef /* dummy */, 1, [
1568 // single r+r
1569 [(set VR128:$dst, (v2i64 (and (xor VR128:$src1,
1570 (bc_v2i64 (v4i32 immAllOnesV))),
1571 VR128:$src2)))],
1572 // double r+r
1573 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1574 (bc_v2i64 (v2f64 VR128:$src2))))],
1575 // single r+m
1576 [(set VR128:$dst, (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
1577 (bc_v2i64 (v4i32 immAllOnesV))),
1578 (memopv2i64 addr:$src2))))],
1579 // double r+m
1580 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1581 (memopv2i64 addr:$src2)))]]>;
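// Note: andnps/andnpd compute (~src1) & src2, so the operands are not
// interchangeable; that is why ANDN is defined with isCommutable = 0 and
// needs the explicit (xor ..., immAllOnesV) / vnot patterns above.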
1583 //===----------------------------------------------------------------------===//
1584 // SSE 1 & 2 - Arithmetic Instructions
1585 //===----------------------------------------------------------------------===//
1587 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
1588 /// vector forms.
1590 /// In addition, we also have a special variant of the scalar form here to
1591 /// represent the associated intrinsic operation. This form is unlike the
1592 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
1593 /// and leaves the top elements unmodified (therefore these cannot be commuted).
1595 /// These three forms can each be reg+reg or reg+mem.
1598 /// FIXME: once all 256-bit intrinsics are matched, clean up and refactor
1599 /// those classes.
1600 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
1601 bit Is2Addr = 1> {
1602 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
1603 OpNode, FR32, f32mem, Is2Addr>, XS;
1604 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
1605 OpNode, FR64, f64mem, Is2Addr>, XD;
1606 }
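// For example, "addss %xmm1, %xmm0" in the intrinsic form adds only
// element 0 of the two vectors and leaves elements 1-3 of %xmm0 unchanged,
// which is why the _Int variants below cannot be marked commutable.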
1608 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
1609 bit Is2Addr = 1> {
1610 let mayLoad = 0 in {
1611 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
1612 v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
1613 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
1614 v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
1615 }
1616 }
1618 multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
1619 SDNode OpNode> {
1620 let mayLoad = 0 in {
1621 defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
1622 v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
1623 defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
1624 v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
1625 }
1626 }
1628 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
1629 bit Is2Addr = 1> {
1630 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1631 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
1632 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1633 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
1634 }
1636 multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
1637 bit Is2Addr = 1> {
1638 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1639 !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
1640 SSEPackedSingle, Is2Addr>, TB;
1642 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1643 !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
1644 SSEPackedDouble, Is2Addr>, TB, OpSize;
1645 }
1647 multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
1648 defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1649 !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
1650 SSEPackedSingle, 0>, TB;
1652 defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1653 !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
1654 SSEPackedDouble, 0>, TB, OpSize;
1655 }
1657 // Binary Arithmetic instructions
1658 let isAsmParserOnly = 1 in {
1659 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
1660 basic_sse12_fp_binop_s_int<0x58, "add", 0>,
1661 basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
1662 basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
1663 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
1664 basic_sse12_fp_binop_s_int<0x59, "mul", 0>,
1665 basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
1666 basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
1668 let isCommutable = 0 in {
1669 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
1670 basic_sse12_fp_binop_s_int<0x5C, "sub", 0>,
1671 basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
1672 basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
1673 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
1674 basic_sse12_fp_binop_s_int<0x5E, "div", 0>,
1675 basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
1676 basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
1677 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
1678 basic_sse12_fp_binop_s_int<0x5F, "max", 0>,
1679 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
1680 basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
1681 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
1682 basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
1683 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
1684 basic_sse12_fp_binop_s_int<0x5D, "min", 0>,
1685 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
1686 basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
1687 basic_sse12_fp_binop_p_y_int<0x5D, "min">,
1688 basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
1689 }
1690 }
1692 let Constraints = "$src1 = $dst" in {
1693 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
1694 basic_sse12_fp_binop_p<0x58, "add", fadd>,
1695 basic_sse12_fp_binop_s_int<0x58, "add">;
1696 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
1697 basic_sse12_fp_binop_p<0x59, "mul", fmul>,
1698 basic_sse12_fp_binop_s_int<0x59, "mul">;
1700 let isCommutable = 0 in {
1701 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
1702 basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
1703 basic_sse12_fp_binop_s_int<0x5C, "sub">;
1704 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
1705 basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
1706 basic_sse12_fp_binop_s_int<0x5E, "div">;
1707 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
1708 basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
1709 basic_sse12_fp_binop_s_int<0x5F, "max">,
1710 basic_sse12_fp_binop_p_int<0x5F, "max">;
1711 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
1712 basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
1713 basic_sse12_fp_binop_s_int<0x5D, "min">,
1714 basic_sse12_fp_binop_p_int<0x5D, "min">;
1715 }
1716 }
1719 /// In addition, we also have a special variant of the scalar form here to
1720 /// represent the associated intrinsic operation. This form is unlike the
1721 /// plain scalar form, in that it takes an entire vector (instead of a
1722 /// scalar) and leaves the top elements undefined.
1724 /// And, we have a special variant form for a full-vector intrinsic form.
1726 /// sse1_fp_unop_s - SSE1 unops in scalar form.
1727 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
1728 SDNode OpNode, Intrinsic F32Int> {
1729 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1730 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1731 [(set FR32:$dst, (OpNode FR32:$src))]>;
1732 // For scalar unary operations, fold a load into the operation
1733 // only in OptForSize mode. It eliminates an instruction, but it also
1734 // eliminates a whole-register clobber (the load), so it introduces a
1735 // partial register update condition.
1736 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
1737 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1738 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
1739 Requires<[HasSSE1, OptForSize]>;
1740 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1741 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1742 [(set VR128:$dst, (F32Int VR128:$src))]>;
1743 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1744 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1745 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1746 }
1748 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
1749 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1750 SDNode OpNode, Intrinsic F32Int> {
1751 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
1752 !strconcat(OpcodeStr,
1753 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1754 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
1755 !strconcat(OpcodeStr,
1756 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1757 []>, XS, Requires<[HasAVX, OptForSize]>;
1758 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1759 !strconcat(OpcodeStr,
1760 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1761 [(set VR128:$dst, (F32Int VR128:$src))]>;
1762 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1763 !strconcat(OpcodeStr,
1764 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1765 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1766 }
1768 /// sse1_fp_unop_p - SSE1 unops in packed form.
1769 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1770 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1771 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1772 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
1773 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1774 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1775 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
1776 }
1778 /// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
1779 multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1780 def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1781 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1782 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
1783 def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1784 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1785 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
1786 }
1788 /// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
1789 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1790 Intrinsic V4F32Int> {
1791 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1792 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1793 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
1794 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1795 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1796 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
1797 }
1799 /// sse1_fp_unop_p_y_int - AVX 256-bit intrinsics unops in packed forms.
1800 multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1801 Intrinsic V4F32Int> {
1802 def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1803 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1804 [(set VR256:$dst, (V4F32Int VR256:$src))]>;
1805 def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1806 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1807 [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
1808 }
1810 /// sse2_fp_unop_s - SSE2 unops in scalar form.
1811 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
1812 SDNode OpNode, Intrinsic F64Int> {
1813 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1814 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1815 [(set FR64:$dst, (OpNode FR64:$src))]>;
1816 // See the comments in sse1_fp_unop_s for why this is OptForSize.
1817 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
1818 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1819 [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
1820 Requires<[HasSSE2, OptForSize]>;
1821 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1822 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1823 [(set VR128:$dst, (F64Int VR128:$src))]>;
1824 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1825 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1826 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1827 }
1829 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
1830 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1831 SDNode OpNode, Intrinsic F64Int> {
1832 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
1833 !strconcat(OpcodeStr,
1834 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1835 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
1836 (ins FR64:$src1, f64mem:$src2),
1837 !strconcat(OpcodeStr,
1838 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1839 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1840 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1841 [(set VR128:$dst, (F64Int VR128:$src))]>;
1842 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1843 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1844 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1845 }
1847 /// sse2_fp_unop_p - SSE2 unops in vector forms.
1848 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
1849 SDNode OpNode> {
1850 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1851 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1852 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
1853 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1854 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1855 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
1856 }
1858 /// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
1859 multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1860 def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1861 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1862 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
1863 def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1864 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1865 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
1866 }
1868 /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
1869 multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1870 Intrinsic V2F64Int> {
1871 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1872 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1873 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
1874 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1875 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1876 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
1877 }
1879 /// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
1880 multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1881 Intrinsic V2F64Int> {
1882 def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1883 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1884 [(set VR256:$dst, (V2F64Int VR256:$src))]>;
1885 def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1886 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1887 [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
1888 }
1890 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
1892 defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse_sqrt_ss>,
1893 sse2_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1894 VEX_4V;
1896 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
1897 sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
1898 sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1899 sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1900 sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
1901 sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
1902 sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
1903 sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
1904 VEX;
1906 // Reciprocal approximations. Note that these typically require refinement
1907 // in order to obtain suitable precision.
1908 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt", X86frsqrt,
1909 int_x86_sse_rsqrt_ss>, VEX_4V;
1910 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
1911 sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
1912 sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
1913 sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;
1915 defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp", X86frcp, int_x86_sse_rcp_ss>,
1916 VEX_4V;
1917 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
1918 sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
1919 sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
1920 sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
1921 }
1924 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
1925 sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
1926 sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
1927 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1928 sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
1929 sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
1931 // Reciprocal approximations. Note that these typically require refinement
1932 // in order to obtain suitable precision.
1933 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
1934 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
1935 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
1936 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
1937 sse1_fp_unop_p<0x53, "rcp", X86frcp>,
1938 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
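// Note: rcpss/rcpps and rsqrtss/rsqrtps only guarantee about 12 bits of
// precision. One Newton-Raphson step recovers close to full single
// precision:
//   rcp:   x1 = x0 * (2.0 - a * x0)
//   rsqrt: x1 = x0 * (1.5 - 0.5 * a * x0 * x0)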
1940 // There is no f64 version of the reciprocal approximation instructions.
1942 //===----------------------------------------------------------------------===//
1943 // SSE 1 & 2 - Non-temporal stores
1944 //===----------------------------------------------------------------------===//
1946 let isAsmParserOnly = 1 in {
1947 def VMOVNTPSmr_Int : VPSI<0x2B, MRMDestMem, (outs),
1948 (ins i128mem:$dst, VR128:$src),
1949 "movntps\t{$src, $dst|$dst, $src}",
1950 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>, VEX;
1951 def VMOVNTPDmr_Int : VPDI<0x2B, MRMDestMem, (outs),
1952 (ins i128mem:$dst, VR128:$src),
1953 "movntpd\t{$src, $dst|$dst, $src}",
1954 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>, VEX;
1956 let ExeDomain = SSEPackedInt in
1957 def VMOVNTDQmr_Int : VPDI<0xE7, MRMDestMem, (outs),
1958 (ins f128mem:$dst, VR128:$src),
1959 "movntdq\t{$src, $dst|$dst, $src}",
1960 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>, VEX;
1962 let AddedComplexity = 400 in { // Prefer non-temporal versions
1963 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
1964 (ins f128mem:$dst, VR128:$src),
1965 "movntps\t{$src, $dst|$dst, $src}",
1966 [(alignednontemporalstore (v4f32 VR128:$src),
1967 addr:$dst)]>, VEX;
1968 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
1969 (ins f128mem:$dst, VR128:$src),
1970 "movntpd\t{$src, $dst|$dst, $src}",
1971 [(alignednontemporalstore (v2f64 VR128:$src),
1972 addr:$dst)]>, VEX;
1973 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
1974 (ins f128mem:$dst, VR128:$src),
1975 "movntdq\t{$src, $dst|$dst, $src}",
1976 [(alignednontemporalstore (v2f64 VR128:$src),
1977 addr:$dst)]>, VEX;
1978 let ExeDomain = SSEPackedInt in
1979 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
1980 (ins f128mem:$dst, VR128:$src),
1981 "movntdq\t{$src, $dst|$dst, $src}",
1982 [(alignednontemporalstore (v4f32 VR128:$src),
1983 addr:$dst)]>, VEX;
1985 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
1986 (ins f256mem:$dst, VR256:$src),
1987 "movntps\t{$src, $dst|$dst, $src}",
1988 [(alignednontemporalstore (v8f32 VR256:$src),
1989 addr:$dst)]>, VEX;
1990 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
1991 (ins f256mem:$dst, VR256:$src),
1992 "movntpd\t{$src, $dst|$dst, $src}",
1993 [(alignednontemporalstore (v4f64 VR256:$src),
1994 addr:$dst)]>, VEX;
1995 def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
1996 (ins f256mem:$dst, VR256:$src),
1997 "movntdq\t{$src, $dst|$dst, $src}",
1998 [(alignednontemporalstore (v4f64 VR256:$src),
1999 addr:$dst)]>, VEX;
2000 let ExeDomain = SSEPackedInt in
2001 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
2002 (ins f256mem:$dst, VR256:$src),
2003 "movntdq\t{$src, $dst|$dst, $src}",
2004 [(alignednontemporalstore (v8f32 VR256:$src),
2005 addr:$dst)]>, VEX;
2006 }
2007 }
2009 def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
2010 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
2011 def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
2012 (VMOVNTPDYmr addr:$dst, VR256:$src)>;
2013 def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
2014 (VMOVNTPSYmr addr:$dst, VR256:$src)>;
2016 def MOVNTPSmr_Int : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2017 "movntps\t{$src, $dst|$dst, $src}",
2018 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
2019 def MOVNTPDmr_Int : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2020 "movntpd\t{$src, $dst|$dst, $src}",
2021 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
2023 let ExeDomain = SSEPackedInt in
2024 def MOVNTDQmr_Int : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2025 "movntdq\t{$src, $dst|$dst, $src}",
2026 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
2028 let AddedComplexity = 400 in { // Prefer non-temporal versions
2029 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2030 "movntps\t{$src, $dst|$dst, $src}",
2031 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2032 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2033 "movntpd\t{$src, $dst|$dst, $src}",
2034 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2036 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2037 "movntdq\t{$src, $dst|$dst, $src}",
2038 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2040 let ExeDomain = SSEPackedInt in
2041 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2042 "movntdq\t{$src, $dst|$dst, $src}",
2043 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2045 // There is no AVX form for instructions below this point
2046 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2047 "movnti\t{$src, $dst|$dst, $src}",
2048 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
2049 TB, Requires<[HasSSE2]>;
2051 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2052 "movnti\t{$src, $dst|$dst, $src}",
2053 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
2054 TB, Requires<[HasSSE2]>;
2055 }
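// Note: movnti and the movnt* stores above write around the caches through
// write-combining buffers and are weakly ordered; issue sfence (defined
// below) before another agent may rely on the stored data.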
2057 def MOVNTImr_Int : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2058 "movnti\t{$src, $dst|$dst, $src}",
2059 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
2060 TB, Requires<[HasSSE2]>;
2062 //===----------------------------------------------------------------------===//
2063 // SSE 1 & 2 - Misc Instructions (No AVX form)
2064 //===----------------------------------------------------------------------===//
2066 // Prefetch intrinsic.
2067 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
2068 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
2069 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
2070 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
2071 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
2072 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
2073 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
2074 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
2076 // Load, store, and memory fence
2077 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
2078 TB, Requires<[HasSSE1]>;
2079 def : Pat<(X86SFence), (SFENCE)>;
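// Note: sfence exists to order the weakly-ordered non-temporal stores
// above; a typical producer does "movntps ...; sfence" before publishing a
// ready flag.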
2081 // Alias instructions that map zero vector to pxor / xorp* for sse.
2082 // We set canFoldAsLoad because this can be converted to a constant-pool
2083 // load of an all-zeros value if folding it would be beneficial.
2084 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2085 // JIT implementation, which does not expand the instructions below the
2086 // way X86MCInstLower does.
2087 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2088 isCodeGenOnly = 1 in {
2089 def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2090 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
2091 def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2092 [(set VR128:$dst, (v2f64 immAllZerosV))]>;
2093 let ExeDomain = SSEPackedInt in
2094 def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2095 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2096 }
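// Note: V_SET0PS/PD/PI all zero an xmm register; the three flavors exist
// so the zero idiom can be emitted in the execution domain (float vs.
// integer) of its users and avoid domain-crossing penalties.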
2098 // The same as above, but for AVX. The 128-bit versions are the
2099 // same, but re-encoded. The 256-bit forms do not have a PI version.
2100 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2101 // JIT implementation, which does not expand the instructions below the
2102 // way X86MCInstLower does.
2103 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2104 isCodeGenOnly = 1, Predicates = [HasAVX] in {
2105 def AVX_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2106 [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V;
2107 def AVX_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2108 [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V;
2109 def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2110 [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
2111 def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2112 [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
2113 let ExeDomain = SSEPackedInt in
2114 def AVX_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2115 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2116 }
2118 def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
2119 def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
2120 def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
2122 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2123 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
2125 //===----------------------------------------------------------------------===//
2126 // SSE 1 & 2 - Load/Store MXCSR register
2127 //===----------------------------------------------------------------------===//
2129 let isAsmParserOnly = 1 in {
2130 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2131 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
2132 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2133 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
2136 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2137 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
2138 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2139 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
2141 //===---------------------------------------------------------------------===//
2142 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
2143 //===---------------------------------------------------------------------===//
2145 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2147 let isAsmParserOnly = 1 in {
2148 let neverHasSideEffects = 1 in {
2149 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2150 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2151 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2152 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2154 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2155 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2156 def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2157 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2159 let canFoldAsLoad = 1, mayLoad = 1 in {
2160 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2161 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2162 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2163 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2164 let Predicates = [HasAVX] in {
2165 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2166 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2167 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2168 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2172 let mayStore = 1 in {
2173 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
2174 (ins i128mem:$dst, VR128:$src),
2175 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2176 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
2177 (ins i256mem:$dst, VR256:$src),
2178 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2179 let Predicates = [HasAVX] in {
2180 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2181 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2182 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
2183 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2188 let neverHasSideEffects = 1 in
2189 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2190 "movdqa\t{$src, $dst|$dst, $src}", []>;
2192 let canFoldAsLoad = 1, mayLoad = 1 in {
2193 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2194 "movdqa\t{$src, $dst|$dst, $src}",
2195 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
2196 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2197 "movdqu\t{$src, $dst|$dst, $src}",
2198 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2199 XS, Requires<[HasSSE2]>;
2200 }
2202 let mayStore = 1 in {
2203 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2204 "movdqa\t{$src, $dst|$dst, $src}",
2205 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
2206 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2207 "movdqu\t{$src, $dst|$dst, $src}",
2208 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2209 XS, Requires<[HasSSE2]>;
2210 }
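// Note: movdqa raises #GP on a 16-byte memory operand that is not 16-byte
// aligned, while movdqu accepts any alignment (at some cost on older
// cores); hence the separate aligned and unaligned defs here.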
2212 // Intrinsic forms of MOVDQU load and store
2213 let isAsmParserOnly = 1 in {
2214 let canFoldAsLoad = 1 in
2215 def VMOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2216 "vmovdqu\t{$src, $dst|$dst, $src}",
2217 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2218 XS, VEX, Requires<[HasAVX]>;
2219 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2220 "vmovdqu\t{$src, $dst|$dst, $src}",
2221 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2222 XS, VEX, Requires<[HasAVX]>;
2223 }
2225 let canFoldAsLoad = 1 in
2226 def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2227 "movdqu\t{$src, $dst|$dst, $src}",
2228 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2229 XS, Requires<[HasSSE2]>;
2230 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2231 "movdqu\t{$src, $dst|$dst, $src}",
2232 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2233 XS, Requires<[HasSSE2]>;
2235 } // ExeDomain = SSEPackedInt
2237 def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
2238 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
2239 (VMOVDQUYmr addr:$dst, VR256:$src)>;
2241 //===---------------------------------------------------------------------===//
2242 // SSE2 - Packed Integer Arithmetic Instructions
2243 //===---------------------------------------------------------------------===//
2245 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2247 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
2248 bit IsCommutable = 0, bit Is2Addr = 1> {
2249 let isCommutable = IsCommutable in
2250 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2251 (ins VR128:$src1, VR128:$src2),
2253 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2254 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2255 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2256 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2257 (ins VR128:$src1, i128mem:$src2),
2259 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2260 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2261 [(set VR128:$dst, (IntId VR128:$src1,
2262 (bitconvert (memopv2i64 addr:$src2))))]>;
2263 }
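// Note: Is2Addr selects between the classic two-address SSE asm string
// ("$src2, $dst" with $src1 tied to $dst) and the three-operand AVX string
// ("$src2, $src1, $dst"); the AVX instantiations below pass Is2Addr = 0
// and add VEX_4V.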
2265 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
2266 string OpcodeStr, Intrinsic IntId,
2267 Intrinsic IntId2, bit Is2Addr = 1> {
2268 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2269 (ins VR128:$src1, VR128:$src2),
2271 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2272 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2273 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2274 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2275 (ins VR128:$src1, i128mem:$src2),
2277 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2278 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2279 [(set VR128:$dst, (IntId VR128:$src1,
2280 (bitconvert (memopv2i64 addr:$src2))))]>;
2281 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
2282 (ins VR128:$src1, i32i8imm:$src2),
2284 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2285 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2286 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
2287 }
2289 /// PDI_binop_rm - Simple SSE2 binary operator.
2290 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2291 ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
2292 let isCommutable = IsCommutable in
2293 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2294 (ins VR128:$src1, VR128:$src2),
2296 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2297 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2298 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
2299 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2300 (ins VR128:$src1, i128mem:$src2),
2302 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2303 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2304 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
2305 (bitconvert (memopv2i64 addr:$src2)))))]>;
2306 }
2308 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
2310 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
2311 /// to collapse (bitconvert VT to VT) into its operand.
2313 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
2314 bit IsCommutable = 0, bit Is2Addr = 1> {
2315 let isCommutable = IsCommutable in
2316 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2317 (ins VR128:$src1, VR128:$src2),
2319 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2320 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2321 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
2322 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2323 (ins VR128:$src1, i128mem:$src2),
2325 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2326 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2327 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
2328 }
2330 } // ExeDomain = SSEPackedInt
2332 // 128-bit Integer Arithmetic
2334 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2335 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
2336 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
2337 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
2338 defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
2339 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
2340 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
2341 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
2342 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
2343 defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
2346 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
2347 VEX_4V;
2348 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
2349 VEX_4V;
2350 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
2351 VEX_4V;
2352 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
2353 VEX_4V;
2354 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
2355 VEX_4V;
2356 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
2357 VEX_4V;
2358 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
2359 VEX_4V;
2360 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
2361 VEX_4V;
2362 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
2363 VEX_4V;
2364 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
2365 VEX_4V;
2366 defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
2367 VEX_4V;
2368 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
2369 VEX_4V;
2370 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
2371 VEX_4V;
2372 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
2373 VEX_4V;
2374 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
2375 VEX_4V;
2376 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
2377 VEX_4V;
2378 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
2379 VEX_4V;
2380 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
2381 VEX_4V;
2382 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
2383 VEX_4V;
2384 }
2386 let Constraints = "$src1 = $dst" in {
2387 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
2388 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
2389 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
2390 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
2391 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
2392 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
2393 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
2394 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
2395 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
2398 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
2399 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
2400 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
2401 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
2402 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
2403 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
2404 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
2405 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
2406 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
2407 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
2408 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
2409 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
2410 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
2411 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
2412 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
2413 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
2414 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
2415 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
2416 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
2418 } // Constraints = "$src1 = $dst"
2420 //===---------------------------------------------------------------------===//
2421 // SSE2 - Packed Integer Logical Instructions
2422 //===---------------------------------------------------------------------===//
2424 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2425 defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
2426 int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
2427 VEX_4V;
2428 defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
2429 int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
2430 VEX_4V;
2431 defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
2432 int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
2433 VEX_4V;
2435 defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
2436 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
2437 VEX_4V;
2438 defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
2439 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
2440 VEX_4V;
2441 defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
2442 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
2443 VEX_4V;
2445 defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
2446 int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
2447 VEX_4V;
2448 defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
2449 int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
2450 VEX_4V;
2452 defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
2453 defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
2454 defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
2456 let ExeDomain = SSEPackedInt in {
2457 let neverHasSideEffects = 1 in {
2458 // 128-bit logical shifts.
2459 def VPSLLDQri : PDIi8<0x73, MRM7r,
2460 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2461 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2463 def VPSRLDQri : PDIi8<0x73, MRM3r,
2464 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2465 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2467 // PSRADQri doesn't exist in SSE[1-3].
2469 def VPANDNrr : PDI<0xDF, MRMSrcReg,
2470 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2471 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2472 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2473 VR128:$src2)))]>, VEX_4V;
2475 def VPANDNrm : PDI<0xDF, MRMSrcMem,
2476 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2477 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2478 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2479 (memopv2i64 addr:$src2))))]>,
2480 VEX_4V;
2481 }
2482 }
2484 let Constraints = "$src1 = $dst" in {
2485 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
2486 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
2487 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
2488 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
2489 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
2490 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
2492 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
2493 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
2494 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
2495 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
2496 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
2497 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
2499 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
2500 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
2501 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
2502 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
2504 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
2505 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
2506 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
2508 let ExeDomain = SSEPackedInt in {
2509 let neverHasSideEffects = 1 in {
2510 // 128-bit logical shifts.
2511 def PSLLDQri : PDIi8<0x73, MRM7r,
2512 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2513 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
2514 def PSRLDQri : PDIi8<0x73, MRM3r,
2515 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2516 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
2517 // PSRADQri doesn't exist in SSE[1-3].
2518 }
2519 def PANDNrr : PDI<0xDF, MRMSrcReg,
2520 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2521 "pandn\t{$src2, $dst|$dst, $src2}",
2522 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2523 VR128:$src2)))]>;
2525 def PANDNrm : PDI<0xDF, MRMSrcMem,
2526 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2527 "pandn\t{$src2, $dst|$dst, $src2}",
2528 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2529 (memopv2i64 addr:$src2))))]>;
2530 }
2531 } // Constraints = "$src1 = $dst"
2533 let Predicates = [HasAVX] in {
2534 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2535 (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2536 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2537 (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2538 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2539 (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
2540 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2541 (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
2542 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2543 (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2545 // Shift up / down and insert zeros.
2546 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2547 (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2548 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2549 (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2550 }
2552 let Predicates = [HasSSE2] in {
2553 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2554 (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2555 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2556 (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2557 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2558 (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
2559 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2560 (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
2561 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2562 (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2564 // Shift up / down and insert zeros.
2565 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2566 (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2567 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2568 (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2569 }
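// Illustrative only, not part of the definitions: a minimal C sketch
// (assuming the usual <emmintrin.h> intrinsic mapping; helper name is ours)
// of the byte-granularity whole-register shifts these patterns select:
//
//   #include <emmintrin.h>
//   __m128i shift_bytes(__m128i v) {
//     __m128i hi = _mm_slli_si128(v, 4);   // pslldq $4: shift left 4 *bytes*
//     __m128i lo = _mm_srli_si128(v, 4);   // psrldq $4: shift right 4 *bytes*
//     return _mm_or_si128(hi, lo);         // zeros are shifted in on each side
//   }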
2571 //===---------------------------------------------------------------------===//
2572 // SSE2 - Packed Integer Comparison Instructions
2573 //===---------------------------------------------------------------------===//
2575 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2576 defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
2577 0>, VEX_4V;
2578 defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
2579 0>, VEX_4V;
2580 defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
2581 0>, VEX_4V;
2582 defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
2583 0>, VEX_4V;
2584 defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
2585 0>, VEX_4V;
2586 defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
2587 0>, VEX_4V;
2588 }
2590 let Constraints = "$src1 = $dst" in {
2591 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
2592 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
2593 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
2594 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
2595 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
2596 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
2597 } // Constraints = "$src1 = $dst"
2599 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
2600 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
2601 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
2602 (PCMPEQBrm VR128:$src1, addr:$src2)>;
2603 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
2604 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
2605 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
2606 (PCMPEQWrm VR128:$src1, addr:$src2)>;
2607 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
2608 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
2609 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
2610 (PCMPEQDrm VR128:$src1, addr:$src2)>;
2612 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
2613 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
2614 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
2615 (PCMPGTBrm VR128:$src1, addr:$src2)>;
2616 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
2617 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
2618 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
2619 (PCMPGTWrm VR128:$src1, addr:$src2)>;
2620 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
2621 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
2622 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
2623 (PCMPGTDrm VR128:$src1, addr:$src2)>;
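// Illustrative only: these compares produce an all-zeros/all-ones element
// mask, not a flags result. A C sketch assuming the usual <emmintrin.h>
// mapping (helper name is ours):
//
//   #include <emmintrin.h>
//   __m128i select_gt(__m128i a, __m128i b) {
//     __m128i m = _mm_cmpgt_epi16(a, b);            // pcmpgtw: -1 where a>b
//     return _mm_or_si128(_mm_and_si128(m, a),      // keep a where a>b
//                         _mm_andnot_si128(m, b));  // else keep b
//   }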
2625 //===---------------------------------------------------------------------===//
2626 // SSE2 - Packed Integer Pack Instructions
2627 //===---------------------------------------------------------------------===//
2629 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2630 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
2631 0, 0>, VEX_4V;
2632 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
2633 0, 0>, VEX_4V;
2634 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
2635 0, 0>, VEX_4V;
2636 }
2638 let Constraints = "$src1 = $dst" in {
2639 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
2640 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
2641 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2642 } // Constraints = "$src1 = $dst"
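// Illustrative only: the pack instructions narrow with saturation, not
// truncation. A C sketch assuming the usual <emmintrin.h> mapping (helper
// name is ours):
//
//   #include <emmintrin.h>
//   __m128i narrow16to8(__m128i lo8x16, __m128i hi8x16) {
//     // packsswb: 16 x i16 -> 16 x i8, clamping each element to [-128, 127]
//     return _mm_packs_epi16(lo8x16, hi8x16);
//   }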
2644 //===---------------------------------------------------------------------===//
2645 // SSE2 - Packed Integer Shuffle Instructions
2646 //===---------------------------------------------------------------------===//
2648 let ExeDomain = SSEPackedInt in {
2649 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
2650 PatFrag bc_frag> {
2651 def ri : Ii8<0x70, MRMSrcReg,
2652 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
2653 !strconcat(OpcodeStr,
2654 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2655 [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
2656 (undef))))]>;
2657 def mi : Ii8<0x70, MRMSrcMem,
2658 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
2659 !strconcat(OpcodeStr,
2660 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2661 [(set VR128:$dst, (vt (pshuf_frag:$src2
2662 (bc_frag (memopv2i64 addr:$src1)),
2663 (undef))))]>;
2664 }
2665 } // ExeDomain = SSEPackedInt
2667 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2668 let AddedComplexity = 5 in
2669 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
2670 VEX;
2672 // SSE2 with ImmT == Imm8 and XS prefix.
2673 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
2674 VEX;
2676 // SSE2 with ImmT == Imm8 and XD prefix.
2677 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
2678 VEX;
2679 }
2681 let Predicates = [HasSSE2] in {
2682 let AddedComplexity = 5 in
2683 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
2685 // SSE2 with ImmT == Imm8 and XS prefix.
2686 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
2688 // SSE2 with ImmT == Imm8 and XD prefix.
2689 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
2690 }
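// Illustrative only: the immediate selects one source element per
// destination lane. A C sketch assuming the usual <emmintrin.h> mapping
// (helper name is ours):
//
//   #include <emmintrin.h>
//   __m128i broadcast_lane0(__m128i v) {
//     // pshufd $0x00: replicate dword 0 into all four dwords
//     return _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 0, 0, 0));
//   }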
2692 //===---------------------------------------------------------------------===//
2693 // SSE2 - Packed Integer Unpack Instructions
2694 //===---------------------------------------------------------------------===//
2696 let ExeDomain = SSEPackedInt in {
2697 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
2698 PatFrag unp_frag, PatFrag bc_frag, bit Is2Addr = 1> {
2699 def rr : PDI<opc, MRMSrcReg,
2700 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2701 !if(Is2Addr,
2702 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2703 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2704 [(set VR128:$dst, (vt (unp_frag VR128:$src1, VR128:$src2)))]>;
2705 def rm : PDI<opc, MRMSrcMem,
2706 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2707 !if(Is2Addr,
2708 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2709 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2710 [(set VR128:$dst, (unp_frag VR128:$src1,
2711 (bc_frag (memopv2i64
2712 addr:$src2))))]>;
2713 }
2715 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2716 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, unpckl, bc_v16i8,
2717 0>, VEX_4V;
2718 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, unpckl, bc_v8i16,
2719 0>, VEX_4V;
2720 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, unpckl, bc_v4i32,
2721 0>, VEX_4V;
2723 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2724 /// knew to collapse (bitconvert VT to VT) into its operand.
2725 def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2726 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2727 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2728 [(set VR128:$dst,
2729 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>, VEX_4V;
2730 def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2731 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2732 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2733 [(set VR128:$dst,
2734 (v2i64 (unpckl VR128:$src1,
2735 (memopv2i64 addr:$src2))))]>, VEX_4V;
2737 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, unpckh, bc_v16i8,
2738 0>, VEX_4V;
2739 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, unpckh, bc_v8i16,
2740 0>, VEX_4V;
2741 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, unpckh, bc_v4i32,
2742 0>, VEX_4V;
2744 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2745 /// knew to collapse (bitconvert VT to VT) into its operand.
2746 def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2747 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2748 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2749 [(set VR128:$dst,
2750 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>, VEX_4V;
2751 def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2752 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2753 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2754 [(set VR128:$dst,
2755 (v2i64 (unpckh VR128:$src1,
2756 (memopv2i64 addr:$src2))))]>, VEX_4V;
2757 }
2759 let Constraints = "$src1 = $dst" in {
2760 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, unpckl, bc_v16i8>;
2761 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, unpckl, bc_v8i16>;
2762 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, unpckl, bc_v4i32>;
2764 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2765 /// knew to collapse (bitconvert VT to VT) into its operand.
2766 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2767 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2768 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2769 [(set VR128:$dst,
2770 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>;
2771 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2772 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2773 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2774 [(set VR128:$dst,
2775 (v2i64 (unpckl VR128:$src1,
2776 (memopv2i64 addr:$src2))))]>;
2778 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, unpckh, bc_v16i8>;
2779 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, unpckh, bc_v8i16>;
2780 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, unpckh, bc_v4i32>;
2782 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2783 /// knew to collapse (bitconvert VT to VT) into its operand.
2784 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2785 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2786 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2787 [(set VR128:$dst,
2788 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>;
2789 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2790 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2791 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2792 [(set VR128:$dst,
2793 (v2i64 (unpckh VR128:$src1,
2794 (memopv2i64 addr:$src2))))]>;
2795 }
2797 } // ExeDomain = SSEPackedInt
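// Illustrative only: unpck{l,h} interleave the low/high halves of the two
// sources. A C sketch assuming the usual <emmintrin.h> mapping (helper name
// is ours):
//
//   #include <emmintrin.h>
//   __m128i zero_extend_lo_bytes(__m128i v) {
//     // punpcklbw against zero widens the low 8 bytes into 8 words
//     return _mm_unpacklo_epi8(v, _mm_setzero_si128());
//   }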
2799 //===---------------------------------------------------------------------===//
2800 // SSE2 - Packed Integer Extract and Insert
2801 //===---------------------------------------------------------------------===//
2803 let ExeDomain = SSEPackedInt in {
2804 multiclass sse2_pinsrw<bit Is2Addr = 1> {
2805 def rri : Ii8<0xC4, MRMSrcReg,
2806 (outs VR128:$dst), (ins VR128:$src1,
2807 GR32:$src2, i32i8imm:$src3),
2809 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2810 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2811 [(set VR128:$dst,
2812 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
2813 def rmi : Ii8<0xC4, MRMSrcMem,
2814 (outs VR128:$dst), (ins VR128:$src1,
2815 i16mem:$src2, i32i8imm:$src3),
2817 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2818 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2819 [(set VR128:$dst,
2820 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
2821 imm:$src3))]>;
2822 }
2825 let isAsmParserOnly = 1, Predicates = [HasAVX] in
2826 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
2827 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2828 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2829 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2830 imm:$src2))]>, OpSize, VEX;
2831 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
2832 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2833 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2834 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2835 imm:$src2))]>;
2838 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2839 defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
2840 def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
2841 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
2842 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2843 []>, OpSize, VEX_4V;
2844 }
2846 let Constraints = "$src1 = $dst" in
2847 defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;
2849 } // ExeDomain = SSEPackedInt
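// Illustrative only: a C sketch (usual <emmintrin.h> mapping; helper name
// is ours) of the word insert/extract pair defined above. The lane index is
// an immediate:
//
//   #include <emmintrin.h>
//   int swap_word0(int x, __m128i *v) {
//     int old = _mm_extract_epi16(*v, 0);  // pextrw $0: zero-extended word
//     *v = _mm_insert_epi16(*v, x, 0);     // pinsrw $0: replace word 0
//     return old;
//   }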
2851 //===---------------------------------------------------------------------===//
2852 // SSE2 - Packed Mask Creation
2853 //===---------------------------------------------------------------------===//
2855 let ExeDomain = SSEPackedInt in {
2857 let isAsmParserOnly = 1 in {
2858 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2859 "pmovmskb\t{$src, $dst|$dst, $src}",
2860 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
2861 def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
2862 "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
2864 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2865 "pmovmskb\t{$src, $dst|$dst, $src}",
2866 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2868 } // ExeDomain = SSEPackedInt
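// Illustrative only: pmovmskb packs the 16 byte sign bits into a GPR, which
// is handy for scanning. A C sketch assuming the usual <emmintrin.h> mapping
// (helper name is ours):
//
//   #include <emmintrin.h>
//   int has_zero_byte(__m128i v) {
//     __m128i m = _mm_cmpeq_epi8(v, _mm_setzero_si128());
//     return _mm_movemask_epi8(m) != 0;  // one mask bit per byte lane
//   }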
2870 //===---------------------------------------------------------------------===//
2871 // SSE2 - Conditional Store
2872 //===---------------------------------------------------------------------===//
2874 let ExeDomain = SSEPackedInt in {
2876 let isAsmParserOnly = 1 in {
2878 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
2879 (ins VR128:$src, VR128:$mask),
2880 "maskmovdqu\t{$mask, $src|$src, $mask}",
2881 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
2883 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
2884 (ins VR128:$src, VR128:$mask),
2885 "maskmovdqu\t{$mask, $src|$src, $mask}",
2886 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
2887 }
2890 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2891 "maskmovdqu\t{$mask, $src|$src, $mask}",
2892 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
2894 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2895 "maskmovdqu\t{$mask, $src|$src, $mask}",
2896 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
2898 } // ExeDomain = SSEPackedInt
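// Illustrative only: maskmovdqu stores only the bytes whose mask byte has
// its high bit set, through the implicit [E/RDI] pointer, bypassing the
// cache. A C sketch assuming the usual <emmintrin.h> mapping (helper name is
// ours):
//
//   #include <emmintrin.h>
//   void store_selected(__m128i data, __m128i mask, char *dst) {
//     _mm_maskmoveu_si128(data, mask, dst);  // non-temporal masked store
//   }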
2900 //===---------------------------------------------------------------------===//
2901 // SSE2 - Move Doubleword
2902 //===---------------------------------------------------------------------===//
2904 // Move Int Doubleword to Packed Double Int
2905 let isAsmParserOnly = 1 in {
2906 def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2907 "movd\t{$src, $dst|$dst, $src}",
2908 [(set VR128:$dst,
2909 (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
2910 def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2911 "movd\t{$src, $dst|$dst, $src}",
2912 [(set VR128:$dst,
2913 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
2914 VEX;
2915 }
2916 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2917 "movd\t{$src, $dst|$dst, $src}",
2918 [(set VR128:$dst,
2919 (v4i32 (scalar_to_vector GR32:$src)))]>;
2920 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2921 "movd\t{$src, $dst|$dst, $src}",
2922 [(set VR128:$dst,
2923 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
2924 def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
2925 "mov{d|q}\t{$src, $dst|$dst, $src}",
2926 [(set VR128:$dst,
2927 (v2i64 (scalar_to_vector GR64:$src)))]>;
2928 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
2929 "mov{d|q}\t{$src, $dst|$dst, $src}",
2930 [(set FR64:$dst, (bitconvert GR64:$src))]>;
2933 // Move Int Doubleword to Single Scalar
2934 let isAsmParserOnly = 1 in {
2935 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2936 "movd\t{$src, $dst|$dst, $src}",
2937 [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
2939 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2940 "movd\t{$src, $dst|$dst, $src}",
2941 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
2942 VEX;
2943 }
2944 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2945 "movd\t{$src, $dst|$dst, $src}",
2946 [(set FR32:$dst, (bitconvert GR32:$src))]>;
2948 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2949 "movd\t{$src, $dst|$dst, $src}",
2950 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
2952 // Move Packed Doubleword Int to Packed Double Int
2953 let isAsmParserOnly = 1 in {
2954 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2955 "movd\t{$src, $dst|$dst, $src}",
2956 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2957 (iPTR 0)))]>, VEX;
2958 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
2959 (ins i32mem:$dst, VR128:$src),
2960 "movd\t{$src, $dst|$dst, $src}",
2961 [(store (i32 (vector_extract (v4i32 VR128:$src),
2962 (iPTR 0))), addr:$dst)]>, VEX;
2963 }
2964 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2965 "movd\t{$src, $dst|$dst, $src}",
2966 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2967 (iPTR 0)))]>;
2968 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
2969 "movd\t{$src, $dst|$dst, $src}",
2970 [(store (i32 (vector_extract (v4i32 VR128:$src),
2971 (iPTR 0))), addr:$dst)]>;
2973 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
2974 "mov{d|q}\t{$src, $dst|$dst, $src}",
2975 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
2976 (iPTR 0)))]>;
2977 def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
2978 "movq\t{$src, $dst|$dst, $src}",
2979 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
2981 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
2982 "mov{d|q}\t{$src, $dst|$dst, $src}",
2983 [(set GR64:$dst, (bitconvert FR64:$src))]>;
2984 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
2985 "movq\t{$src, $dst|$dst, $src}",
2986 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
2988 // Move Scalar Single to Double Int
2989 let isAsmParserOnly = 1 in {
2990 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
2991 "movd\t{$src, $dst|$dst, $src}",
2992 [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
2993 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
2994 "movd\t{$src, $dst|$dst, $src}",
2995 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
2997 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
2998 "movd\t{$src, $dst|$dst, $src}",
2999 [(set GR32:$dst, (bitconvert FR32:$src))]>;
3000 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3001 "movd\t{$src, $dst|$dst, $src}",
3002 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
3004 // movd / movq to XMM register zero-extends
3005 let AddedComplexity = 15, isAsmParserOnly = 1 in {
3006 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3007 "movd\t{$src, $dst|$dst, $src}",
3008 [(set VR128:$dst, (v4i32 (X86vzmovl
3009 (v4i32 (scalar_to_vector GR32:$src)))))]>,
3010 VEX;
3011 def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3012 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3013 [(set VR128:$dst, (v2i64 (X86vzmovl
3014 (v2i64 (scalar_to_vector GR64:$src)))))]>,
3015 VEX, VEX_W;
3016 }
3017 let AddedComplexity = 15 in {
3018 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3019 "movd\t{$src, $dst|$dst, $src}",
3020 [(set VR128:$dst, (v4i32 (X86vzmovl
3021 (v4i32 (scalar_to_vector GR32:$src)))))]>;
3022 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3023 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3024 [(set VR128:$dst, (v2i64 (X86vzmovl
3025 (v2i64 (scalar_to_vector GR64:$src)))))]>;
3026 }
3028 let AddedComplexity = 20 in {
3029 let isAsmParserOnly = 1 in
3030 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3031 "movd\t{$src, $dst|$dst, $src}",
3032 [(set VR128:$dst,
3033 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3034 (loadi32 addr:$src))))))]>,
3035 VEX;
3036 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3037 "movd\t{$src, $dst|$dst, $src}",
3038 [(set VR128:$dst,
3039 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3040 (loadi32 addr:$src))))))]>;
3042 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
3043 (MOVZDI2PDIrm addr:$src)>;
3044 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
3045 (MOVZDI2PDIrm addr:$src)>;
3046 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
3047 (MOVZDI2PDIrm addr:$src)>;
3048 }
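// Illustrative only: the patterns above rely on movd/movq zeroing the upper
// lanes of the destination. A C sketch assuming the usual <emmintrin.h>
// mapping (helper name is ours):
//
//   #include <emmintrin.h>
//   __m128i widen(int x) {
//     return _mm_cvtsi32_si128(x);  // movd: x in dword 0, dwords 1-3 zeroed
//   }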
3050 //===---------------------------------------------------------------------===//
3051 // SSE2 - Move Quadword
3052 //===---------------------------------------------------------------------===//
3054 // Move Quadword Int to Packed Quadword Int
3055 let isAsmParserOnly = 1 in
3056 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3057 "vmovq\t{$src, $dst|$dst, $src}",
3058 [(set VR128:$dst,
3059 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3060 VEX, Requires<[HasAVX]>;
3061 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3062 "movq\t{$src, $dst|$dst, $src}",
3063 [(set VR128:$dst,
3064 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3065 Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
3067 // Move Packed Quadword Int to Quadword Int
3068 let isAsmParserOnly = 1 in
3069 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3070 "movq\t{$src, $dst|$dst, $src}",
3071 [(store (i64 (vector_extract (v2i64 VR128:$src),
3072 (iPTR 0))), addr:$dst)]>, VEX;
3073 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3074 "movq\t{$src, $dst|$dst, $src}",
3075 [(store (i64 (vector_extract (v2i64 VR128:$src),
3076 (iPTR 0))), addr:$dst)]>;
3078 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
3079 (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
3081 // Store / copy the lower 64 bits of an XMM register.
3082 let isAsmParserOnly = 1 in
3083 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3084 "movq\t{$src, $dst|$dst, $src}",
3085 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
3086 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3087 "movq\t{$src, $dst|$dst, $src}",
3088 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
3090 let AddedComplexity = 20, isAsmParserOnly = 1 in
3091 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3092 "vmovq\t{$src, $dst|$dst, $src}",
3093 [(set VR128:$dst,
3094 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3095 (loadi64 addr:$src))))))]>,
3096 XS, VEX, Requires<[HasAVX]>;
3098 let AddedComplexity = 20 in {
3099 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3100 "movq\t{$src, $dst|$dst, $src}",
3101 [(set VR128:$dst,
3102 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3103 (loadi64 addr:$src))))))]>,
3104 XS, Requires<[HasSSE2]>;
3106 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
3107 (MOVZQI2PQIrm addr:$src)>;
3108 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
3109 (MOVZQI2PQIrm addr:$src)>;
3110 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
3111 }
3113 // Move from XMM to XMM, clearing the upper 64 bits. Note that the IA-32
3114 // documentation is wrong here: movq xmm1, xmm2 does clear the high bits.
3115 let isAsmParserOnly = 1, AddedComplexity = 15 in
3116 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3117 "vmovq\t{$src, $dst|$dst, $src}",
3118 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3119 XS, VEX, Requires<[HasAVX]>;
3120 let AddedComplexity = 15 in
3121 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3122 "movq\t{$src, $dst|$dst, $src}",
3123 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3124 XS, Requires<[HasSSE2]>;
3126 let AddedComplexity = 20, isAsmParserOnly = 1 in
3127 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3128 "vmovq\t{$src, $dst|$dst, $src}",
3129 [(set VR128:$dst, (v2i64 (X86vzmovl
3130 (loadv2i64 addr:$src))))]>,
3131 XS, VEX, Requires<[HasAVX]>;
3132 let AddedComplexity = 20 in {
3133 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3134 "movq\t{$src, $dst|$dst, $src}",
3135 [(set VR128:$dst, (v2i64 (X86vzmovl
3136 (loadv2i64 addr:$src))))]>,
3137 XS, Requires<[HasSSE2]>;
3139 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
3140 (MOVZPQILo2PQIrm addr:$src)>;
3141 }
3143 // Instructions to match in the assembler
3144 let isAsmParserOnly = 1 in {
3145 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3146 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3147 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3148 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3149 // Recognize "movd" with GR64 destination, but encode as a "movq"
3150 def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3151 "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3154 // Instructions for the disassembler
3155 // xr = XMM register
3158 let isAsmParserOnly = 1, Predicates = [HasAVX] in
3159 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3160 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
3161 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3162 "movq\t{$src, $dst|$dst, $src}", []>, XS;
3164 //===---------------------------------------------------------------------===//
3165 // SSE2 - Misc Instructions
3166 //===---------------------------------------------------------------------===//
3168 // Flush cache
3169 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3170 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3171 TB, Requires<[HasSSE2]>;
3173 // Load, store, and memory fence
3174 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3175 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
3176 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3177 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
3178 def : Pat<(X86LFence), (LFENCE)>;
3179 def : Pat<(X86MFence), (MFENCE)>;
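// Illustrative only: the fences order memory operations, e.g. making a data
// store globally visible before publishing a flag. A C sketch assuming the
// usual <emmintrin.h> mapping (helper and variable names are ours):
//
//   #include <emmintrin.h>
//   void publish(int *data, volatile int *flag) {
//     *data = 42;
//     _mm_mfence();  // mfence: prior stores drain before the flag store
//     *flag = 1;
//   }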
3182 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3183 // was introduced with SSE2, it's backward compatible.
3184 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
3186 // Alias instruction that maps an all-ones vector to pcmpeqd for sse.
3187 // We set canFoldAsLoad because this can be converted to a constant-pool
3188 // load of an all-ones value if folding it would be beneficial.
3189 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3190 isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
3191 // FIXME: Change encoding to pseudo.
3192 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3193 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
3195 //===---------------------------------------------------------------------===//
3196 // SSE3 - Conversion Instructions
3197 //===---------------------------------------------------------------------===//
3199 // Convert Packed Double FP to Packed DW Integers
3200 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3201 // The assembler can recognize rr 256-bit instructions by seeing a ymm
3202 // register, but the same isn't true when using memory operands instead.
3203 // Provide other assembly rr and rm forms to address this explicitly.
3204 def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3205 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3206 def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3207 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3209 // XMM only
3210 def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3211 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3212 def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3213 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3215 // YMM only
3216 def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3217 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
3218 def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
3219 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
3222 def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3223 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3224 def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3225 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3227 // Convert Packed DW Integers to Packed Double FP
3228 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3229 def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3230 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3231 def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3232 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3233 def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
3234 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3235 def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
3236 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3239 def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3240 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3241 def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3242 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3244 // AVX 256-bit register conversion intrinsics
3245 def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
3246 (VCVTDQ2PDYrr VR128:$src)>;
3247 def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
3248 (VCVTDQ2PDYrm addr:$src)>;
3250 def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
3251 (VCVTPD2DQYrr VR256:$src)>;
3252 def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
3253 (VCVTPD2DQYrm addr:$src)>;
3255 //===---------------------------------------------------------------------===//
3256 // SSE3 - Move Instructions
3257 //===---------------------------------------------------------------------===//
3259 // Replicate Single FP
3260 multiclass sse3_replicate_sfp<bits<8> op, PatFrag rep_frag, string OpcodeStr> {
3261 def rr : S3SI<op, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3262 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3263 [(set VR128:$dst, (v4f32 (rep_frag
3264 VR128:$src, (undef))))]>;
3265 def rm : S3SI<op, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3266 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3267 [(set VR128:$dst, (rep_frag
3268 (memopv4f32 addr:$src), (undef)))]>;
3269 }
3271 multiclass sse3_replicate_sfp_y<bits<8> op, PatFrag rep_frag,
3272 string OpcodeStr> {
3273 def rr : S3SI<op, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3274 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
3275 def rm : S3SI<op, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3276 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
3277 }
3279 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3280 // FIXME: Merge above classes when we have patterns for the ymm version
3281 defm VMOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "vmovshdup">, VEX;
3282 defm VMOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "vmovsldup">, VEX;
3283 defm VMOVSHDUPY : sse3_replicate_sfp_y<0x16, movshdup, "vmovshdup">, VEX;
3284 defm VMOVSLDUPY : sse3_replicate_sfp_y<0x12, movsldup, "vmovsldup">, VEX;
3285 }
3286 defm MOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "movshdup">;
3287 defm MOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "movsldup">;
3289 // Replicate Double FP
3290 multiclass sse3_replicate_dfp<string OpcodeStr> {
3291 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3292 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3293 [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
3294 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
3295 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3296 [(set VR128:$dst,
3297 (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
3298 (undef))))]>;
3299 }
3301 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
3302 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3303 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3304 []>;
3305 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3306 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3307 []>;
3308 }
3310 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3311 // FIXME: Merge above classes when we have patterns for the ymm version
3312 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
3313 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
3314 }
3315 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
3317 // Move Unaligned Integer
3318 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3319 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3320 "vlddqu\t{$src, $dst|$dst, $src}",
3321 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
3322 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3323 "vlddqu\t{$src, $dst|$dst, $src}",
3324 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
3325 }
3326 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3327 "lddqu\t{$src, $dst|$dst, $src}",
3328 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
3330 def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
3331 (undef)),
3332 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3334 // Several Move patterns
3335 let AddedComplexity = 5 in {
3336 def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
3337 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3338 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3339 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3340 def : Pat<(movddup (memopv2i64 addr:$src), (undef)),
3341 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3342 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
3343 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3344 }
3346 // vector_shuffle v1, <undef> <1, 1, 3, 3>
3347 let AddedComplexity = 15 in
3348 def : Pat<(v4i32 (movshdup VR128:$src, (undef))),
3349 (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3350 let AddedComplexity = 20 in
3351 def : Pat<(v4i32 (movshdup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3352 (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
3354 // vector_shuffle v1, <undef> <0, 0, 2, 2>
3355 let AddedComplexity = 15 in
3356 def : Pat<(v4i32 (movsldup VR128:$src, (undef))),
3357 (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3358 let AddedComplexity = 20 in
3359 def : Pat<(v4i32 (movsldup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3360 (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
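// Illustrative only: movshdup/movsldup duplicate the odd/even float lanes,
// matching the <1,1,3,3> and <0,0,2,2> shuffles above. A C sketch assuming
// the usual <pmmintrin.h> mapping (helper name is ours):
//
//   #include <pmmintrin.h>
//   __m128 dup_odd(__m128 v) {
//     return _mm_movehdup_ps(v);  // movshdup: <v1, v1, v3, v3>
//   }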
3362 //===---------------------------------------------------------------------===//
3363 // SSE3 - Arithmetic
3364 //===---------------------------------------------------------------------===//
3366 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
3367 X86MemOperand x86memop, bit Is2Addr = 1> {
3368 def rr : I<0xD0, MRMSrcReg,
3369 (outs RC:$dst), (ins RC:$src1, RC:$src2),
3370 !if(Is2Addr,
3371 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3372 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3373 [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
3374 def rm : I<0xD0, MRMSrcMem,
3375 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3376 !if(Is2Addr,
3377 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3378 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3379 [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
3380 }
3382 let isAsmParserOnly = 1, Predicates = [HasAVX],
3383 ExeDomain = SSEPackedDouble in {
3384 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
3385 f128mem, 0>, XD, VEX_4V;
3386 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
3387 f128mem, 0>, OpSize, VEX_4V;
3388 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
3389 f256mem, 0>, XD, VEX_4V;
3390 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
3391 f256mem, 0>, OpSize, VEX_4V;
3392 }
3393 let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
3394 ExeDomain = SSEPackedDouble in {
3395 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
3396 f128mem>, XD;
3397 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
3398 f128mem>, TB, OpSize;
3399 }
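// Illustrative only: addsubps subtracts in the even lanes and adds in the
// odd lanes, the core step of interleaved complex arithmetic. A C sketch
// assuming the usual <pmmintrin.h> mapping (helper name is ours):
//
//   #include <pmmintrin.h>
//   __m128 alt_add_sub(__m128 a, __m128 b) {
//     return _mm_addsub_ps(a, b);  // <a0-b0, a1+b1, a2-b2, a3+b3>
//   }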
3401 //===---------------------------------------------------------------------===//
3402 // SSE3 Instructions
3403 //===---------------------------------------------------------------------===//
3405 // Horizontal ops
3406 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3407 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3408 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3409 !if(Is2Addr,
3410 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3411 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3412 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3414 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3415 !if(Is2Addr,
3416 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3417 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3418 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3419 }
3420 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3421 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3422 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3423 !if(Is2Addr,
3424 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3425 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3426 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3428 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3429 !if(Is2Addr,
3430 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3431 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3432 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3433 }
3435 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3436 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
3437 int_x86_sse3_hadd_ps, 0>, VEX_4V;
3438 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
3439 int_x86_sse3_hadd_pd, 0>, VEX_4V;
3440 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
3441 int_x86_sse3_hsub_ps, 0>, VEX_4V;
3442 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
3443 int_x86_sse3_hsub_pd, 0>, VEX_4V;
3444 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
3445 int_x86_avx_hadd_ps_256, 0>, VEX_4V;
3446 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
3447 int_x86_avx_hadd_pd_256, 0>, VEX_4V;
3448 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
3449 int_x86_avx_hsub_ps_256, 0>, VEX_4V;
3450 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
3451 int_x86_avx_hsub_pd_256, 0>, VEX_4V;
3452 }
3454 let Constraints = "$src1 = $dst" in {
3455 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
3456 int_x86_sse3_hadd_ps>;
3457 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
3458 int_x86_sse3_hadd_pd>;
3459 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
3460 int_x86_sse3_hsub_ps>;
3461 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
3462 int_x86_sse3_hsub_pd>;
3463 }
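// Illustrative only: the horizontal ops add/subtract adjacent lanes within
// each source. A C sketch assuming the usual <pmmintrin.h> mapping (helper
// name is ours):
//
//   #include <pmmintrin.h>
//   float sum4(__m128 v) {
//     v = _mm_hadd_ps(v, v);   // haddps: <v0+v1, v2+v3, v0+v1, v2+v3>
//     v = _mm_hadd_ps(v, v);   // lane 0 now holds the full sum
//     return _mm_cvtss_f32(v);
//   }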
3465 //===---------------------------------------------------------------------===//
3466 // SSSE3 - Packed Absolute Instructions
3467 //===---------------------------------------------------------------------===//
3470 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
3471 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
3472 PatFrag mem_frag128, Intrinsic IntId128> {
3473 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3474 (ins VR128:$src),
3475 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3476 [(set VR128:$dst, (IntId128 VR128:$src))]>,
3477 OpSize;
3479 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3480 (ins i128mem:$src),
3481 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3482 [(set VR128:$dst,
3483 (IntId128
3484 (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
3485 }
3487 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3488 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
3489 int_x86_ssse3_pabs_b_128>, VEX;
3490 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
3491 int_x86_ssse3_pabs_w_128>, VEX;
3492 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
3493 int_x86_ssse3_pabs_d_128>, VEX;
3494 }
3496 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
3497 int_x86_ssse3_pabs_b_128>;
3498 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
3499 int_x86_ssse3_pabs_w_128>;
3500 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
3501 int_x86_ssse3_pabs_d_128>;
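// Illustrative only: pabs{b,w,d} take the elementwise absolute value in a
// single instruction. A C sketch assuming the usual <tmmintrin.h> mapping
// (helper name is ours):
//
//   #include <tmmintrin.h>
//   __m128i abs_bytes(__m128i v) {
//     return _mm_abs_epi8(v);  // pabsb: |v[i]| per byte (|-128| stays -128)
//   }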
3503 //===---------------------------------------------------------------------===//
3504 // SSSE3 - Packed Binary Operator Instructions
3505 //===---------------------------------------------------------------------===//
3507 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
3508 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
3509 PatFrag mem_frag128, Intrinsic IntId128,
3510 bit Is2Addr = 1> {
3511 let isCommutable = 1 in
3512 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3513 (ins VR128:$src1, VR128:$src2),
3514 !if(Is2Addr,
3515 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3516 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3517 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3518 OpSize;
3519 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3520 (ins VR128:$src1, i128mem:$src2),
3521 !if(Is2Addr,
3522 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3523 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3524 [(set VR128:$dst,
3525 (IntId128 VR128:$src1,
3526 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
3527 }
3529 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3530 let isCommutable = 0 in {
3531 defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
3532 int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
3533 defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
3534 int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
3535 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
3536 int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
3537 defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
3538 int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
3539 defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
3540 int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
3541 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
3542 int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
3543 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
3544 int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
3545 defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
3546 int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
3547 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
3548 int_x86_ssse3_psign_b_128, 0>, VEX_4V;
3549 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
3550 int_x86_ssse3_psign_w_128, 0>, VEX_4V;
3551 defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
3552 int_x86_ssse3_psign_d_128, 0>, VEX_4V;
3553 }
3554 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
3555 int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
3556 }
3558 // None of these have i8 immediate fields.
3559 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
3560 let isCommutable = 0 in {
3561 defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
3562 int_x86_ssse3_phadd_w_128>;
3563 defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
3564 int_x86_ssse3_phadd_d_128>;
3565 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
3566 int_x86_ssse3_phadd_sw_128>;
3567 defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
3568 int_x86_ssse3_phsub_w_128>;
3569 defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
3570 int_x86_ssse3_phsub_d_128>;
3571 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
3572 int_x86_ssse3_phsub_sw_128>;
3573 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
3574 int_x86_ssse3_pmadd_ub_sw_128>;
3575 defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv16i8,
3576 int_x86_ssse3_pshuf_b_128>;
3577 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
3578 int_x86_ssse3_psign_b_128>;
3579 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
3580 int_x86_ssse3_psign_w_128>;
3581 defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
3582 int_x86_ssse3_psign_d_128>;
3583 }
3584 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
3585 int_x86_ssse3_pmul_hr_sw_128>;
3586 }
3588 def : Pat<(X86pshufb VR128:$src, VR128:$mask),
3589 (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
3590 def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
3591 (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
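// Illustrative only: pshufb is a byte-granularity table lookup; a mask byte
// with its high bit set zeroes that lane. A C sketch assuming the usual
// <tmmintrin.h> mapping (helper name is ours):
//
//   #include <tmmintrin.h>
//   __m128i reverse_bytes(__m128i v) {
//     const __m128i rev = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7,
//                                      8, 9, 10, 11, 12, 13, 14, 15);
//     return _mm_shuffle_epi8(v, rev);  // pshufb: lane i <- v[rev[i]]
//   }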
3593 //===---------------------------------------------------------------------===//
3594 // SSSE3 - Packed Align Instruction Patterns
3595 //===---------------------------------------------------------------------===//
3597 multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
3598 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
3599 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
3600 !if(Is2Addr,
3601 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3602 !strconcat(asm,
3603 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3604 []>, OpSize;
3605 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
3606 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
3607 !if(Is2Addr,
3608 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3609 !strconcat(asm,
3610 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3611 []>, OpSize;
3612 }
3614 let isAsmParserOnly = 1, Predicates = [HasAVX] in
3615 defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
3616 let Constraints = "$src1 = $dst" in
3617 defm PALIGN : ssse3_palign<"palignr">;
3619 let AddedComplexity = 5 in {
3620 def : Pat<(v4i32 (palign:$src3 VR128:$src1, VR128:$src2)),
3621 (PALIGNR128rr VR128:$src2, VR128:$src1,
3622 (SHUFFLE_get_palign_imm VR128:$src3))>,
3623 Requires<[HasSSSE3]>;
3624 def : Pat<(v4f32 (palign:$src3 VR128:$src1, VR128:$src2)),
3625 (PALIGNR128rr VR128:$src2, VR128:$src1,
3626 (SHUFFLE_get_palign_imm VR128:$src3))>,
3627 Requires<[HasSSSE3]>;
3628 def : Pat<(v8i16 (palign:$src3 VR128:$src1, VR128:$src2)),
3629 (PALIGNR128rr VR128:$src2, VR128:$src1,
3630 (SHUFFLE_get_palign_imm VR128:$src3))>,
3631 Requires<[HasSSSE3]>;
3632 def : Pat<(v16i8 (palign:$src3 VR128:$src1, VR128:$src2)),
3633 (PALIGNR128rr VR128:$src2, VR128:$src1,
3634 (SHUFFLE_get_palign_imm VR128:$src3))>,
3635 Requires<[HasSSSE3]>;
3636 }
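// Illustrative only: palignr concatenates the two sources and extracts a
// byte-aligned 16-byte window, which is how misaligned streams get stitched
// together. A C sketch assuming the usual <tmmintrin.h> mapping (helper name
// is ours):
//
//   #include <tmmintrin.h>
//   __m128i window(__m128i hi, __m128i lo) {
//     return _mm_alignr_epi8(hi, lo, 4);  // bytes 4..19 of the pair (hi:lo)
//   }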
3638 //===---------------------------------------------------------------------===//
3639 // SSSE3 Misc Instructions
3640 //===---------------------------------------------------------------------===//
3642 // Thread synchronization
3643 def MONITOR : I<0x01, MRM_C8, (outs), (ins), "monitor",
3644 [(int_x86_sse3_monitor EAX, ECX, EDX)]>, TB, Requires<[HasSSE3]>;
3645 def MWAIT : I<0x01, MRM_C9, (outs), (ins), "mwait",
3646 [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
3648 //===---------------------------------------------------------------------===//
3649 // Non-Instruction Patterns
3650 //===---------------------------------------------------------------------===//
3652 // extload f32 -> f64. This matches load+fextend because we have a hack in
3653 // the isel (PreprocessForFPConvert) that can introduce loads after dag
3654 // combining is done.
3655 // Since these loads aren't folded into the fextend, we have to match it
3656 // explicitly here.
3657 let Predicates = [HasSSE2] in
3658 def : Pat<(fextend (loadf32 addr:$src)),
3659 (CVTSS2SDrm addr:$src)>;
3662 let Predicates = [HasSSE2] in {
3663 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
3664 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
3665 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
3666 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
3667 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
3668 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
3669 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
3670 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
3671 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
3672 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
3673 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
3674 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
3675 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
3676 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
3677 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
3678 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
3679 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
3680 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
3681 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
3682 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
3683 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
3684 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
3685 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
3686 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
3687 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
3688 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
3689 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
3690 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
3691 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
3692 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
3693 }
3695 // Move scalar to XMM zero-extended
3696 // movd to XMM register zero-extends
3697 let AddedComplexity = 15 in {
3698 // Zero a VR128, then do a MOVS{S|D} to the lower bits.
3699 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
3700 (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
3701 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
3702 (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
3703 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
3704 (MOVSSrr (v4f32 (V_SET0PS)),
3705 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
3706 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
3707 (MOVSSrr (v4i32 (V_SET0PI)),
3708 (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
3709 }
3711 // Splat v2f64 / v2i64
3712 let AddedComplexity = 10 in {
3713 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
3714 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3715 def : Pat<(unpckh (v2f64 VR128:$src), (undef)),
3716 (UNPCKHPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3717 def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
3718 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3719 def : Pat<(unpckh (v2i64 VR128:$src), (undef)),
3720 (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3721 }
3723 // Special unary SHUFPSrri case.
3724 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
3725 (SHUFPSrri VR128:$src1, VR128:$src1,
3726 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3727 let AddedComplexity = 5 in
3728 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3729 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3730 Requires<[HasSSE2]>;
3731 // Special unary SHUFPDrri case.
3732 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
3733 (SHUFPDrri VR128:$src1, VR128:$src1,
3734 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3735 Requires<[HasSSE2]>;
3736 // Special unary SHUFPDrri case.
3737 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
3738 (SHUFPDrri VR128:$src1, VR128:$src1,
3739 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3740 Requires<[HasSSE2]>;
3741 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
3742 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3743 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3744 Requires<[HasSSE2]>;
3746 // Special binary v4i32 shuffle cases with SHUFPS.
3747 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
3748 (SHUFPSrri VR128:$src1, VR128:$src2,
3749 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3750 Requires<[HasSSE2]>;
3751 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)))),
3752 (SHUFPSrmi VR128:$src1, addr:$src2,
3753 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3754 Requires<[HasSSE2]>;
3755 // Special binary v2i64 shuffle cases using SHUFPDrri.
3756 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
3757 (SHUFPDrri VR128:$src1, VR128:$src2,
3758 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3759 Requires<[HasSSE2]>;
3761 // vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
3762 let AddedComplexity = 15 in {
3763 def : Pat<(v4i32 (unpckl_undef:$src2 VR128:$src, (undef))),
3764 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3765 Requires<[OptForSpeed, HasSSE2]>;
3766 def : Pat<(v4f32 (unpckl_undef:$src2 VR128:$src, (undef))),
3767 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3768 Requires<[OptForSpeed, HasSSE2]>;
3769 }
3770 let AddedComplexity = 10 in {
3771 def : Pat<(v4f32 (unpckl_undef VR128:$src, (undef))),
3772 (UNPCKLPSrr VR128:$src, VR128:$src)>;
3773 def : Pat<(v16i8 (unpckl_undef VR128:$src, (undef))),
3774 (PUNPCKLBWrr VR128:$src, VR128:$src)>;
3775 def : Pat<(v8i16 (unpckl_undef VR128:$src, (undef))),
3776 (PUNPCKLWDrr VR128:$src, VR128:$src)>;
3777 def : Pat<(v4i32 (unpckl_undef VR128:$src, (undef))),
3778 (PUNPCKLDQrr VR128:$src, VR128:$src)>;
3779 }
3781 // vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
3782 let AddedComplexity = 15 in {
3783 def : Pat<(v4i32 (unpckh_undef:$src2 VR128:$src, (undef))),
3784 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3785 Requires<[OptForSpeed, HasSSE2]>;
3786 def : Pat<(v4f32 (unpckh_undef:$src2 VR128:$src, (undef))),
3787 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3788 Requires<[OptForSpeed, HasSSE2]>;
3789 }
3790 let AddedComplexity = 10 in {
3791 def : Pat<(v4f32 (unpckh_undef VR128:$src, (undef))),
3792 (UNPCKHPSrr VR128:$src, VR128:$src)>;
3793 def : Pat<(v16i8 (unpckh_undef VR128:$src, (undef))),
3794 (PUNPCKHBWrr VR128:$src, VR128:$src)>;
3795 def : Pat<(v8i16 (unpckh_undef VR128:$src, (undef))),
3796 (PUNPCKHWDrr VR128:$src, VR128:$src)>;
3797 def : Pat<(v4i32 (unpckh_undef VR128:$src, (undef))),
3798 (PUNPCKHDQrr VR128:$src, VR128:$src)>;
3799 }
3801 let AddedComplexity = 20 in {
3802 // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
3803 def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
3804 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
3806 // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
3807 def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
3808 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
3810 // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
3811 def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
3812 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3813 def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
3814 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3815 }
3817 let AddedComplexity = 20 in {
3818 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
3819 def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
3820 (MOVLPSrm VR128:$src1, addr:$src2)>;
3821 def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
3822 (MOVLPDrm VR128:$src1, addr:$src2)>;
3823 def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
3824 (MOVLPSrm VR128:$src1, addr:$src2)>;
3825 def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
3826 (MOVLPDrm VR128:$src1, addr:$src2)>;
3827 }
3829 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
3830 def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3831 (MOVLPSmr addr:$src1, VR128:$src2)>;
3832 def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3833 (MOVLPDmr addr:$src1, VR128:$src2)>;
3834 def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
3835 addr:$src1),
3836 (MOVLPSmr addr:$src1, VR128:$src2)>;
3837 def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3838 (MOVLPDmr addr:$src1, VR128:$src2)>;
3840 let AddedComplexity = 15 in {
3841 // Setting the lowest element in the vector.
3842 def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
3843 (MOVSSrr (v4i32 VR128:$src1),
3844 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
3845 def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
3846 (MOVSDrr (v2i64 VR128:$src1),
3847 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
3849 // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
3850 def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
3851 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3852 Requires<[HasSSE2]>;
3853 def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
3854 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3855 Requires<[HasSSE2]>;
3856 }
3858 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
3859 // fall back to this for SSE1)
3860 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
3861 (SHUFPSrri VR128:$src2, VR128:$src1,
3862 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3864 // Set lowest element and zero upper elements.
3865 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
3866 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
3868 // Some special case pandn patterns.
3869 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3870 VR128:$src2)),
3871 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3872 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3873 VR128:$src2)),
3874 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3875 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3877 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3879 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3880 (memop addr:$src2))),
3881 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3882 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3883 (memop addr:$src2))),
3884 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3885 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3886 (memop addr:$src2))),
3887 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
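// These fold an explicit xor-with-all-ones into pandn, which computes
// (~$src1 & $src2) directly. A minimal C sketch of the shape that lowers
// this way (assuming the usual <emmintrin.h> intrinsic name):
//
//   #include <emmintrin.h>
//   __m128i andnot(__m128i a, __m128i b) {
//     return _mm_andnot_si128(a, b);   // (~a) & b -> a single pandn
//   }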
3889 // vector -> vector casts
3890 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
3891 (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
3892 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
3893 (CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
3895 // Use movaps / movups for SSE integer load / store (one byte shorter).
3896 let Predicates = [HasSSE1] in {
3897 def : Pat<(alignedloadv4i32 addr:$src),
3898 (MOVAPSrm addr:$src)>;
3899 def : Pat<(loadv4i32 addr:$src),
3900 (MOVUPSrm addr:$src)>;
3901 def : Pat<(alignedloadv2i64 addr:$src),
3902 (MOVAPSrm addr:$src)>;
3903 def : Pat<(loadv2i64 addr:$src),
3904 (MOVUPSrm addr:$src)>;
3906 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3907 (MOVAPSmr addr:$dst, VR128:$src)>;
3908 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3909 (MOVAPSmr addr:$dst, VR128:$src)>;
3910 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3911 (MOVAPSmr addr:$dst, VR128:$src)>;
3912 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3913 (MOVAPSmr addr:$dst, VR128:$src)>;
3914 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3915 (MOVUPSmr addr:$dst, VR128:$src)>;
3916 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3917 (MOVUPSmr addr:$dst, VR128:$src)>;
3918 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3919 (MOVUPSmr addr:$dst, VR128:$src)>;
3920 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3921 (MOVUPSmr addr:$dst, VR128:$src)>;
3924 // Use vmovaps/vmovups for AVX 128-bit integer load/store (one byte shorter).
3925 let Predicates = [HasAVX] in {
3926 def : Pat<(alignedloadv4i32 addr:$src),
3927 (VMOVAPSrm addr:$src)>;
3928 def : Pat<(loadv4i32 addr:$src),
3929 (VMOVUPSrm addr:$src)>;
3930 def : Pat<(alignedloadv2i64 addr:$src),
3931 (VMOVAPSrm addr:$src)>;
3932 def : Pat<(loadv2i64 addr:$src),
3933 (VMOVUPSrm addr:$src)>;
3935 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3936 (VMOVAPSmr addr:$dst, VR128:$src)>;
3937 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3938 (VMOVAPSmr addr:$dst, VR128:$src)>;
3939 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3940 (VMOVAPSmr addr:$dst, VR128:$src)>;
3941 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3942 (VMOVAPSmr addr:$dst, VR128:$src)>;
3943 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3944 (VMOVUPSmr addr:$dst, VR128:$src)>;
3945 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3946 (VMOVUPSmr addr:$dst, VR128:$src)>;
3947 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3948 (VMOVUPSmr addr:$dst, VR128:$src)>;
3949 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3950 (VMOVUPSmr addr:$dst, VR128:$src)>;
3953 //===----------------------------------------------------------------------===//
3954 // SSE4.1 - Packed Move with Sign/Zero Extend
3955 //===----------------------------------------------------------------------===//
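// These instructions sign/zero-extend the low elements of their source,
// e.g. pmovsxbw widens the low 8 bytes to 8 words. A minimal C sketch
// (assuming the usual <smmintrin.h> intrinsic name):
//
//   #include <smmintrin.h>
//   __m128i widen(__m128i v) {
//     return _mm_cvtepi8_epi16(v);   // pmovsxbw: 8 x i8 -> 8 x i16
//   }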
3957 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3958 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3959 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3960 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3962   def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3963                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3964        [(set VR128:$dst,
3965          (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
3966        OpSize;
3967 }
3969 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3970 defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
3971                                      VEX;
3972 defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
3973                                      VEX;
3974 defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
3975                                      VEX;
3976 defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
3977                                      VEX;
3978 defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
3979                                      VEX;
3980 defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
3981                                      VEX;
3982 }
3984 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
3985 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
3986 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
3987 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
3988 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
3989 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
3991 // Common patterns involving scalar load.
3992 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
3993 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
3994 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
3995 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
3997 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
3998 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
3999 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
4000 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4002 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
4003 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4004 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
4005 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4007 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
4008 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4009 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
4010 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4012 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
4013 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4014 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
4015 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4017 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
4018 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4019 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
4020 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4023 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4024 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4025 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4026 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4028   def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4029                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4030        [(set VR128:$dst,
4031          (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
4032        OpSize;
4033 }
4035 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4036 defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
4037                                      VEX;
4038 defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
4039                                      VEX;
4040 defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
4041                                      VEX;
4042 defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
4043                                      VEX;
4044 }
4046 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
4047 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
4048 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
4049 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
4051 // Common patterns involving scalar load
4052 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
4053 (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
4054 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
4055 (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;
4057 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
4058 (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
4059 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
4060 (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
4063 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4064 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4065 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4066 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4068   // Expecting an i16 load, any-extended to an i32 value.
4069 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
4070 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4071 [(set VR128:$dst, (IntId (bitconvert
4072                     (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
4073                  OpSize;
4074 }
4076 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4077 defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
4078                                      VEX;
4079 defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
4080                                      VEX;
4081 }
4082 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
4083 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
4085 // Common patterns involving scalar load
4086 def : Pat<(int_x86_sse41_pmovsxbq
4087 (bitconvert (v4i32 (X86vzmovl
4088 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4089 (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;
4091 def : Pat<(int_x86_sse41_pmovzxbq
4092 (bitconvert (v4i32 (X86vzmovl
4093 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4094 (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
4096 //===----------------------------------------------------------------------===//
4097 // SSE4.1 - Extract Instructions
4098 //===----------------------------------------------------------------------===//
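// Each extract copies the element of an XMM register selected by the
// immediate into a GPR or to memory; e.g. "pextrd $2, %xmm0, %eax" moves
// element 2 of the v4i32 in %xmm0 into %eax. A minimal C sketch (assuming
// the usual <smmintrin.h> intrinsic name):
//
//   #include <smmintrin.h>
//   int third(__m128i v) { return _mm_extract_epi32(v, 2); }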
4100 /// SS41I_extract8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
4101 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
4102 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4103 (ins VR128:$src1, i32i8imm:$src2),
4104 !strconcat(OpcodeStr,
4105 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4106 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
4108 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4109 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
4110 !strconcat(OpcodeStr,
4111 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4114 // There's an AssertZext in the way of writing the store pattern
4115 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4118 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4119 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
4120 def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
4121 (ins VR128:$src1, i32i8imm:$src2),
4122 "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
4125 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
4128 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
4129 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
4130 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4131 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
4132 !strconcat(OpcodeStr,
4133 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4136 // There's an AssertZext in the way of writing the store pattern
4137 // (store (i16 (trunc (X86pextrw (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4140 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4141 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
4143 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
4146 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4147 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
4148 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4149 (ins VR128:$src1, i32i8imm:$src2),
4150 !strconcat(OpcodeStr,
4151 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4152                  [(set GR32:$dst,
4153                   (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
4154 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4155 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
4156 !strconcat(OpcodeStr,
4157 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4158 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
4159 addr:$dst)]>, OpSize;
4162 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4163 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
4165 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
4167 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
4168 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
4169 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
4170 (ins VR128:$src1, i32i8imm:$src2),
4171 !strconcat(OpcodeStr,
4172 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4173                  [(set GR64:$dst,
4174                   (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
4175 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4176 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
4177 !strconcat(OpcodeStr,
4178 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4179 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
4180 addr:$dst)]>, OpSize, REX_W;
4183 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4184 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
4186 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
4188 /// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
4189 /// destination
4190 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
4191 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4192 (ins VR128:$src1, i32i8imm:$src2),
4193 !strconcat(OpcodeStr,
4194 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4195                  [(set GR32:$dst,
4196                   (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
4197                  OpSize;
4198 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4199 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
4200 !strconcat(OpcodeStr,
4201 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4202 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
4203 addr:$dst)]>, OpSize;
4206 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4207 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
4208 def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
4209 (ins VR128:$src1, i32i8imm:$src2),
4210 "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
4213 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
4215 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
4216 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
4217                                               imm:$src2))),
4218                  addr:$dst),
4219           (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
4220           Requires<[HasSSE41]>;
4222 //===----------------------------------------------------------------------===//
4223 // SSE4.1 - Insert Instructions
4224 //===----------------------------------------------------------------------===//
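// The inserts overwrite the element of the first source selected by the
// immediate with a GPR, memory, or XMM operand. A minimal C sketch
// (assuming the usual <smmintrin.h> intrinsic name):
//
//   #include <smmintrin.h>
//   __m128i set_elt1(__m128i v, int x) {
//     return _mm_insert_epi32(v, x, 1);   // pinsrd $1
//   }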
4226 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
4227 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4228 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4230 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4232 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4233       [(set VR128:$dst,
4234         (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
4235 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4236 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
4238 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4240 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4241       [(set VR128:$dst,
4242         (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
4243 imm:$src3))]>, OpSize;
4246 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4247 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
4248 let Constraints = "$src1 = $dst" in
4249 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
4251 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
4252 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4253 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4255 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4257 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4258       [(set VR128:$dst,
4259         (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
4261 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4262 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
4264 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4266 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4267       [(set VR128:$dst,
4268         (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
4269 imm:$src3)))]>, OpSize;
4272 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4273 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
4274 let Constraints = "$src1 = $dst" in
4275 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
4277 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
4278 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4279 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
4281 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4283 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4284       [(set VR128:$dst,
4285         (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
4287 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4288 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
4290 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4292 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4293       [(set VR128:$dst,
4294         (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
4295 imm:$src3)))]>, OpSize;
4298 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4299 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
4300 let Constraints = "$src1 = $dst" in
4301 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
4303 // insertps has a few different modes. The first two below are optimized
4304 // inserts that won't zero arbitrary elements in the destination vector;
4305 // the next one matches the intrinsic and could zero arbitrary elements
4306 // in the target vector.
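// For reference: the insertps immediate encodes the source element in
// bits 7:6, the destination position in bits 5:4, and a zero mask in
// bits 3:0. A minimal C sketch (assuming the usual <smmintrin.h> name):
//
//   #include <smmintrin.h>
//   __m128 ins(__m128 a, __m128 b) {
//     return _mm_insert_ps(a, b, 0x10);   // b[0] -> a[1], no zeroing
//   }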
4307 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
4308 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4309 (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4311 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4313 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4314       [(set VR128:$dst,
4315         (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
4317 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4318 (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
4320 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4322 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4323       [(set VR128:$dst,
4324         (X86insrtps VR128:$src1,
4325 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
4326 imm:$src3))]>, OpSize;
4329 let Constraints = "$src1 = $dst" in
4330 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
4331 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4332 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
4334 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4335           (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4336           Requires<[HasAVX]>;
4337 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4338 (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4339 Requires<[HasSSE41]>;
4341 //===----------------------------------------------------------------------===//
4342 // SSE4.1 - Round Instructions
4343 //===----------------------------------------------------------------------===//
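// The round immediate selects the rounding mode in bits 1:0 (00 nearest,
// 01 down, 10 up, 11 truncate); bit 2 uses MXCSR.RC instead, and bit 3
// suppresses the precision exception. A minimal C sketch (assuming the
// usual <smmintrin.h> names):
//
//   #include <smmintrin.h>
//   __m128 floor4(__m128 v) {
//     return _mm_round_ps(v, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
//   }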
4345 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
4346 X86MemOperand x86memop, RegisterClass RC,
4347 PatFrag mem_frag32, PatFrag mem_frag64,
4348 Intrinsic V4F32Int, Intrinsic V2F64Int> {
4349   // Vector intrinsic operation, reg
4351 def PSr : SS4AIi8<opcps, MRMSrcReg,
4352 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4353 !strconcat(OpcodeStr,
4354 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4355                     [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
4356                     OpSize;
4358 // Vector intrinsic operation, mem
4359 def PSm : Ii8<opcps, MRMSrcMem,
4360 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4361 !strconcat(OpcodeStr,
4362 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4363                     [(set RC:$dst,
4364                           (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
4365                     TA, OpSize,
4366                     Requires<[HasSSE41]>;
4368 // Vector intrinsic operation, reg
4369 def PDr : SS4AIi8<opcpd, MRMSrcReg,
4370 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4371 !strconcat(OpcodeStr,
4372 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4373                     [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
4374                     OpSize;
4376 // Vector intrinsic operation, mem
4377 def PDm : SS4AIi8<opcpd, MRMSrcMem,
4378 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4379 !strconcat(OpcodeStr,
4380 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4381                     [(set RC:$dst,
4382                           (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
4383                     OpSize;
4384 }
4386 multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
4387 RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
4389   // Vector intrinsic operation, reg
4390 def PSr_AVX : SS4AIi8<opcps, MRMSrcReg,
4391 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4392 !strconcat(OpcodeStr,
4393 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4396 // Vector intrinsic operation, mem
4397 def PSm_AVX : Ii8<opcps, MRMSrcMem,
4398 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4399 !strconcat(OpcodeStr,
4400 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4401 []>, TA, OpSize, Requires<[HasSSE41]>;
4403 // Vector intrinsic operation, reg
4404 def PDr_AVX : SS4AIi8<opcpd, MRMSrcReg,
4405 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4406 !strconcat(OpcodeStr,
4407 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4410 // Vector intrinsic operation, mem
4411 def PDm_AVX : SS4AIi8<opcpd, MRMSrcMem,
4412 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4413 !strconcat(OpcodeStr,
4414 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4418 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
4421 Intrinsic F64Int, bit Is2Addr = 1> {
4422 // Intrinsic operation, reg.
4423 def SSr : SS4AIi8<opcss, MRMSrcReg,
4424 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4426 !strconcat(OpcodeStr,
4427 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4428 !strconcat(OpcodeStr,
4429 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4430         [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4431         OpSize;
4433 // Intrinsic operation, mem.
4434 def SSm : SS4AIi8<opcss, MRMSrcMem,
4435 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4437 !strconcat(OpcodeStr,
4438 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4439 !strconcat(OpcodeStr,
4440 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4441         [(set VR128:$dst,
4442              (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
4443         OpSize;
4445 // Intrinsic operation, reg.
4446 def SDr : SS4AIi8<opcsd, MRMSrcReg,
4447 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4449 !strconcat(OpcodeStr,
4450 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4451 !strconcat(OpcodeStr,
4452 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4453         [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4454         OpSize;
4456 // Intrinsic operation, mem.
4457 def SDm : SS4AIi8<opcsd, MRMSrcMem,
4458 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4460 !strconcat(OpcodeStr,
4461 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4462 !strconcat(OpcodeStr,
4463 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4464         [(set VR128:$dst,
4465              (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
4466         OpSize;
4467 }
4469 multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
4471 // Intrinsic operation, reg.
4472 def SSr_AVX : SS4AIi8<opcss, MRMSrcReg,
4473 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4474 !strconcat(OpcodeStr,
4475 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4478 // Intrinsic operation, mem.
4479 def SSm_AVX : SS4AIi8<opcss, MRMSrcMem,
4480 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4481 !strconcat(OpcodeStr,
4482 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4485 // Intrinsic operation, reg.
4486 def SDr_AVX : SS4AIi8<opcsd, MRMSrcReg,
4487 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4488 !strconcat(OpcodeStr,
4489 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4492 // Intrinsic operation, mem.
4493 def SDm_AVX : SS4AIi8<opcsd, MRMSrcMem,
4494 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4495 !strconcat(OpcodeStr,
4496 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4500 // FP round - roundss, roundps, roundsd, roundpd
4501 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4503 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
4504 memopv4f32, memopv2f64,
4505 int_x86_sse41_round_ps,
4506 int_x86_sse41_round_pd>, VEX;
4507 defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
4508 memopv8f32, memopv4f64,
4509 int_x86_avx_round_ps_256,
4510 int_x86_avx_round_pd_256>, VEX;
4511 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
4512 int_x86_sse41_round_ss,
4513 int_x86_sse41_round_sd, 0>, VEX_4V;
4515 // Instructions for the assembler
4516 defm VROUND : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
4518 defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
4520 defm VROUND  : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
4521 }
4523 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
4524 memopv4f32, memopv2f64,
4525 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
4526 let Constraints = "$src1 = $dst" in
4527 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
4528 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
4530 //===----------------------------------------------------------------------===//
4531 // SSE4.1 - Packed Bit Test
4532 //===----------------------------------------------------------------------===//
4534 // ptest: we lower to this instruction in X86ISelLowering, primarily from
4535 // the Intel intrinsic that corresponds to it.
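// ptest sets ZF if ($src1 & $src2) == 0 and CF if (~$src1 & $src2) == 0.
// A minimal C sketch (assuming the usual <smmintrin.h> intrinsic name):
//
//   #include <smmintrin.h>
//   int disjoint(__m128i a, __m128i b) {
//     return _mm_testz_si128(a, b);   // ptest + sete
//   }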
4536 let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in {
4537 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4538 "vptest\t{$src2, $src1|$src1, $src2}",
4539 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4541 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4542 "vptest\t{$src2, $src1|$src1, $src2}",
4543 [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4546 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
4547 "vptest\t{$src2, $src1|$src1, $src2}",
4548 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
4550 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
4551 "vptest\t{$src2, $src1|$src1, $src2}",
4552 [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
4556 let Defs = [EFLAGS] in {
4557 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4558 "ptest \t{$src2, $src1|$src1, $src2}",
4559 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4561 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4562 "ptest \t{$src2, $src1|$src1, $src2}",
4563 [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4567 // The bit test instructions below are AVX only
4568 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
4569 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
4570 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
4571 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4572 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
4573 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
4574 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4575 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
4579 let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in {
4580 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
4581 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
4582 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
4583 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
4586 //===----------------------------------------------------------------------===//
4587 // SSE4.1 - Misc Instructions
4588 //===----------------------------------------------------------------------===//
4590 def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
4591 "popcnt{w}\t{$src, $dst|$dst, $src}", []>, OpSize, XS;
4593 def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
4594 "popcnt{w}\t{$src, $dst|$dst, $src}", []>, OpSize, XS;
4596 def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
4597 "popcnt{l}\t{$src, $dst|$dst, $src}", []>, XS;
4599 def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
4600 "popcnt{l}\t{$src, $dst|$dst, $src}", []>, XS;
4602 def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
4603 "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;
4605 def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
4606 "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;
4610 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
4611 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
4612 Intrinsic IntId128> {
4613   def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4614                     (ins VR128:$src),
4615 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4616 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
4617   def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4618                     (ins i128mem:$src),
4619 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4620                  [(set VR128:$dst,
4621                    (IntId128
4622                     (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
4625 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4626 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
4627 int_x86_sse41_phminposuw>, VEX;
4628 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
4629 int_x86_sse41_phminposuw>;
4631 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
4632 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
4633 Intrinsic IntId128, bit Is2Addr = 1> {
4634 let isCommutable = 1 in
4635 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4636 (ins VR128:$src1, VR128:$src2),
4638 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4639 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4640 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
4641 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4642 (ins VR128:$src1, i128mem:$src2),
4644 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4645 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4646        [(set VR128:$dst,
4647          (IntId128 VR128:$src1,
4648           (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4649 }
4651 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4652 let isCommutable = 0 in
4653 defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
4654                                     0>, VEX_4V;
4655 defm VPCMPEQQ  : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
4656                                     0>, VEX_4V;
4657 defm VPMINSB   : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
4658                                     0>, VEX_4V;
4659 defm VPMINSD   : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
4660                                     0>, VEX_4V;
4661 defm VPMINUD   : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
4662                                     0>, VEX_4V;
4663 defm VPMINUW   : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
4664                                     0>, VEX_4V;
4665 defm VPMAXSB   : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
4666                                     0>, VEX_4V;
4667 defm VPMAXSD   : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
4668                                     0>, VEX_4V;
4669 defm VPMAXUD   : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
4670                                     0>, VEX_4V;
4671 defm VPMAXUW   : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
4672                                     0>, VEX_4V;
4673 defm VPMULDQ   : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
4674                                     0>, VEX_4V;
4675 }
4677 let Constraints = "$src1 = $dst" in {
4678 let isCommutable = 0 in
4679 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
4680 defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
4681 defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
4682 defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
4683 defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
4684 defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
4685 defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
4686 defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
4687 defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
4688 defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
4689 defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
4692 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
4693 (PCMPEQQrr VR128:$src1, VR128:$src2)>;
4694 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
4695 (PCMPEQQrm VR128:$src1, addr:$src2)>;
4697 /// SS48I_binop_rm - Simple SSE41 binary operator.
4698 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4699 ValueType OpVT, bit Is2Addr = 1> {
4700 let isCommutable = 1 in
4701 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4702 (ins VR128:$src1, VR128:$src2),
4704 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4705 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4706        [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
4707        OpSize;
4708 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4709 (ins VR128:$src1, i128mem:$src2),
4711 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4712 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4713 [(set VR128:$dst, (OpNode VR128:$src1,
4714                  (bc_v4i32 (memopv2i64 addr:$src2))))]>,
4715        OpSize;
4716 }
4718 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4719 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
4720 let Constraints = "$src1 = $dst" in
4721 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
4723 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
4724 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
4725 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
4726 X86MemOperand x86memop, bit Is2Addr = 1> {
4727 let isCommutable = 1 in
4728 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
4729 (ins RC:$src1, RC:$src2, i32i8imm:$src3),
4731 !strconcat(OpcodeStr,
4732 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4733 !strconcat(OpcodeStr,
4734 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4735         [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
4736         OpSize;
4737 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
4738 (ins RC:$src1, x86memop:$src2, i32i8imm:$src3),
4740 !strconcat(OpcodeStr,
4741 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4742 !strconcat(OpcodeStr,
4743 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4744         [(set RC:$dst,
4745           (IntId RC:$src1,
4746            (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
4747         OpSize;
4748 }
4750 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4751 let isCommutable = 0 in {
4752 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
4753 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4754 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
4755 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4756 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
4757 int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4758 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
4759 int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4760 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
4761 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4762 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
4763 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4765 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
4766 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4767 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
4768 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4769 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
4770 VR256, memopv32i8, i256mem, 0>, VEX_4V;
4773 let Constraints = "$src1 = $dst" in {
4774 let isCommutable = 0 in {
4775 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
4776 VR128, memopv16i8, i128mem>;
4777 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
4778 VR128, memopv16i8, i128mem>;
4779 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
4780 VR128, memopv16i8, i128mem>;
4781 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
4782 VR128, memopv16i8, i128mem>;
4784 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
4785 VR128, memopv16i8, i128mem>;
4786 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
4787 VR128, memopv16i8, i128mem>;
4790 /// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operands
4791 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4792 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
4793 RegisterClass RC, X86MemOperand x86memop,
4794 PatFrag mem_frag, Intrinsic IntId> {
4795 def rr : I<opc, MRMSrcReg, (outs RC:$dst),
4796 (ins RC:$src1, RC:$src2, RC:$src3),
4797 !strconcat(OpcodeStr,
4798 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4799 [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
4800 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4802 def rm : I<opc, MRMSrcMem, (outs RC:$dst),
4803 (ins RC:$src1, x86memop:$src2, RC:$src3),
4804 !strconcat(OpcodeStr,
4805 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4806         [(set RC:$dst,
4807               (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
4808                RC:$src3))],
4809               SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4813 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
4814 memopv16i8, int_x86_sse41_blendvpd>;
4815 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
4816 memopv16i8, int_x86_sse41_blendvps>;
4817 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
4818 memopv16i8, int_x86_sse41_pblendvb>;
4819 defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
4820 memopv32i8, int_x86_avx_blendv_pd_256>;
4821 defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
4822 memopv32i8, int_x86_avx_blendv_ps_256>;
4824 /// SS41I_ternary_int - SSE 4.1 ternary operator
4825 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
4826 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4827 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4828 (ins VR128:$src1, VR128:$src2),
4829 !strconcat(OpcodeStr,
4830 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4831 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
4834 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4835 (ins VR128:$src1, i128mem:$src2),
4836 !strconcat(OpcodeStr,
4837 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4838        [(set VR128:$dst,
4839          (IntId VR128:$src1,
4840           (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
4844 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
4845 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
4846 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
4848 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4849 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4850 "vmovntdqa\t{$src, $dst|$dst, $src}",
4851 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4853 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4854 "movntdqa\t{$src, $dst|$dst, $src}",
4855 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4858 //===----------------------------------------------------------------------===//
4859 // SSE4.2 - Compare Instructions
4860 //===----------------------------------------------------------------------===//
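// SSE4.2 adds the 64-bit element compare missing from SSE4.1. A minimal
// C sketch (assuming the usual <nmmintrin.h> intrinsic name):
//
//   #include <nmmintrin.h>
//   __m128i gt64(__m128i a, __m128i b) { return _mm_cmpgt_epi64(a, b); }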
4862 /// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
4863 multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
4864 Intrinsic IntId128, bit Is2Addr = 1> {
4865 def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
4866 (ins VR128:$src1, VR128:$src2),
4868 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4869 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4870 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
4872 def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
4873 (ins VR128:$src1, i128mem:$src2),
4875 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4876 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4877       [(set VR128:$dst,
4878         (IntId128 VR128:$src1,
4879          (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4880 }
4882 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4883 defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
4885 let Constraints = "$src1 = $dst" in
4886 defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
4888 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
4889 (PCMPGTQrr VR128:$src1, VR128:$src2)>;
4890 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
4891 (PCMPGTQrm VR128:$src1, addr:$src2)>;
4893 //===----------------------------------------------------------------------===//
4894 // SSE4.2 - String/text Processing Instructions
4895 //===----------------------------------------------------------------------===//
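// The pcmpistr*/pcmpestr* instructions compare two strings held in XMM
// registers: the 'i' (implicit-length) forms stop at a NUL byte, while
// the 'e' (explicit-length) forms take lengths in EAX and EDX. The *m
// forms return a mask in XMM0 and the *i forms an index in ECX, both
// also setting EFLAGS. A minimal C sketch (assuming the usual
// <nmmintrin.h> names):
//
//   #include <nmmintrin.h>
//   int first_match(__m128i needles, __m128i hay) {
//     return _mm_cmpistri(needles, hay,
//                         _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY);
//   }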
4897 // Packed Compare Implicit Length Strings, Return Mask
4898 multiclass pseudo_pcmpistrm<string asm> {
4899 def REG : Ii8<0, Pseudo, (outs VR128:$dst),
4900 (ins VR128:$src1, VR128:$src2, i8imm:$src3), !strconcat(asm, "rr PSEUDO"),
4901   [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
4902                                                 imm:$src3))]>;
4903 def MEM : Ii8<0, Pseudo, (outs VR128:$dst),
4904 (ins VR128:$src1, i128mem:$src2, i8imm:$src3), !strconcat(asm, "rm PSEUDO"),
4905 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
4906 VR128:$src1, (load addr:$src2), imm:$src3))]>;
4909 let Defs = [EFLAGS], usesCustomInserter = 1 in {
4910 defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
4911 defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
4914 let Defs = [XMM0, EFLAGS], isAsmParserOnly = 1,
4915 Predicates = [HasAVX] in {
4916 def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4917 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4918 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4919 def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4920 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4921 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4924 let Defs = [XMM0, EFLAGS] in {
4925 def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4926 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4927 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4928 def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4929 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4930 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4933 // Packed Compare Explicit Length Strings, Return Mask
4934 multiclass pseudo_pcmpestrm<string asm> {
4935 def REG : Ii8<0, Pseudo, (outs VR128:$dst),
4936 (ins VR128:$src1, VR128:$src3, i8imm:$src5), !strconcat(asm, "rr PSEUDO"),
4937 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
4938 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
4939 def MEM : Ii8<0, Pseudo, (outs VR128:$dst),
4940 (ins VR128:$src1, i128mem:$src3, i8imm:$src5), !strconcat(asm, "rm PSEUDO"),
4941 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
4942 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
4945 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
4946 defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
4947 defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
4950 let isAsmParserOnly = 1, Predicates = [HasAVX],
4951 Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4952 def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4953 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4954 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4955 def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4956 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4957 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4960 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4961 def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4962 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4963 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4964 def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4965 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4966 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4969 // Packed Compare Implicit Length Strings, Return Index
4970 let Defs = [ECX, EFLAGS] in {
4971 multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
4972 def rr : SS42AI<0x63, MRMSrcReg, (outs),
4973 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4974 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4975 [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
4976 (implicit EFLAGS)]>, OpSize;
4977 def rm : SS42AI<0x63, MRMSrcMem, (outs),
4978 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4979 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4980 [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
4981 (implicit EFLAGS)]>, OpSize;
4985 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4986 defm VPCMPISTRI  : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
4987                                     VEX;
4988 defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
4989                                     VEX;
4990 defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
4991                                     VEX;
4992 defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
4993                                     VEX;
4994 defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
4995                                     VEX;
4996 defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
4997                                     VEX;
4998 }
5000 defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
5001 defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
5002 defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
5003 defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
5004 defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
5005 defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
5007 // Packed Compare Explicit Length Strings, Return Index
5008 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
5009 multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
5010 def rr : SS42AI<0x61, MRMSrcReg, (outs),
5011 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5012 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5013 [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
5014 (implicit EFLAGS)]>, OpSize;
5015 def rm : SS42AI<0x61, MRMSrcMem, (outs),
5016 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5017 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5018        [(set ECX,
5019              (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
5020 (implicit EFLAGS)]>, OpSize;
5024 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
5025 defm VPCMPESTRI  : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
5026                                     VEX;
5027 defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
5028                                     VEX;
5029 defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
5030                                     VEX;
5031 defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
5032                                     VEX;
5033 defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
5034                                     VEX;
5035 defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
5036                                     VEX;
5037 }
5039 defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
5040 defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
5041 defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
5042 defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
5043 defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
5044 defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
5046 //===----------------------------------------------------------------------===//
5047 // SSE4.2 - CRC Instructions
5048 //===----------------------------------------------------------------------===//
5050 // No CRC instructions have AVX equivalents.
5052 // crc intrinsic instructions
5053 // This set of instructions is rm-only; the only difference between the
5054 // variants is the size of r and m.
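// Note that crc32 accumulates a CRC-32C (Castagnoli) checksum, not the
// zlib/Ethernet CRC-32 polynomial. A minimal C sketch of one 32-bit
// accumulation step (assuming the usual <nmmintrin.h> intrinsic name):
//
//   #include <nmmintrin.h>
//   unsigned step(unsigned crc, unsigned data) {
//     return _mm_crc32_u32(crc, data);
//   }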
5055 let Constraints = "$src1 = $dst" in {
5056   def CRC32m8  : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
5057                       (ins GR32:$src1, i8mem:$src2),
5058                       "crc32{b} \t{$src2, $src1|$src1, $src2}",
5059                        [(set GR32:$dst,
5060                          (int_x86_sse42_crc32_8 GR32:$src1,
5061                          (load addr:$src2)))]>;
5062   def CRC32r8  : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
5063                       (ins GR32:$src1, GR8:$src2),
5064                       "crc32{b} \t{$src2, $src1|$src1, $src2}",
5065                        [(set GR32:$dst,
5066                          (int_x86_sse42_crc32_8 GR32:$src1, GR8:$src2))]>;
5067   def CRC32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5068                       (ins GR32:$src1, i16mem:$src2),
5069                       "crc32{w} \t{$src2, $src1|$src1, $src2}",
5070                        [(set GR32:$dst,
5071                          (int_x86_sse42_crc32_16 GR32:$src1,
5072                          (load addr:$src2)))]>,
5073                        OpSize;
5074   def CRC32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5075                       (ins GR32:$src1, GR16:$src2),
5076                       "crc32{w} \t{$src2, $src1|$src1, $src2}",
5077                        [(set GR32:$dst,
5078                          (int_x86_sse42_crc32_16 GR32:$src1, GR16:$src2))]>,
5079                        OpSize;
5080   def CRC32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5081                       (ins GR32:$src1, i32mem:$src2),
5082                       "crc32{l} \t{$src2, $src1|$src1, $src2}",
5083                        [(set GR32:$dst,
5084                          (int_x86_sse42_crc32_32 GR32:$src1,
5085                          (load addr:$src2)))]>;
5086   def CRC32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5087                       (ins GR32:$src1, GR32:$src2),
5088                       "crc32{l} \t{$src2, $src1|$src1, $src2}",
5089                        [(set GR32:$dst,
5090                          (int_x86_sse42_crc32_32 GR32:$src1, GR32:$src2))]>;
5091   def CRC64m8  : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
5092                       (ins GR64:$src1, i8mem:$src2),
5093                       "crc32{b} \t{$src2, $src1|$src1, $src2}",
5094                        [(set GR64:$dst,
5095                          (int_x86_sse42_crc64_8 GR64:$src1,
5096                          (load addr:$src2)))]>,
5097                        REX_W;
5098   def CRC64r8  : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
5099                       (ins GR64:$src1, GR8:$src2),
5100                       "crc32{b} \t{$src2, $src1|$src1, $src2}",
5101                        [(set GR64:$dst,
5102                          (int_x86_sse42_crc64_8 GR64:$src1, GR8:$src2))]>,
5103                        REX_W;
5104   def CRC64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
5105                       (ins GR64:$src1, i64mem:$src2),
5106                       "crc32{q} \t{$src2, $src1|$src1, $src2}",
5107                        [(set GR64:$dst,
5108                          (int_x86_sse42_crc64_64 GR64:$src1,
5109                          (load addr:$src2)))]>,
5110                        REX_W;
5111   def CRC64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
5112                       (ins GR64:$src1, GR64:$src2),
5113                       "crc32{q} \t{$src2, $src1|$src1, $src2}",
5114                        [(set GR64:$dst,
5115                          (int_x86_sse42_crc64_64 GR64:$src1, GR64:$src2))]>,
5116                        REX_W;
5117 }
5119 //===----------------------------------------------------------------------===//
5120 // AES-NI Instructions
5121 //===----------------------------------------------------------------------===//
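// aesenc performs one round of AES encryption on the state in $src1 with
// the round key in $src2 (ShiftRows, SubBytes, MixColumns, then
// AddRoundKey; the *last forms omit MixColumns), and aesdec* is the
// inverse flow. A minimal C sketch (assuming the usual <wmmintrin.h>
// intrinsic name):
//
//   #include <wmmintrin.h>
//   __m128i enc_round(__m128i state, __m128i rk) {
//     return _mm_aesenc_si128(state, rk);
//   }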
5123 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
5124 Intrinsic IntId128, bit Is2Addr = 1> {
5125 def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
5126 (ins VR128:$src1, VR128:$src2),
5128 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5129 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5130 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5132 def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
5133 (ins VR128:$src1, i128mem:$src2),
5135 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5136 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5137       [(set VR128:$dst,
5138         (IntId128 VR128:$src1,
5139          (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5140 }
5142 // Perform One Round of an AES Encryption/Decryption Flow
5143 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5144 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
5145 int_x86_aesni_aesenc, 0>, VEX_4V;
5146 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
5147 int_x86_aesni_aesenclast, 0>, VEX_4V;
5148 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
5149 int_x86_aesni_aesdec, 0>, VEX_4V;
5150 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
5151 int_x86_aesni_aesdeclast, 0>, VEX_4V;
5154 let Constraints = "$src1 = $dst" in {
5155 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
5156 int_x86_aesni_aesenc>;
5157 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
5158 int_x86_aesni_aesenclast>;
5159 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
5160 int_x86_aesni_aesdec>;
5161 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
5162 int_x86_aesni_aesdeclast>;
5165 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
5166 (AESENCrr VR128:$src1, VR128:$src2)>;
5167 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
5168 (AESENCrm VR128:$src1, addr:$src2)>;
5169 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
5170 (AESENCLASTrr VR128:$src1, VR128:$src2)>;
5171 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
5172 (AESENCLASTrm VR128:$src1, addr:$src2)>;
5173 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
5174 (AESDECrr VR128:$src1, VR128:$src2)>;
5175 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
5176 (AESDECrm VR128:$src1, addr:$src2)>;
5177 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
5178 (AESDECLASTrr VR128:$src1, VR128:$src2)>;
5179 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
5180 (AESDECLASTrm VR128:$src1, addr:$src2)>;
5182 // Perform the AES InvMixColumn Transformation
5183 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5184 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5185   (ins VR128:$src1),
5186   "vaesimc\t{$src1, $dst|$dst, $src1}",
5187   [(set VR128:$dst,
5188   (int_x86_aesni_aesimc VR128:$src1))]>,
5189   OpSize, VEX;
5190 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5191 (ins i128mem:$src1),
5192 "vaesimc\t{$src1, $dst|$dst, $src1}",
5194   (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5195   OpSize, VEX;
5196 }
5197 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5198   (ins VR128:$src1),
5199   "aesimc\t{$src1, $dst|$dst, $src1}",
5200   [(set VR128:$dst,
5201   (int_x86_aesni_aesimc VR128:$src1))]>,
5202   OpSize;
5203 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5204 (ins i128mem:$src1),
5205 "aesimc\t{$src1, $dst|$dst, $src1}",
5207   (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5208   OpSize;
5210 // AES Round Key Generation Assist
5211 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5212 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5213 (ins VR128:$src1, i8imm:$src2),
5214 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5215   [(set VR128:$dst,
5216   (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5217   OpSize, VEX;
5218 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5219 (ins i128mem:$src1, i8imm:$src2),
5220 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5222 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5226 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5227 (ins VR128:$src1, i8imm:$src2),
5228 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5229   [(set VR128:$dst,
5230   (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5231   OpSize;
5232 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5233 (ins i128mem:$src1, i8imm:$src2),
5234 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5236 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5240 //===----------------------------------------------------------------------===//
5241 // CLMUL Instructions
5242 //===----------------------------------------------------------------------===//
5244 // Only the AVX versions of the CLMUL instructions are described here.
5246 // Carry-less Multiplication instructions
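// pclmulqdq multiplies one 64-bit half of each source as polynomials
// over GF(2), giving a 128-bit product; imm8 bit 0 selects the qword of
// $src1 and bit 4 the qword of $src2 (the pseudo-ops below just fix that
// immediate). A minimal C sketch (assuming the usual <wmmintrin.h> name):
//
//   #include <wmmintrin.h>
//   __m128i clmul_lo(__m128i a, __m128i b) {
//     return _mm_clmulepi64_si128(a, b, 0x00);   // low half x low half
//   }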
5247 let isAsmParserOnly = 1 in {
5248 def VPCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
5249 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5250 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5253 def VPCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
5254 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5255 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5259 multiclass avx_vpclmul<string asm> {
5260 def rr : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
5261 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5264 def rm : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
5265 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5268 defm VPCLMULHQHQDQ : avx_vpclmul<"vpclmulhqhqdq">;
5269 defm VPCLMULHQLQDQ : avx_vpclmul<"vpclmulhqlqdq">;
5270 defm VPCLMULLQHQDQ : avx_vpclmul<"vpclmullqhqdq">;
5271 defm VPCLMULLQLQDQ : avx_vpclmul<"vpclmullqlqdq">;
5273 } // isAsmParserOnly
5275 //===----------------------------------------------------------------------===//
5276 // AVX Instructions
5277 //===----------------------------------------------------------------------===//
let isAsmParserOnly = 1 in {

// Load from memory and broadcast to all elements of the destination operand
class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
                    X86MemOperand x86memop, Intrinsic Int> :
  AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
        !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
        [(set RC:$dst, (Int addr:$src))]>, VEX;

def VBROADCASTSS   : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
                                   int_x86_avx_vbroadcastss>;
def VBROADCASTSSY  : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
                                   int_x86_avx_vbroadcastss_256>;
def VBROADCASTSD   : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
                                   int_x86_avx_vbroadcast_sd_256>;
def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
                                   int_x86_avx_vbroadcastf128_pd_256>;
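
// Note that VBROADCASTF128 is defined via the pd_256 intrinsic only; the
// ps_256 form is mapped onto the same instruction by a separate pattern
// further below.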
// Insert packed floating-point values
def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;

// Extract packed floating-point values
def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
          (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;
// Conditional SIMD Packed Loads and Stores
multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
                          Intrinsic IntLd, Intrinsic IntLd256,
                          Intrinsic IntSt, Intrinsic IntSt256,
                          PatFrag pf128, PatFrag pf256> {
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, f128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
             VEX_4V;
  def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, f256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V;
  def mr  : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
}
defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
                                 int_x86_avx_maskload_ps,
                                 int_x86_avx_maskload_ps_256,
                                 int_x86_avx_maskstore_ps,
                                 int_x86_avx_maskstore_ps_256,
                                 memopv4f32, memopv8f32>;
defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
                                 int_x86_avx_maskload_pd,
                                 int_x86_avx_maskload_pd_256,
                                 int_x86_avx_maskstore_pd,
                                 int_x86_avx_maskstore_pd_256,
                                 memopv2f64, memopv4f64>;
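
// Each defm above expands to four instructions; VMASKMOVPS, for example,
// yields VMASKMOVPSrm/VMASKMOVPSYrm for the masked loads and
// VMASKMOVPSmr/VMASKMOVPSYmr for the masked stores.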
// Permute Floating-Point Values
multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
                      RegisterClass RC, X86MemOperand x86memop_f,
                      X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
                      Intrinsic IntVar, Intrinsic IntImm> {
  def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
  def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, x86memop_i:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;

  def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
  def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
             (ins x86memop_f:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
}
defm VPERMILPS  : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
                             memopv4f32, memopv4i32,
                             int_x86_avx_vpermilvar_ps,
                             int_x86_avx_vpermil_ps>;
defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
                             memopv8f32, memopv8i32,
                             int_x86_avx_vpermilvar_ps_256,
                             int_x86_avx_vpermil_ps_256>;
defm VPERMILPD  : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
                             memopv2f64, memopv2i64,
                             int_x86_avx_vpermilvar_pd,
                             int_x86_avx_vpermil_pd>;
defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
                             memopv4f64, memopv4i64,
                             int_x86_avx_vpermilvar_pd_256,
                             int_x86_avx_vpermil_pd_256>;

def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;

// Zero All YMM registers
def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
                 [(int_x86_avx_vzeroall)]>, VEX, VEX_L, Requires<[HasAVX]>;

// Zero Upper bits of YMM registers
def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
                   [(int_x86_avx_vzeroupper)]>, VEX, Requires<[HasAVX]>;

} // isAsmParserOnly
def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;

def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
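
// All three type variants of each intrinsic select the same instruction:
// vinsertf128/vextractf128 move raw 128-bit lanes, so the element type is
// irrelevant to the encoding.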

def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
          (VBROADCASTF128 addr:$src)>;

def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;

def : Pat<(int_x86_avx_vperm2f128_ps_256
                  VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_pd_256
                  VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_si_256
                  VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
//===----------------------------------------------------------------------===//
// SSE Shuffle pattern fragments
//===----------------------------------------------------------------------===//

// This is part of a "work in progress" refactoring. The idea is that all
// vector shuffles are going to be translated into target-specific nodes and
// matched directly by the patterns below (which can be changed along the way).
// The AVX versions of some, but not all, of these patterns are described here;
// more should follow in the near future.

// Shuffle with PSHUFD instruction folding loads. The first two patterns match
// SSE2 loads, which are always promoted to v2i64. The last one should match
// the SSE1 case, where the only legal load is v4f32; but PSHUFD is an SSE2
// instruction, so it is unclear how this pattern could ever have matched.
// It will remain here until we investigate further.
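// For reference, the PSHUFD immediate packs four 2-bit source-element
// selectors, low element first: imm{1-0} picks the source element for result
// element 0, imm{3-2} for result element 1, and so on. For example,
// imm = 0x1B (0b00011011) reverses the four 32-bit elements.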
def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
                            (i8 imm:$imm))),
          (VPSHUFDmi addr:$src1, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
                            (i8 imm:$imm))),
          (PSHUFDmi addr:$src1, imm:$imm)>;
def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
                            (i8 imm:$imm))),
          (PSHUFDmi addr:$src1, imm:$imm)>; // FIXME: has this ever worked?

// Shuffle with PSHUFD instruction.
def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (PSHUFDri VR128:$src1, imm:$imm)>;

def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (PSHUFDri VR128:$src1, imm:$imm)>;

// Shuffle with SHUFPD instruction.
def : Pat<(v2f64 (X86Shufps VR128:$src1,
                            (memopv2f64 addr:$src2), (i8 imm:$imm))),
          (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Shufps VR128:$src1,
                            (memopv2f64 addr:$src2), (i8 imm:$imm))),
          (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;

def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;

def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;

// Shuffle with SHUFPS instruction.
def : Pat<(v4f32 (X86Shufps VR128:$src1,
                            (memopv4f32 addr:$src2), (i8 imm:$imm))),
          (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Shufps VR128:$src1,
                            (memopv4f32 addr:$src2), (i8 imm:$imm))),
          (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;

def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;

def : Pat<(v4i32 (X86Shufps VR128:$src1,
                            (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
          (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86Shufps VR128:$src1,
                            (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
          (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;

def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;

// Shuffle with MOVHLPS instruction
def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;

// Shuffle with MOVDDUP instruction
def : Pat<(X86Movddup (memopv2f64 addr:$src)),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (memopv2f64 addr:$src)),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v2f64
                       (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64
                       (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
          (MOVDDUPrm addr:$src)>;
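
// All of the MOVDDUP load patterns above funnel into the same rm form;
// movddup duplicates a single 64-bit value into both lanes, so only the low
// 64 bits of each folded load shape are actually consumed.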

// Shuffle with UNPCKLPS
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
          (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
          (UNPCKLPSrm VR128:$src1, addr:$src2)>;

def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
          (VUNPCKLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
          (UNPCKLPSrr VR128:$src1, VR128:$src2)>;

// Shuffle with UNPCKHPS
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
          (VUNPCKHPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
          (UNPCKHPSrm VR128:$src1, addr:$src2)>;

def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
          (VUNPCKHPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
          (UNPCKHPSrr VR128:$src1, VR128:$src2)>;

// Shuffle with UNPCKLPD
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
          (VUNPCKLPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
          (UNPCKLPDrm VR128:$src1, addr:$src2)>;

def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
          (VUNPCKLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
          (UNPCKLPDrr VR128:$src1, VR128:$src2)>;

// Shuffle with UNPCKHPD
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
          (VUNPCKHPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
          (UNPCKHPDrm VR128:$src1, addr:$src2)>;

def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
          (VUNPCKHPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
          (UNPCKHPDrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKLBW
def : Pat<(v16i8 (X86Punpcklbw VR128:$src1,
                               (bc_v16i8 (memopv2i64 addr:$src2)))),
          (PUNPCKLBWrm VR128:$src1, addr:$src2)>;
def : Pat<(v16i8 (X86Punpcklbw VR128:$src1, VR128:$src2)),
          (PUNPCKLBWrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKLWD
def : Pat<(v8i16 (X86Punpcklwd VR128:$src1,
                               (bc_v8i16 (memopv2i64 addr:$src2)))),
          (PUNPCKLWDrm VR128:$src1, addr:$src2)>;
def : Pat<(v8i16 (X86Punpcklwd VR128:$src1, VR128:$src2)),
          (PUNPCKLWDrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKLDQ
def : Pat<(v4i32 (X86Punpckldq VR128:$src1,
                               (bc_v4i32 (memopv2i64 addr:$src2)))),
          (PUNPCKLDQrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86Punpckldq VR128:$src1, VR128:$src2)),
          (PUNPCKLDQrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKLQDQ
def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, (memopv2i64 addr:$src2))),
          (PUNPCKLQDQrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)),
          (PUNPCKLQDQrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKHBW
def : Pat<(v16i8 (X86Punpckhbw VR128:$src1,
                               (bc_v16i8 (memopv2i64 addr:$src2)))),
          (PUNPCKHBWrm VR128:$src1, addr:$src2)>;
def : Pat<(v16i8 (X86Punpckhbw VR128:$src1, VR128:$src2)),
          (PUNPCKHBWrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKHWD
def : Pat<(v8i16 (X86Punpckhwd VR128:$src1,
                               (bc_v8i16 (memopv2i64 addr:$src2)))),
          (PUNPCKHWDrm VR128:$src1, addr:$src2)>;
def : Pat<(v8i16 (X86Punpckhwd VR128:$src1, VR128:$src2)),
          (PUNPCKHWDrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKHDQ
def : Pat<(v4i32 (X86Punpckhdq VR128:$src1,
                               (bc_v4i32 (memopv2i64 addr:$src2)))),
          (PUNPCKHDQrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86Punpckhdq VR128:$src1, VR128:$src2)),
          (PUNPCKHDQrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKHQDQ
def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, (memopv2i64 addr:$src2))),
          (PUNPCKHQDQrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)),
          (PUNPCKHQDQrr VR128:$src1, VR128:$src2)>;

// Shuffle with MOVLHPS
def : Pat<(X86Movlhps VR128:$src1,
               (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
          (MOVHPSrm VR128:$src1, addr:$src2)>;
def : Pat<(X86Movlhps VR128:$src1,
               (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
          (MOVHPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
          (MOVLHPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
          (MOVLHPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
          (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;

// FIXME: Instead of X86Movddup, there should be an X86Unpcklpd here; the
// problem is during lowering, where it is not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
// time and the fold opportunity reappears.
def : Pat<(v2f64 (X86Movddup VR128:$src)),
          (UNPCKLPDrr VR128:$src, VR128:$src)>;

// Shuffle with MOVLHPD
def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
                             (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVHPDrm VR128:$src1, addr:$src2)>;

// FIXME: Instead of X86Unpcklpd, there should be an X86Movlhpd here; the
// problem is during lowering, where it is not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
// time and the fold opportunity reappears.
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
                              (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVHPDrm VR128:$src1, addr:$src2)>;

// Shuffle with MOVSS
def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
          (MOVSSrr VR128:$src1, FR32:$src2)>;
def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4i32 VR128:$src1),
                   (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4f32 VR128:$src1),
                   (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
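// MOVSSrr takes an FR32 second operand, so when the source is a full vector
// the patterns above extract its low 32 bits as the sub_ss subregister
// before the copy.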
// FIXME: Instead of an X86Movss there should be an X86Movlps here; the
// problem is during lowering, where it is not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
// time and the fold opportunity reappears.
def : Pat<(X86Movss VR128:$src1,
                    (bc_v4i32 (v2i64 (load addr:$src2)))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;

// Shuffle with MOVSD
def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
          (MOVSDrr VR128:$src1, FR64:$src2)>;
def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2i64 VR128:$src1),
                   (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2f64 VR128:$src1),
                   (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;

// Shuffle with MOVSHDUP
def : Pat<(v4i32 (X86Movshdup VR128:$src)),
          (MOVSHDUPrr VR128:$src)>;
def : Pat<(X86Movshdup (bc_v4i32 (memopv2i64 addr:$src))),
          (MOVSHDUPrm addr:$src)>;

def : Pat<(v4f32 (X86Movshdup VR128:$src)),
          (MOVSHDUPrr VR128:$src)>;
def : Pat<(X86Movshdup (memopv4f32 addr:$src)),
          (MOVSHDUPrm addr:$src)>;

// Shuffle with MOVSLDUP
def : Pat<(v4i32 (X86Movsldup VR128:$src)),
          (MOVSLDUPrr VR128:$src)>;
def : Pat<(X86Movsldup (bc_v4i32 (memopv2i64 addr:$src))),
          (MOVSLDUPrm addr:$src)>;

def : Pat<(v4f32 (X86Movsldup VR128:$src)),
          (MOVSLDUPrr VR128:$src)>;
def : Pat<(X86Movsldup (memopv4f32 addr:$src)),
          (MOVSLDUPrm addr:$src)>;

// Shuffle with PSHUFHW
def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
          (PSHUFHWri VR128:$src, imm:$imm)>;
def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
          (PSHUFHWmi addr:$src, imm:$imm)>;

// Shuffle with PSHUFLW
def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
          (PSHUFLWri VR128:$src, imm:$imm)>;
def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
          (PSHUFLWmi addr:$src, imm:$imm)>;

// Shuffle with PALIGNR
def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
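// Note that the register operands are swapped relative to the X86PAlign node;
// this presumably matches PALIGNR's semantics, which concatenate the first
// (destination) operand above the second before the right byte shift.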

// Shuffle with MOVLPS
def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(X86Movlps VR128:$src1,
               (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;

// Shuffle with MOVLPD
def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v2f64 (X86Movlpd VR128:$src1,
                            (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;

// Extra patterns to match stores with MOVHPS/PD and MOVLPS/PD
def : Pat<(store (f64 (vector_extract
            (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))), addr:$dst),
          (MOVHPSmr addr:$dst, VR128:$src)>;
def : Pat<(store (f64 (vector_extract
            (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))), addr:$dst),
          (MOVHPDmr addr:$dst, VR128:$src)>;

def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v4i32 (X86Movlps
                 (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;

def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>;