//===-- X86InstrFMA.td - FMA Instruction Set ---------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes FMA (Fused Multiply-Add) instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// FMA3 - Intel 3 operand Fused Multiply-Add instructions
//===----------------------------------------------------------------------===//

// For all FMA opcodes declared in the fma3p_rm_* and fma3s_rm_* multiclasses
// defined below, both the register and memory variants are commutable.
// For the register form the commutable operands are 1, 2 and 3.
// For the memory variant the folded operand must be operand 3. Thus,
// in that case, only operands 1 and 2 can be swapped.
// Commuting some of the operands may require an opcode change:
// Commuting FMA*213*:
//   operands 1 and 2 (memory & register forms): *213* --> *213*(no changes);
//   operands 1 and 3 (register forms only): *213* --> *231*;
//   operands 2 and 3 (register forms only): *213* --> *132*.
// Commuting FMA*132*:
//   operands 1 and 2 (memory & register forms): *132* --> *231*;
//   operands 1 and 3 (register forms only): *132* --> *132*(no changes);
//   operands 2 and 3 (register forms only): *132* --> *213*.
// Commuting FMA*231*:
//   operands 1 and 2 (memory & register forms): *231* --> *132*;
//   operands 1 and 3 (register forms only): *231* --> *213*;
//   operands 2 and 3 (register forms only): *231* --> *231*(no changes).
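//
// As an illustrative example of one such remapping (Intel operand order;
// this is a sketch of the rule above, not text taken from the ISA manual):
// "vfmadd213ps xmm1, xmm2, xmm3" computes xmm1 = xmm2 * xmm1 + xmm3.
// Swapping operands 2 and 3 gives "vfmadd132ps xmm1, xmm3, xmm2", which
// computes xmm1 = xmm1 * xmm2 + xmm3, i.e. the same value, matching the
// *213* --> *132* row above.
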
multiclass fma3p_rm_213<bits<8> opc, string OpcodeStr, RegisterClass RC,
                        ValueType VT, X86MemOperand x86memop, PatFrag MemFrag,
                        SDNode Op, X86FoldableSchedWrite sched> {
  def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst, (VT (Op RC:$src2, RC:$src1, RC:$src3)))]>,
               Sched<[sched]>;

  def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, x86memop:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst, (VT (Op RC:$src2, RC:$src1,
                                      (MemFrag addr:$src3))))]>,
               Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
}

multiclass fma3p_rm_231<bits<8> opc, string OpcodeStr, RegisterClass RC,
                        ValueType VT, X86MemOperand x86memop, PatFrag MemFrag,
                        SDNode Op, X86FoldableSchedWrite sched> {
  let hasSideEffects = 0 in
  def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               []>, Sched<[sched]>;

  def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, x86memop:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst, (VT (Op RC:$src2, (MemFrag addr:$src3),
                                      RC:$src1)))]>,
               Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
}

multiclass fma3p_rm_132<bits<8> opc, string OpcodeStr, RegisterClass RC,
                        ValueType VT, X86MemOperand x86memop, PatFrag MemFrag,
                        SDNode Op, X86FoldableSchedWrite sched> {
  let hasSideEffects = 0 in
  def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               []>, Sched<[sched]>;

  // Pattern is in 312 order so that the load is in a different place from the
  // 213 and 231 patterns; this helps tablegen's duplicate pattern detection.
  def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, x86memop:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst, (VT (Op (MemFrag addr:$src3), RC:$src1,
                                      RC:$src2)))]>,
               Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
}

let Constraints = "$src1 = $dst", hasSideEffects = 0, isCommutable = 1 in
multiclass fma3p_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                       string OpcodeStr, string PackTy, string Suff,
                       PatFrag MemFrag128, PatFrag MemFrag256,
                       SDNode Op, ValueType OpTy128, ValueType OpTy256,
                       X86SchedWriteWidths sched> {
  defm NAME#213#Suff : fma3p_rm_213<opc213, !strconcat(OpcodeStr, "213", PackTy),
                                    VR128, OpTy128, f128mem, MemFrag128, Op, sched.XMM>;
  defm NAME#231#Suff : fma3p_rm_231<opc231, !strconcat(OpcodeStr, "231", PackTy),
                                    VR128, OpTy128, f128mem, MemFrag128, Op, sched.XMM>;
  defm NAME#132#Suff : fma3p_rm_132<opc132, !strconcat(OpcodeStr, "132", PackTy),
                                    VR128, OpTy128, f128mem, MemFrag128, Op, sched.XMM>;

  defm NAME#213#Suff#Y : fma3p_rm_213<opc213, !strconcat(OpcodeStr, "213", PackTy),
                                      VR256, OpTy256, f256mem, MemFrag256, Op, sched.YMM>,
                                      VEX_L;
  defm NAME#231#Suff#Y : fma3p_rm_231<opc231, !strconcat(OpcodeStr, "231", PackTy),
                                      VR256, OpTy256, f256mem, MemFrag256, Op, sched.YMM>,
                                      VEX_L;
  defm NAME#132#Suff#Y : fma3p_rm_132<opc132, !strconcat(OpcodeStr, "132", PackTy),
                                      VR256, OpTy256, f256mem, MemFrag256, Op, sched.YMM>,
                                      VEX_L;
}

// Fused Multiply-Add
let ExeDomain = SSEPackedSingle in {
  defm VFMADD    : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps", "PS",
                               loadv4f32, loadv8f32, X86Fmadd, v4f32, v8f32,
                               SchedWriteFMA>;
  defm VFMSUB    : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps", "PS",
                               loadv4f32, loadv8f32, X86Fmsub, v4f32, v8f32,
                               SchedWriteFMA>;
  defm VFMADDSUB : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps", "PS",
                               loadv4f32, loadv8f32, X86Fmaddsub, v4f32, v8f32,
                               SchedWriteFMA>;
  defm VFMSUBADD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps", "PS",
                               loadv4f32, loadv8f32, X86Fmsubadd, v4f32, v8f32,
                               SchedWriteFMA>;
}

let ExeDomain = SSEPackedDouble in {
  defm VFMADD    : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd", "PD",
                               loadv2f64, loadv4f64, X86Fmadd, v2f64,
                               v4f64, SchedWriteFMA>, VEX_W;
  defm VFMSUB    : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd", "PD",
                               loadv2f64, loadv4f64, X86Fmsub, v2f64,
                               v4f64, SchedWriteFMA>, VEX_W;
  defm VFMADDSUB : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd", "PD",
                               loadv2f64, loadv4f64, X86Fmaddsub,
                               v2f64, v4f64, SchedWriteFMA>, VEX_W;
  defm VFMSUBADD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd", "PD",
                               loadv2f64, loadv4f64, X86Fmsubadd,
                               v2f64, v4f64, SchedWriteFMA>, VEX_W;
}

// Fused Negative Multiply-Add
let ExeDomain = SSEPackedSingle in {
  defm VFNMADD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps", "PS", loadv4f32,
                             loadv8f32, X86Fnmadd, v4f32, v8f32, SchedWriteFMA>;
  defm VFNMSUB : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps", "PS", loadv4f32,
                             loadv8f32, X86Fnmsub, v4f32, v8f32, SchedWriteFMA>;
}

let ExeDomain = SSEPackedDouble in {
  defm VFNMADD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd", "PD", loadv2f64,
                             loadv4f64, X86Fnmadd, v2f64, v4f64, SchedWriteFMA>, VEX_W;
  defm VFNMSUB : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd", "PD", loadv2f64,
                             loadv4f64, X86Fnmsub, v2f64, v4f64, SchedWriteFMA>, VEX_W;
}

// All source register operands of FMA opcodes defined in the fma3s_rm
// multiclass can be commuted. In many cases such a commute transformation
// requires an opcode adjustment; for example, commuting operands 1 and 2 in
// the FMA*132 form would require an opcode change to FMA*231:
//                  FMA*132* reg1, reg2, reg3; // reg1 * reg3 + reg2;
//    -->
//                  FMA*231* reg2, reg1, reg3; // reg1 * reg3 + reg2;
// Please see the more detailed comment at the very beginning of the section
// defining the FMA3 opcodes above.
multiclass fma3s_rm_213<bits<8> opc, string OpcodeStr,
                        X86MemOperand x86memop, RegisterClass RC,
                        SDPatternOperator OpNode,
                        X86FoldableSchedWrite sched> {
  def r : FMA3S<opc, MRMSrcReg, (outs RC:$dst),
                (ins RC:$src1, RC:$src2, RC:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                [(set RC:$dst, (OpNode RC:$src2, RC:$src1, RC:$src3))]>,
                Sched<[sched]>;

  def m : FMA3S<opc, MRMSrcMem, (outs RC:$dst),
                (ins RC:$src1, RC:$src2, x86memop:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                [(set RC:$dst,
                  (OpNode RC:$src2, RC:$src1, (load addr:$src3)))]>,
                Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
}

multiclass fma3s_rm_231<bits<8> opc, string OpcodeStr,
                        X86MemOperand x86memop, RegisterClass RC,
                        SDPatternOperator OpNode, X86FoldableSchedWrite sched> {
  let hasSideEffects = 0 in
  def r : FMA3S<opc, MRMSrcReg, (outs RC:$dst),
                (ins RC:$src1, RC:$src2, RC:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                []>, Sched<[sched]>;

  def m : FMA3S<opc, MRMSrcMem, (outs RC:$dst),
                (ins RC:$src1, RC:$src2, x86memop:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                [(set RC:$dst,
                  (OpNode RC:$src2, (load addr:$src3), RC:$src1))]>,
                Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
}

multiclass fma3s_rm_132<bits<8> opc, string OpcodeStr,
                        X86MemOperand x86memop, RegisterClass RC,
                        SDPatternOperator OpNode, X86FoldableSchedWrite sched> {
  let hasSideEffects = 0 in
  def r : FMA3S<opc, MRMSrcReg, (outs RC:$dst),
                (ins RC:$src1, RC:$src2, RC:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                []>, Sched<[sched]>;

  // Pattern is in 312 order so that the load is in a different place from the
  // 213 and 231 patterns; this helps tablegen's duplicate pattern detection.
  def m : FMA3S<opc, MRMSrcMem, (outs RC:$dst),
                (ins RC:$src1, RC:$src2, x86memop:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                [(set RC:$dst,
                  (OpNode (load addr:$src3), RC:$src1, RC:$src2))]>,
                Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
}

let Constraints = "$src1 = $dst", isCommutable = 1, hasSideEffects = 0 in
multiclass fma3s_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                       string OpStr, string PackTy, string Suff,
                       SDNode OpNode, RegisterClass RC,
                       X86MemOperand x86memop, X86FoldableSchedWrite sched> {
  defm NAME#213#Suff : fma3s_rm_213<opc213, !strconcat(OpStr, "213", PackTy),
                                    x86memop, RC, OpNode, sched>;
  defm NAME#231#Suff : fma3s_rm_231<opc231, !strconcat(OpStr, "231", PackTy),
                                    x86memop, RC, OpNode, sched>;
  defm NAME#132#Suff : fma3s_rm_132<opc132, !strconcat(OpStr, "132", PackTy),
                                    x86memop, RC, OpNode, sched>;
}

// These FMA*_Int instructions are defined specially for being used when
// the scalar FMA intrinsics are lowered to machine instructions, and in that
// sense they are similar to the existing ADD*_Int, SUB*_Int, MUL*_Int, etc.
// instructions.
//
// All of the FMA*_Int opcodes are defined as commutable here.
// Commuting the 2nd and 3rd source register operands of FMAs is quite trivial
// and the corresponding optimizations have been developed.
// Commuting the 1st operand of FMA*_Int requires some additional analysis:
// the commute optimization is legal only if all users of FMA*_Int use only
// the lowest element of the FMA*_Int instruction. Even though such analysis
// may not be implemented yet, we allow the routines doing the actual commute
// transformation to decide whether an instruction is commutable or not.
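// The intuition behind the first-operand restriction (informal, not a
// description of the implemented check): operand 1 is tied to the
// destination, so the upper vector elements of the result are passed through
// from it. Moving a different register into operand 1 changes those
// pass-through elements, which is harmless only if every user of the result
// reads just the lowest element.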
let Constraints = "$src1 = $dst", isCommutable = 1, isCodeGenOnly = 1,
    hasSideEffects = 0 in
multiclass fma3s_rm_int<bits<8> opc, string OpcodeStr,
                        Operand memopr, RegisterClass RC,
                        X86FoldableSchedWrite sched> {
  def r_Int : FMA3S_Int<opc, MRMSrcReg, (outs RC:$dst),
                        (ins RC:$src1, RC:$src2, RC:$src3),
                        !strconcat(OpcodeStr,
                                   "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                        []>, Sched<[sched]>;

  def m_Int : FMA3S_Int<opc, MRMSrcMem, (outs RC:$dst),
                        (ins RC:$src1, RC:$src2, memopr:$src3),
                        !strconcat(OpcodeStr,
                                   "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                        []>, Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
}

// The FMA 213 form is created for lowering of scalar FMA intrinsics
// to machine instructions.
// The FMA 132 form can be obtained trivially by commuting the 2nd and 3rd
// operands of the FMA 213 form.
// The FMA 231 form can be obtained only by commuting the 1st operand of the
// 213 or 132 forms, which is possible only after special analysis of all uses
// of the initial instruction. Such analysis does not exist yet, so the 231
// form of the FMA*_Int instructions is introduced using the optimistic
// assumption that such analysis will be implemented eventually.
multiclass fma3s_int_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                           string OpStr, string PackTy, string Suff,
                           RegisterClass RC, Operand memop,
                           X86FoldableSchedWrite sched> {
  defm NAME#132#Suff : fma3s_rm_int<opc132, !strconcat(OpStr, "132", PackTy),
                                    memop, RC, sched>;
  defm NAME#213#Suff : fma3s_rm_int<opc213, !strconcat(OpStr, "213", PackTy),
                                    memop, RC, sched>;
  defm NAME#231#Suff : fma3s_rm_int<opc231, !strconcat(OpStr, "231", PackTy),
                                    memop, RC, sched>;
}

multiclass fma3s<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                 string OpStr, SDNode OpNode, X86FoldableSchedWrite sched> {
  let ExeDomain = SSEPackedSingle in
  defm NAME : fma3s_forms<opc132, opc213, opc231, OpStr, "ss", "SS", OpNode,
                          FR32, f32mem, sched>,
              fma3s_int_forms<opc132, opc213, opc231, OpStr, "ss", "SS",
                              VR128, ssmem, sched>;

  let ExeDomain = SSEPackedDouble in
  defm NAME : fma3s_forms<opc132, opc213, opc231, OpStr, "sd", "SD", OpNode,
                          FR64, f64mem, sched>,
              fma3s_int_forms<opc132, opc213, opc231, OpStr, "sd", "SD",
                              VR128, sdmem, sched>, VEX_W;
}

defm VFMADD : fma3s<0x99, 0xA9, 0xB9, "vfmadd", X86Fmadd,
                    SchedWriteFMA.Scl>, VEX_LIG;
defm VFMSUB : fma3s<0x9B, 0xAB, 0xBB, "vfmsub", X86Fmsub,
                    SchedWriteFMA.Scl>, VEX_LIG;

defm VFNMADD : fma3s<0x9D, 0xAD, 0xBD, "vfnmadd", X86Fnmadd,
                     SchedWriteFMA.Scl>, VEX_LIG;
defm VFNMSUB : fma3s<0x9F, 0xAF, 0xBF, "vfnmsub", X86Fnmsub,
                     SchedWriteFMA.Scl>, VEX_LIG;

multiclass scalar_fma_patterns<SDNode Op, string Prefix, string Suffix,
                               SDNode Move, ValueType VT, ValueType EltVT,
                               RegisterClass RC, PatFrag mem_frag> {
  let Predicates = [HasFMA, NoAVX512] in {
    def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
                (Op RC:$src2,
                    (EltVT (extractelt (VT VR128:$src1), (iPTR 0))),
                    RC:$src3))))),
              (!cast<Instruction>(Prefix#"213"#Suffix#"r_Int")
               VR128:$src1, (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
               (VT (COPY_TO_REGCLASS RC:$src3, VR128)))>;

    def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
                (Op RC:$src2, RC:$src3,
                    (EltVT (extractelt (VT VR128:$src1), (iPTR 0)))))))),
              (!cast<Instruction>(Prefix#"231"#Suffix#"r_Int")
               VR128:$src1, (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
               (VT (COPY_TO_REGCLASS RC:$src3, VR128)))>;

    def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
                (Op RC:$src2,
                    (EltVT (extractelt (VT VR128:$src1), (iPTR 0))),
                    (mem_frag addr:$src3)))))),
              (!cast<Instruction>(Prefix#"213"#Suffix#"m_Int")
               VR128:$src1, (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
               addr:$src3)>;

    def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
                (Op (EltVT (extractelt (VT VR128:$src1), (iPTR 0))),
                    (mem_frag addr:$src3), RC:$src2))))),
              (!cast<Instruction>(Prefix#"132"#Suffix#"m_Int")
               VR128:$src1, (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
               addr:$src3)>;

    def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
                (Op RC:$src2, (mem_frag addr:$src3),
                    (EltVT (extractelt (VT VR128:$src1), (iPTR 0)))))))),
              (!cast<Instruction>(Prefix#"231"#Suffix#"m_Int")
               VR128:$src1, (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
               addr:$src3)>;
  }
}

defm : scalar_fma_patterns<X86Fmadd, "VFMADD", "SS", X86Movss, v4f32, f32, FR32, loadf32>;
defm : scalar_fma_patterns<X86Fmsub, "VFMSUB", "SS", X86Movss, v4f32, f32, FR32, loadf32>;
defm : scalar_fma_patterns<X86Fnmadd, "VFNMADD", "SS", X86Movss, v4f32, f32, FR32, loadf32>;
defm : scalar_fma_patterns<X86Fnmsub, "VFNMSUB", "SS", X86Movss, v4f32, f32, FR32, loadf32>;

defm : scalar_fma_patterns<X86Fmadd, "VFMADD", "SD", X86Movsd, v2f64, f64, FR64, loadf64>;
defm : scalar_fma_patterns<X86Fmsub, "VFMSUB", "SD", X86Movsd, v2f64, f64, FR64, loadf64>;
defm : scalar_fma_patterns<X86Fnmadd, "VFNMADD", "SD", X86Movsd, v2f64, f64, FR64, loadf64>;
defm : scalar_fma_patterns<X86Fnmsub, "VFNMSUB", "SD", X86Movsd, v2f64, f64, FR64, loadf64>;

//===----------------------------------------------------------------------===//
// FMA4 - AMD 4 operand Fused Multiply-Add instructions
//===----------------------------------------------------------------------===//

multiclass fma4s<bits<8> opc, string OpcodeStr, RegisterClass RC,
                 X86MemOperand x86memop, ValueType OpVT, SDNode OpNode,
                 PatFrag mem_frag, X86FoldableSchedWrite sched> {
  let isCommutable = 1 in
  def rr : FMA4S<opc, MRMSrcRegOp4, (outs RC:$dst),
           (ins RC:$src1, RC:$src2, RC:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set RC:$dst,
             (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>, VEX_W, VEX_LIG,
           Sched<[sched]>;
  def rm : FMA4S<opc, MRMSrcMemOp4, (outs RC:$dst),
           (ins RC:$src1, RC:$src2, x86memop:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set RC:$dst, (OpNode RC:$src1, RC:$src2,
                           (mem_frag addr:$src3)))]>, VEX_W, VEX_LIG,
           Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
  def mr : FMA4S<opc, MRMSrcMem, (outs RC:$dst),
           (ins RC:$src1, x86memop:$src2, RC:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set RC:$dst,
             (OpNode RC:$src1, (mem_frag addr:$src2), RC:$src3))]>, VEX_LIG,
           Sched<[sched.Folded, sched.ReadAfterFold,
                  // x86memop:$src2
                  ReadDefault, ReadDefault, ReadDefault, ReadDefault,
                  ReadDefault,
                  // RC:$src3
                  sched.ReadAfterFold]>;
  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
  def rr_REV : FMA4S<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
               "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
               VEX_LIG, FoldGenData<NAME#rr>, Sched<[sched]>;
}

multiclass fma4s_int<bits<8> opc, string OpcodeStr, Operand memop,
                     ValueType VT, X86FoldableSchedWrite sched> {
  let isCodeGenOnly = 1, hasSideEffects = 0 in {
  def rr_Int : FMA4S_Int<opc, MRMSrcRegOp4, (outs VR128:$dst),
               (ins VR128:$src1, VR128:$src2, VR128:$src3),
               !strconcat(OpcodeStr,
               "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
               []>, VEX_W, VEX_LIG, Sched<[sched]>;
  def rm_Int : FMA4S_Int<opc, MRMSrcMemOp4, (outs VR128:$dst),
               (ins VR128:$src1, VR128:$src2, memop:$src3),
               !strconcat(OpcodeStr,
               "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
               []>, VEX_W, VEX_LIG,
               Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
  def mr_Int : FMA4S_Int<opc, MRMSrcMem, (outs VR128:$dst),
               (ins VR128:$src1, memop:$src2, VR128:$src3),
               !strconcat(OpcodeStr,
               "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
               []>,
               VEX_LIG, Sched<[sched.Folded, sched.ReadAfterFold,
                               // memop:$src2
                               ReadDefault, ReadDefault, ReadDefault,
                               ReadDefault, ReadDefault,
                               // VR128:$src3
                               sched.ReadAfterFold]>;
  def rr_Int_REV : FMA4S_Int<opc, MRMSrcReg, (outs VR128:$dst),
               (ins VR128:$src1, VR128:$src2, VR128:$src3),
               !strconcat(OpcodeStr,
               "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
               []>, VEX_LIG, FoldGenData<NAME#rr_Int>, Sched<[sched]>;
  } // isCodeGenOnly = 1
}

multiclass fma4p<bits<8> opc, string OpcodeStr, SDNode OpNode,
                 ValueType OpVT128, ValueType OpVT256,
                 PatFrag ld_frag128, PatFrag ld_frag256,
                 X86SchedWriteWidths sched> {
  let isCommutable = 1 in
  def rr : FMA4<opc, MRMSrcRegOp4, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, VR128:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set VR128:$dst,
             (OpVT128 (OpNode VR128:$src1, VR128:$src2, VR128:$src3)))]>,
           VEX_W, Sched<[sched.XMM]>;
  def rm : FMA4<opc, MRMSrcMemOp4, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, f128mem:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set VR128:$dst, (OpNode VR128:$src1, VR128:$src2,
                              (ld_frag128 addr:$src3)))]>, VEX_W,
           Sched<[sched.XMM.Folded, sched.XMM.ReadAfterFold, sched.XMM.ReadAfterFold]>;
  def mr : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, f128mem:$src2, VR128:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set VR128:$dst,
             (OpNode VR128:$src1, (ld_frag128 addr:$src2), VR128:$src3))]>,
           Sched<[sched.XMM.Folded, sched.XMM.ReadAfterFold,
                  // f128mem:$src2
                  ReadDefault, ReadDefault, ReadDefault, ReadDefault,
                  ReadDefault,
                  // VR128:$src3
                  sched.XMM.ReadAfterFold]>;
  let isCommutable = 1 in
  def Yrr : FMA4<opc, MRMSrcRegOp4, (outs VR256:$dst),
            (ins VR256:$src1, VR256:$src2, VR256:$src3),
            !strconcat(OpcodeStr,
            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
            [(set VR256:$dst,
              (OpVT256 (OpNode VR256:$src1, VR256:$src2, VR256:$src3)))]>,
            VEX_W, VEX_L, Sched<[sched.YMM]>;
  def Yrm : FMA4<opc, MRMSrcMemOp4, (outs VR256:$dst),
            (ins VR256:$src1, VR256:$src2, f256mem:$src3),
            !strconcat(OpcodeStr,
            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
            [(set VR256:$dst, (OpNode VR256:$src1, VR256:$src2,
                               (ld_frag256 addr:$src3)))]>, VEX_W, VEX_L,
            Sched<[sched.YMM.Folded, sched.YMM.ReadAfterFold, sched.YMM.ReadAfterFold]>;
  def Ymr : FMA4<opc, MRMSrcMem, (outs VR256:$dst),
            (ins VR256:$src1, f256mem:$src2, VR256:$src3),
            !strconcat(OpcodeStr,
            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
            [(set VR256:$dst, (OpNode VR256:$src1,
                               (ld_frag256 addr:$src2), VR256:$src3))]>, VEX_L,
            Sched<[sched.YMM.Folded, sched.YMM.ReadAfterFold,
                   // f256mem:$src2
                   ReadDefault, ReadDefault, ReadDefault, ReadDefault,
                   ReadDefault,
                   // VR256:$src3
                   sched.YMM.ReadAfterFold]>;

  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
    def rr_REV : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src1, VR128:$src2, VR128:$src3),
                 !strconcat(OpcodeStr,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
                 Sched<[sched.XMM]>, FoldGenData<NAME#rr>;
    def Yrr_REV : FMA4<opc, MRMSrcReg, (outs VR256:$dst),
                  (ins VR256:$src1, VR256:$src2, VR256:$src3),
                  !strconcat(OpcodeStr,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
                  VEX_L, Sched<[sched.YMM]>, FoldGenData<NAME#Yrr>;
  } // isCodeGenOnly = 1
}

let ExeDomain = SSEPackedSingle in {
  // Scalar Instructions
  defm VFMADDSS4  : fma4s<0x6A, "vfmaddss", FR32, f32mem, f32, X86Fmadd, loadf32,
                          SchedWriteFMA.Scl>,
                    fma4s_int<0x6A, "vfmaddss", ssmem, v4f32,
                              SchedWriteFMA.Scl>;
  defm VFMSUBSS4  : fma4s<0x6E, "vfmsubss", FR32, f32mem, f32, X86Fmsub, loadf32,
                          SchedWriteFMA.Scl>,
                    fma4s_int<0x6E, "vfmsubss", ssmem, v4f32,
                              SchedWriteFMA.Scl>;
  defm VFNMADDSS4 : fma4s<0x7A, "vfnmaddss", FR32, f32mem, f32,
                          X86Fnmadd, loadf32, SchedWriteFMA.Scl>,
                    fma4s_int<0x7A, "vfnmaddss", ssmem, v4f32,
                              SchedWriteFMA.Scl>;
  defm VFNMSUBSS4 : fma4s<0x7E, "vfnmsubss", FR32, f32mem, f32,
                          X86Fnmsub, loadf32, SchedWriteFMA.Scl>,
                    fma4s_int<0x7E, "vfnmsubss", ssmem, v4f32,
                              SchedWriteFMA.Scl>;
  // Packed Instructions
  defm VFMADDPS4    : fma4p<0x68, "vfmaddps", X86Fmadd, v4f32, v8f32,
                            loadv4f32, loadv8f32, SchedWriteFMA>;
  defm VFMSUBPS4    : fma4p<0x6C, "vfmsubps", X86Fmsub, v4f32, v8f32,
                            loadv4f32, loadv8f32, SchedWriteFMA>;
  defm VFNMADDPS4   : fma4p<0x78, "vfnmaddps", X86Fnmadd, v4f32, v8f32,
                            loadv4f32, loadv8f32, SchedWriteFMA>;
  defm VFNMSUBPS4   : fma4p<0x7C, "vfnmsubps", X86Fnmsub, v4f32, v8f32,
                            loadv4f32, loadv8f32, SchedWriteFMA>;
  defm VFMADDSUBPS4 : fma4p<0x5C, "vfmaddsubps", X86Fmaddsub, v4f32, v8f32,
                            loadv4f32, loadv8f32, SchedWriteFMA>;
  defm VFMSUBADDPS4 : fma4p<0x5E, "vfmsubaddps", X86Fmsubadd, v4f32, v8f32,
                            loadv4f32, loadv8f32, SchedWriteFMA>;
}

let ExeDomain = SSEPackedDouble in {
  // Scalar Instructions
  defm VFMADDSD4  : fma4s<0x6B, "vfmaddsd", FR64, f64mem, f64, X86Fmadd, loadf64,
                          SchedWriteFMA.Scl>,
                    fma4s_int<0x6B, "vfmaddsd", sdmem, v2f64,
                              SchedWriteFMA.Scl>;
  defm VFMSUBSD4  : fma4s<0x6F, "vfmsubsd", FR64, f64mem, f64, X86Fmsub, loadf64,
                          SchedWriteFMA.Scl>,
                    fma4s_int<0x6F, "vfmsubsd", sdmem, v2f64,
                              SchedWriteFMA.Scl>;
  defm VFNMADDSD4 : fma4s<0x7B, "vfnmaddsd", FR64, f64mem, f64,
                          X86Fnmadd, loadf64, SchedWriteFMA.Scl>,
                    fma4s_int<0x7B, "vfnmaddsd", sdmem, v2f64,
                              SchedWriteFMA.Scl>;
  defm VFNMSUBSD4 : fma4s<0x7F, "vfnmsubsd", FR64, f64mem, f64,
                          X86Fnmsub, loadf64, SchedWriteFMA.Scl>,
                    fma4s_int<0x7F, "vfnmsubsd", sdmem, v2f64,
                              SchedWriteFMA.Scl>;
  // Packed Instructions
  defm VFMADDPD4    : fma4p<0x69, "vfmaddpd", X86Fmadd, v2f64, v4f64,
                            loadv2f64, loadv4f64, SchedWriteFMA>;
  defm VFMSUBPD4    : fma4p<0x6D, "vfmsubpd", X86Fmsub, v2f64, v4f64,
                            loadv2f64, loadv4f64, SchedWriteFMA>;
  defm VFNMADDPD4   : fma4p<0x79, "vfnmaddpd", X86Fnmadd, v2f64, v4f64,
                            loadv2f64, loadv4f64, SchedWriteFMA>;
  defm VFNMSUBPD4   : fma4p<0x7D, "vfnmsubpd", X86Fnmsub, v2f64, v4f64,
                            loadv2f64, loadv4f64, SchedWriteFMA>;
  defm VFMADDSUBPD4 : fma4p<0x5D, "vfmaddsubpd", X86Fmaddsub, v2f64, v4f64,
                            loadv2f64, loadv4f64, SchedWriteFMA>;
  defm VFMSUBADDPD4 : fma4p<0x5F, "vfmsubaddpd", X86Fmsubadd, v2f64, v4f64,
                            loadv2f64, loadv4f64, SchedWriteFMA>;
}

multiclass scalar_fma4_patterns<SDNode Op, string Name,
                                ValueType VT, ValueType EltVT,
                                RegisterClass RC, PatFrag mem_frag> {
  let Predicates = [HasFMA4] in {
    def : Pat<(VT (X86vzmovl (VT (scalar_to_vector
                                  (Op RC:$src1, RC:$src2, RC:$src3))))),
              (!cast<Instruction>(Name#"rr_Int")
               (VT (COPY_TO_REGCLASS RC:$src1, VR128)),
               (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
               (VT (COPY_TO_REGCLASS RC:$src3, VR128)))>;

    def : Pat<(VT (X86vzmovl (VT (scalar_to_vector
                                  (Op RC:$src1, RC:$src2,
                                      (mem_frag addr:$src3)))))),
              (!cast<Instruction>(Name#"rm_Int")
               (VT (COPY_TO_REGCLASS RC:$src1, VR128)),
               (VT (COPY_TO_REGCLASS RC:$src2, VR128)), addr:$src3)>;

    def : Pat<(VT (X86vzmovl (VT (scalar_to_vector
                                  (Op RC:$src1, (mem_frag addr:$src2),
                                      RC:$src3))))),
              (!cast<Instruction>(Name#"mr_Int")
               (VT (COPY_TO_REGCLASS RC:$src1, VR128)), addr:$src2,
               (VT (COPY_TO_REGCLASS RC:$src3, VR128)))>;
  }
}

defm : scalar_fma4_patterns<X86Fmadd, "VFMADDSS4", v4f32, f32, FR32, loadf32>;
defm : scalar_fma4_patterns<X86Fmsub, "VFMSUBSS4", v4f32, f32, FR32, loadf32>;
defm : scalar_fma4_patterns<X86Fnmadd, "VFNMADDSS4", v4f32, f32, FR32, loadf32>;
defm : scalar_fma4_patterns<X86Fnmsub, "VFNMSUBSS4", v4f32, f32, FR32, loadf32>;

defm : scalar_fma4_patterns<X86Fmadd, "VFMADDSD4", v2f64, f64, FR64, loadf64>;
defm : scalar_fma4_patterns<X86Fmsub, "VFMSUBSD4", v2f64, f64, FR64, loadf64>;
defm : scalar_fma4_patterns<X86Fnmadd, "VFNMADDSD4", v2f64, f64, FR64, loadf64>;
defm : scalar_fma4_patterns<X86Fnmsub, "VFNMSUBSD4", v2f64, f64, FR64, loadf64>;