1 //===- ARMInstrVFP.td - VFP support for ARM ----------------*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the ARM VFP instruction set.
12 //===----------------------------------------------------------------------===//
// SelectionDAG type profiles for the ARM-specific ISD nodes matched by the
// VFP instruction patterns later in this file.
// FP -> int conversions produce their integer result in an f32 (S register).
14 def SDT_FTOI : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisFP<1>]>;
// int -> FP conversions consume the integer input from an f32 (S register).
15 def SDT_ITOF : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>;
// Compare-against-zero: one FP operand, no results (flags go to FPSCR).
16 def SDT_CMPFP0 : SDTypeProfile<0, 1, [SDTCisFP<0>]>;
// Builds an f64 from two i32 operands.
// NOTE(review): the operand-constraint list continues on a line that is not
// visible in this chunk — confirm the closing of this profile upstream.
17 def SDT_VMOVDRR : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
// Custom DAG nodes created by ARM ISel lowering; each is selected by one of
// the instructions defined below.
20 def arm_ftoui : SDNode<"ARMISD::FTOUI", SDT_FTOI>;
21 def arm_ftosi : SDNode<"ARMISD::FTOSI", SDT_FTOI>;
22 def arm_sitof : SDNode<"ARMISD::SITOF", SDT_ITOF>;
23 def arm_uitof : SDNode<"ARMISD::UITOF", SDT_ITOF>;
// Transfers the FP comparison flags; chained through the in/out glue.
24 def arm_fmstat : SDNode<"ARMISD::FMSTAT", SDTNone, [SDNPInFlag, SDNPOutFlag]>;
25 def arm_cmpfp : SDNode<"ARMISD::CMPFP", SDT_ARMCmp, [SDNPOutFlag]>;
26 def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0", SDT_CMPFP0, [SDNPOutFlag]>;
27 def arm_fmdrr : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
30 //===----------------------------------------------------------------------===//
31 // Operand Definitions.
// FP immediate operands that match only constants representable in the VFP3
// 8-bit encoded-immediate form; ARM::getVFPf32Imm returns -1 when the value
// cannot be encoded, rejecting the match.
// NOTE(review): the PatLeaf predicate close ("}]>") and the records' closing
// braces are not visible in this chunk — lines were dropped here.
34 def vfp_f32imm : Operand<f32>,
35 PatLeaf<(f32 fpimm), [{
36 return ARM::getVFPf32Imm(N->getValueAPF()) != -1;
38 let PrintMethod = "printVFPf32ImmOperand";
// Same as vfp_f32imm, but for f64 constants (ARM::getVFPf64Imm).
41 def vfp_f64imm : Operand<f64>,
42 PatLeaf<(f64 fpimm), [{
43 return ARM::getVFPf64Imm(N->getValueAPF()) != -1;
45 let PrintMethod = "printVFPf64ImmOperand";
49 //===----------------------------------------------------------------------===//
50 // Load / store Instructions.
// VFP loads can be folded into uses and rematerialized instead of spilled.
53 let canFoldAsLoad = 1, isReMaterializable = 1 in {
// VLDRD: load a double-precision (f64) value from an addrmode5 address.
55 def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
56 IIC_fpLoad64, "vldr", ".64\t$Dd, $addr",
57 [(set DPR:$Dd, (f64 (load addrmode5:$addr)))]> {
58 // Instruction operands.
// NOTE(review): the bits<N> operand declarations and parts of the encoding
// (and this record's closing brace) are missing from this chunk.
62 // Encode instruction operands.
63 let Inst{23} = addr{8}; // U (add = (U == '1'))
65 let Inst{19-16} = addr{12-9}; // Rn
66 let Inst{15-12} = Dd{3-0};
67 let Inst{7-0} = addr{7-0}; // imm8
// VLDRS: load a single-precision (f32) value from an addrmode5 address.
70 def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
71 IIC_fpLoad32, "vldr", ".32\t$Sd, $addr",
72 [(set SPR:$Sd, (load addrmode5:$addr))]> {
73 // Instruction operands.
77 // Encode instruction operands.
78 let Inst{23} = addr{8}; // U (add = (U == '1'))
80 let Inst{19-16} = addr{12-9}; // Rn
// Single-precision register number: upper 4 bits here; the low bit is
// presumably encoded elsewhere (not visible in this chunk) — verify.
81 let Inst{15-12} = Sd{4-1};
82 let Inst{7-0} = addr{7-0}; // imm8
85 } // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'
// VSTRD / VSTRS: store a double / single precision value to an addrmode5
// (base register + scaled 8-bit immediate offset) address.
87 def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$src, addrmode5:$addr),
88 IIC_fpStore64, "vstr", ".64\t$src, $addr",
89 [(store (f64 DPR:$src), addrmode5:$addr)]>;
91 def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$src, addrmode5:$addr),
92 IIC_fpStore32, "vstr", ".32\t$src, $addr",
93 [(store SPR:$src, addrmode5:$addr)]>;
95 //===----------------------------------------------------------------------===//
96 // Load / store multiple Instructions.
// vldm: load multiple FP registers. These take a variable register list
// (variable_ops) and are only created by codegen, never parsed.
99 let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1,
100 isCodeGenOnly = 1 in {
101 def VLDMD : AXDI4<(outs), (ins GPR:$Rn, ldstm_mode:$amode, pred:$p,
102 reglist:$dsts, variable_ops),
103 IndexModeNone, IIC_fpLoad_m,
104 "vldm${amode}${p}\t$Rn, $dsts", "", []> {
108 def VLDMS : AXSI4<(outs), (ins GPR:$Rn, ldstm_mode:$amode, pred:$p,
109 reglist:$dsts, variable_ops),
110 IndexModeNone, IIC_fpLoad_m,
111 "vldm${amode}${p}\t$Rn, $dsts", "", []> {
// _UPD variants write the updated base address back to $wb ("vldm ... Rn!").
// NOTE(review): the record bodies and constraint strings between these defs
// are partially missing from this chunk.
115 def VLDMD_UPD : AXDI4<(outs GPR:$wb), (ins GPR:$Rn, ldstm_mode:$amode, pred:$p,
116 reglist:$dsts, variable_ops),
117 IndexModeUpd, IIC_fpLoad_mu,
118 "vldm${amode}${p}\t$Rn!, $dsts",
123 def VLDMS_UPD : AXSI4<(outs GPR:$wb), (ins GPR:$Rn, ldstm_mode:$amode, pred:$p,
124 reglist:$dsts, variable_ops),
125 IndexModeUpd, IIC_fpLoad_mu,
126 "vldm${amode}${p}\t$Rn!, $dsts",
130 } // mayLoad, neverHasSideEffects, hasExtraDefRegAllocReq
// vstm: store multiple FP registers; mirrors the vldm definitions above.
132 let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1,
133 isCodeGenOnly = 1 in {
134 def VSTMD : AXDI4<(outs), (ins GPR:$Rn, ldstm_mode:$amode, pred:$p,
135 reglist:$srcs, variable_ops),
136 IndexModeNone, IIC_fpStore_m,
137 "vstm${amode}${p}\t$Rn, $srcs", "", []> {
141 def VSTMS : AXSI4<(outs), (ins GPR:$Rn, ldstm_mode:$amode, pred:$p,
142 reglist:$srcs, variable_ops), IndexModeNone,
144 "vstm${amode}${p}\t$Rn, $srcs", "", []> {
148 def VSTMD_UPD : AXDI4<(outs GPR:$wb), (ins GPR:$Rn, ldstm_mode:$amode, pred:$p,
149 reglist:$srcs, variable_ops),
150 IndexModeUpd, IIC_fpStore_mu,
151 "vstm${amode}${p}\t$Rn!, $srcs",
156 def VSTMS_UPD : AXSI4<(outs GPR:$wb), (ins GPR:$Rn, ldstm_mode:$amode, pred:$p,
157 reglist:$srcs, variable_ops),
158 IndexModeUpd, IIC_fpStore_mu,
159 "vstm${amode}${p}\t$Rn!, $srcs",
163 } // mayStore, neverHasSideEffects, hasExtraSrcRegAllocReq
165 // FLDMX, FSTMX - mixing S/D registers for pre-armv6 cores
167 //===----------------------------------------------------------------------===//
168 // FP Binary Operations.
// Two-operand arithmetic. Each operation has a double-precision (ADbI, DPR
// registers) and a single-precision (ASbI/ASbIn, SPR registers) form.
171 def VADDD : ADbI<0b11100, 0b11, 0, 0,
172 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
173 IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
174 [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>;
176 def VADDS : ASbIn<0b11100, 0b11, 0, 0,
177 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
178 IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
179 [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]>;
181 def VSUBD : ADbI<0b11100, 0b11, 1, 0,
182 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
183 IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
184 [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>;
186 def VSUBS : ASbIn<0b11100, 0b11, 1, 0,
187 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
188 IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
189 [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]>;
191 def VDIVD : ADbI<0b11101, 0b00, 0, 0,
192 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
193 IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
194 [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>;
196 def VDIVS : ASbI<0b11101, 0b00, 0, 0,
197 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
198 IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
199 [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>;
201 def VMULD : ADbI<0b11100, 0b10, 0, 0,
202 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
203 IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
204 [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>;
206 def VMULS : ASbIn<0b11100, 0b10, 0, 0,
207 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
208 IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
209 [(set SPR:$Sd, (fmul SPR:$Sn, SPR:$Sm))]>;
// vnmul: negated multiply, matched directly from (fneg (fmul ...)).
211 def VNMULD : ADbI<0b11100, 0b10, 1, 0,
212 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
213 IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
214 [(set DPR:$Dd, (fneg (fmul DPR:$Dn, (f64 DPR:$Dm))))]>;
216 def VNMULS : ASbI<0b11100, 0b10, 1, 0,
217 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
218 IIC_fpMUL32, "vnmul", ".f32\t$Sd, $Sn, $Sm",
219 [(set SPR:$Sd, (fneg (fmul SPR:$Sn, SPR:$Sm)))]>;
// (fneg a) * b == -(a * b) only when sign-dependent rounding is not honored.
221 // Match reassociated forms only if not sign dependent rounding.
222 def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
223 (VNMULD DPR:$a, DPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
224 def : Pat<(fmul (fneg SPR:$a), SPR:$b),
225 (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
227 // These are encoded as unary instructions.
// FP compares write their result flags to FPSCR (read back via FMSTAT).
// The 'E' variants (vcmpe) differ from vcmp in encoding bits {7-6}; only the
// vcmpe forms carry selection patterns, the vcmp forms are disassembly-only.
228 let Defs = [FPSCR] in {
229 def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
230 (outs), (ins DPR:$Dd, DPR:$Dm),
231 IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm",
232 [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm))]>;
234 def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0,
235 (outs), (ins SPR:$Sd, SPR:$Sm),
236 IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm",
237 [(arm_cmpfp SPR:$Sd, SPR:$Sm)]>;
239 // FIXME: Verify encoding after integrated assembler is working.
240 def VCMPD : ADuI<0b11101, 0b11, 0b0100, 0b01, 0,
241 (outs), (ins DPR:$Dd, DPR:$Dm),
242 IIC_fpCMP64, "vcmp", ".f64\t$Dd, $Dm",
243 [/* For disassembly only; pattern left blank */]>;
245 def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
246 (outs), (ins SPR:$Sd, SPR:$Sm),
247 IIC_fpCMP32, "vcmp", ".f32\t$Sd, $Sm",
248 [/* For disassembly only; pattern left blank */]>;
// NOTE(review): the closing brace of this 'let Defs = [FPSCR]' region is not
// visible in this chunk.
251 //===----------------------------------------------------------------------===//
252 // FP Unary Operations.
// vabs: absolute value, double and single precision.
255 def VABSD : ADuI<0b11101, 0b11, 0b0000, 0b11, 0,
256 (outs DPR:$Dd), (ins DPR:$Dm),
257 IIC_fpUNA64, "vabs", ".f64\t$Dd, $Dm",
258 [(set DPR:$Dd, (fabs (f64 DPR:$Dm)))]>;
260 def VABSS : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,
261 (outs SPR:$Sd), (ins SPR:$Sm),
262 IIC_fpUNA32, "vabs", ".f32\t$Sd, $Sm",
263 [(set SPR:$Sd, (fabs SPR:$Sm))]>;
// Compare against #0 (immediate-zero compares); flags go to FPSCR.
// Bits {3-0} are forced to zero because there is no second register operand.
265 let Defs = [FPSCR] in {
266 def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
267 (outs), (ins DPR:$Dd),
268 IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0",
269 [(arm_cmpfp0 (f64 DPR:$Dd))]> {
270 let Inst{3-0} = 0b0000;
274 def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0,
275 (outs), (ins SPR:$Sd),
276 IIC_fpCMP32, "vcmpe", ".f32\t$Sd, #0",
277 [(arm_cmpfp0 SPR:$Sd)]> {
278 let Inst{3-0} = 0b0000;
282 // FIXME: Verify encoding after integrated assembler is working.
// Non-'E' compare-with-zero variants: disassembly only, no patterns.
283 def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
284 (outs), (ins DPR:$Dd),
285 IIC_fpCMP64, "vcmp", ".f64\t$Dd, #0",
286 [/* For disassembly only; pattern left blank */]> {
287 let Inst{3-0} = 0b0000;
291 def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
292 (outs), (ins SPR:$Sd),
293 IIC_fpCMP32, "vcmp", ".f32\t$Sd, #0",
294 [/* For disassembly only; pattern left blank */]> {
295 let Inst{3-0} = 0b0000;
// VCVTDS: widen f32 -> f64 (matches fextend).
300 def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
301 (outs DPR:$Dd), (ins SPR:$Sm),
302 IIC_fpCVTDS, "vcvt", ".f64.f32\t$Dd, $Sm",
303 [(set DPR:$Dd, (fextend SPR:$Sm))]> {
304 // Instruction operands.
// NOTE(review): the bits<N> operand declarations are missing from this chunk.
308 // Encode instruction operands.
309 let Inst{3-0} = Sm{4-1};
311 let Inst{15-12} = Dd{3-0};
312 let Inst{22} = Dd{4};
// VCVTSD: narrow f64 -> f32 (matches fround). Encoded directly as a VFPAI
// because of the special-case opcode field noted below.
315 // Special case encoding: bits 11-8 is 0b1011.
316 def VCVTSD : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
317 IIC_fpCVTSD, "vcvt", ".f32.f64\t$Sd, $Dm",
318 [(set SPR:$Sd, (fround DPR:$Dm))]> {
319 // Instruction operands.
323 // Encode instruction operands.
324 let Inst{3-0} = Dm{3-0};
326 let Inst{15-12} = Sd{4-1};
327 let Inst{22} = Sd{0};
329 let Inst{27-23} = 0b11101;
330 let Inst{21-16} = 0b110111;
331 let Inst{11-8} = 0b1011;
332 let Inst{7-6} = 0b11;
336 // Between half-precision and single-precision. For disassembly only.
338 // FIXME: Verify encoding after integrated assembler is working.
// vcvtb operates on the bottom half of the S register; vcvtt on the top half.
339 def VCVTBSH: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
340 /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$dst, $a",
341 [/* For disassembly only; pattern left blank */]>;
// f32 -> f16: convert in an S register, then copy the bits out to a GPR.
343 def : ARMPat<(f32_to_f16 SPR:$a),
344 (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;
346 def VCVTBHS: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
347 /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$dst, $a",
348 [/* For disassembly only; pattern left blank */]>;
// f16 -> f32: copy the GPR bits into an S register, then convert.
350 def : ARMPat<(f16_to_f32 GPR:$a),
351 (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;
353 def VCVTTSH: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
354 /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$dst, $a",
355 [/* For disassembly only; pattern left blank */]>;
357 def VCVTTHS: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
358 /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$dst, $a",
359 [/* For disassembly only; pattern left blank */]>;
// vneg: floating-point negation.
361 def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
362 (outs DPR:$Dd), (ins DPR:$Dm),
363 IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm",
364 [(set DPR:$Dd, (fneg (f64 DPR:$Dm)))]>;
366 def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,
367 (outs SPR:$Sd), (ins SPR:$Sm),
368 IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
369 [(set SPR:$Sd, (fneg SPR:$Sm))]>;
// vsqrt: floating-point square root.
371 def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
372 (outs DPR:$Dd), (ins DPR:$Dm),
373 IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm",
374 [(set DPR:$Dd, (fsqrt (f64 DPR:$Dm)))]>;
376 def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
377 (outs SPR:$Sd), (ins SPR:$Sm),
378 IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm",
379 [(set SPR:$Sd, (fsqrt SPR:$Sm))]>;
// Register-to-register FP moves; no patterns — emitted by copy lowering.
381 let neverHasSideEffects = 1 in {
382 def VMOVD : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
383 (outs DPR:$Dd), (ins DPR:$Dm),
384 IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm", []>;
386 def VMOVS : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
387 (outs SPR:$Sd), (ins SPR:$Sm),
388 IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm", []>;
389 } // neverHasSideEffects
391 //===----------------------------------------------------------------------===//
392 // FP <-> GPR Copies. Int <-> FP Conversions.
// VMOVRS: move the raw bits of an S register to a GPR (bitconvert f32->i32).
395 def VMOVRS : AVConv2I<0b11100001, 0b1010,
396 (outs GPR:$Rt), (ins SPR:$Sn),
397 IIC_fpMOVSI, "vmov", "\t$Rt, $Sn",
398 [(set GPR:$Rt, (bitconvert SPR:$Sn))]> {
399 // Instruction operands.
// NOTE(review): the bits<N> operand declarations are missing from this chunk.
403 // Encode instruction operands.
404 let Inst{19-16} = Sn{4-1};
406 let Inst{15-12} = Rt;
408 let Inst{6-5} = 0b00;
409 let Inst{3-0} = 0b0000;
// VMOVSR: the reverse direction, GPR bits into an S register (i32->f32).
412 def VMOVSR : AVConv4I<0b11100000, 0b1010,
413 (outs SPR:$Sn), (ins GPR:$Rt),
414 IIC_fpMOVIS, "vmov", "\t$Sn, $Rt",
415 [(set SPR:$Sn, (bitconvert GPR:$Rt))]> {
416 // Instruction operands.
420 // Encode instruction operands.
421 let Inst{19-16} = Sn{4-1};
423 let Inst{15-12} = Rt;
425 let Inst{6-5} = 0b00;
426 let Inst{3-0} = 0b0000;
// VMOVRRD: split a D register into two GPRs. TableGen patterns cannot
// express multiple results, so this is selected manually.
429 let neverHasSideEffects = 1 in {
430 def VMOVRRD : AVConv3I<0b11000101, 0b1011,
431 (outs GPR:$Rt, GPR:$Rt2), (ins DPR:$Dm),
432 IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $Dm",
433 [/* FIXME: Can't write pattern for multiple result instr*/]> {
434 // Instruction operands.
439 // Encode instruction operands.
440 let Inst{3-0} = Dm{3-0};
442 let Inst{15-12} = Rt;
443 let Inst{19-16} = Rt2;
445 let Inst{7-6} = 0b00;
// VMOVRRS: two S registers out to two GPRs; disassembly only.
448 def VMOVRRS : AVConv3I<0b11000101, 0b1010,
449 (outs GPR:$wb, GPR:$dst2), (ins SPR:$src1, SPR:$src2),
450 IIC_fpMOVDI, "vmov", "\t$wb, $dst2, $src1, $src2",
451 [/* For disassembly only; pattern left blank */]> {
452 let Inst{7-6} = 0b00;
454 } // neverHasSideEffects
// VMOVDRR: build an f64 in a D register from two GPRs; selected from the
// arm_fmdrr (ARMISD::VMOVDRR) node defined at the top of this file.
459 def VMOVDRR : AVConv5I<0b11000100, 0b1011,
460 (outs DPR:$Dm), (ins GPR:$Rt, GPR:$Rt2),
461 IIC_fpMOVID, "vmov", "\t$Dm, $Rt, $Rt2",
462 [(set DPR:$Dm, (arm_fmdrr GPR:$Rt, GPR:$Rt2))]> {
463 // Instruction operands.
// NOTE(review): the bits<N> operand declarations are missing from this chunk.
468 // Encode instruction operands.
469 let Inst{3-0} = Dm{3-0};
471 let Inst{15-12} = Rt;
472 let Inst{19-16} = Rt2;
474 let Inst{7-6} = 0b00;
// VMOVSRR: two GPRs into two S registers; disassembly only.
477 let neverHasSideEffects = 1 in
478 def VMOVSRR : AVConv5I<0b11000100, 0b1010,
479 (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
480 IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
481 [/* For disassembly only; pattern left blank */]> {
482 let Inst{7-6} = 0b00;
488 // FMRX: SPR system reg -> GPR
490 // FMXR: GPR -> VFP system reg
// Encoding helper classes for int -> FP conversions: they layer the common
// operand-bit placement on top of AVConv1I / AVConv1In.
// D-destination, S-source form (e.g. vcvt.f64.s32 Dd, Sm).
495 class AVConv1IDs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
496 bits<4> opcod4, dag oops, dag iops,
497 InstrItinClass itin, string opc, string asm,
499 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
501 // Instruction operands.
// NOTE(review): the bits<N> operand declarations and some parameter lines
// are missing from this chunk.
505 // Encode instruction operands.
506 let Inst{3-0} = Sm{4-1};
508 let Inst{15-12} = Dd{3-0};
509 let Inst{22} = Dd{4};
// S-destination, S-source form (e.g. vcvt.f32.s32 Sd, Sm).
512 class AVConv1InSs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
513 bits<4> opcod4, dag oops, dag iops,InstrItinClass itin,
514 string opc, string asm, list<dag> pattern>
515 : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
517 // Instruction operands.
521 // Encode instruction operands.
522 let Inst{3-0} = Sm{4-1};
524 let Inst{15-12} = Sd{4-1};
525 let Inst{22} = Sd{0};
// Integer -> FP conversions. The integer input arrives in an S register
// (see SDT_ITOF). Inst{7} distinguishes signed (1) from unsigned (0).
528 def VSITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
529 (outs DPR:$Dd), (ins SPR:$Sm),
530 IIC_fpCVTID, "vcvt", ".f64.s32\t$Dd, $Sm",
531 [(set DPR:$Dd, (f64 (arm_sitof SPR:$Sm)))]> {
532 let Inst{7} = 1; // s32
535 def VSITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
536 (outs SPR:$Sd),(ins SPR:$Sm),
537 IIC_fpCVTIS, "vcvt", ".f32.s32\t$Sd, $Sm",
538 [(set SPR:$Sd, (arm_sitof SPR:$Sm))]> {
539 let Inst{7} = 1; // s32
542 def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
543 (outs DPR:$Dd), (ins SPR:$Sm),
544 IIC_fpCVTID, "vcvt", ".f64.u32\t$Dd, $Sm",
545 [(set DPR:$Dd, (f64 (arm_uitof SPR:$Sm)))]> {
546 let Inst{7} = 0; // u32
549 def VUITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
550 (outs SPR:$Sd), (ins SPR:$Sm),
551 IIC_fpCVTIS, "vcvt", ".f32.u32\t$Sd, $Sm",
552 [(set SPR:$Sd, (arm_uitof SPR:$Sm))]> {
553 let Inst{7} = 0; // u32
// Encoding helper classes for FP -> int conversions (integer result lands
// in an S register, see SDT_FTOI).
// S-destination, D-source form (e.g. vcvt.s32.f64 Sd, Dm).
558 class AVConv1IsD_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
559 bits<4> opcod4, dag oops, dag iops,
560 InstrItinClass itin, string opc, string asm,
562 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
564 // Instruction operands.
// NOTE(review): the bits<N> operand declarations and some parameter lines
// are missing from this chunk.
568 // Encode instruction operands.
569 let Inst{3-0} = Dm{3-0};
571 let Inst{15-12} = Sd{4-1};
572 let Inst{22} = Sd{0};
// S-destination, S-source form (e.g. vcvt.s32.f32 Sd, Sm).
575 class AVConv1InsS_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
576 bits<4> opcod4, dag oops, dag iops,
577 InstrItinClass itin, string opc, string asm,
579 : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
581 // Instruction operands.
585 // Encode instruction operands.
586 let Inst{3-0} = Sm{4-1};
588 let Inst{15-12} = Sd{4-1};
589 let Inst{22} = Sd{0};
592 // Always set Z bit in the instruction, i.e. "round towards zero" variants.
// These match the arm_ftosi/arm_ftoui nodes, which require truncating
// (round-to-zero) semantics regardless of the current FPSCR rounding mode.
593 def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
594 (outs SPR:$Sd), (ins DPR:$Dm),
595 IIC_fpCVTDI, "vcvt", ".s32.f64\t$Sd, $Dm",
596 [(set SPR:$Sd, (arm_ftosi (f64 DPR:$Dm)))]> {
597 let Inst{7} = 1; // Z bit
600 def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
601 (outs SPR:$Sd), (ins SPR:$Sm),
602 IIC_fpCVTSI, "vcvt", ".s32.f32\t$Sd, $Sm",
603 [(set SPR:$Sd, (arm_ftosi SPR:$Sm))]> {
604 let Inst{7} = 1; // Z bit
607 def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
608 (outs SPR:$Sd), (ins DPR:$Dm),
609 IIC_fpCVTDI, "vcvt", ".u32.f64\t$Sd, $Dm",
610 [(set SPR:$Sd, (arm_ftoui (f64 DPR:$Dm)))]> {
611 let Inst{7} = 1; // Z bit
614 def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
615 (outs SPR:$Sd), (ins SPR:$Sm),
616 IIC_fpCVTSI, "vcvt", ".u32.f32\t$Sd, $Sm",
617 [(set SPR:$Sd, (arm_ftoui SPR:$Sm))]> {
618 let Inst{7} = 1; // Z bit
621 // And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
622 // For disassembly only.
// These read the dynamic rounding mode, hence Uses = [FPSCR], and are only
// reachable through the int_arm_vcvtr / int_arm_vcvtru intrinsics.
623 let Uses = [FPSCR] in {
624 // FIXME: Verify encoding after integrated assembler is working.
625 def VTOSIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
626 (outs SPR:$Sd), (ins DPR:$Dm),
627 IIC_fpCVTDI, "vcvtr", ".s32.f64\t$Sd, $Dm",
628 [(set SPR:$Sd, (int_arm_vcvtr (f64 DPR:$Dm)))]>{
629 let Inst{7} = 0; // Z bit
632 def VTOSIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
633 (outs SPR:$Sd), (ins SPR:$Sm),
634 IIC_fpCVTSI, "vcvtr", ".s32.f32\t$Sd, $Sm",
635 [(set SPR:$Sd, (int_arm_vcvtr SPR:$Sm))]> {
636 let Inst{7} = 0; // Z bit
639 def VTOUIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
640 (outs SPR:$Sd), (ins DPR:$Dm),
641 IIC_fpCVTDI, "vcvtr", ".u32.f64\t$Sd, $Dm",
642 [(set SPR:$Sd, (int_arm_vcvtru(f64 DPR:$Dm)))]>{
643 let Inst{7} = 0; // Z bit
646 def VTOUIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
647 (outs SPR:$Sd), (ins SPR:$Sm),
648 IIC_fpCVTSI, "vcvtr", ".u32.f32\t$Sd, $Sm",
649 [(set SPR:$Sd, (int_arm_vcvtru SPR:$Sm))]> {
650 let Inst{7} = 0; // Z bit
654 // Convert between floating-point and fixed-point
655 // Data type for fixed-point naming convention:
656 // S16 (U=0, sx=0) -> SH
657 // U16 (U=1, sx=0) -> UH
658 // S32 (U=0, sx=1) -> SL
659 // U32 (U=1, sx=1) -> UL
661 // FIXME: Marking these as codegen only seems wrong. They are real
// The source operand is tied to the destination ($a = $dst) because the
// architectural fixed-point conversions operate on a register in place.
// All of these are disassembly-only (no selection patterns).
663 let Constraints = "$a = $dst", isCodeGenOnly = 1 in {
665 // FP to Fixed-Point:
667 def VTOSHS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 0,
668 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
669 IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits",
670 [/* For disassembly only; pattern left blank */]>;
672 def VTOUHS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 0,
673 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
674 IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits",
675 [/* For disassembly only; pattern left blank */]>;
677 def VTOSLS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 1,
678 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
679 IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits",
680 [/* For disassembly only; pattern left blank */]>;
682 def VTOULS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 1,
683 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
684 IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits",
685 [/* For disassembly only; pattern left blank */]>;
687 def VTOSHD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 0,
688 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
689 IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits",
690 [/* For disassembly only; pattern left blank */]>;
692 def VTOUHD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 0,
693 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
694 IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits",
695 [/* For disassembly only; pattern left blank */]>;
697 def VTOSLD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 1,
698 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
699 IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits",
700 [/* For disassembly only; pattern left blank */]>;
702 def VTOULD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 1,
703 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
704 IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits",
705 [/* For disassembly only; pattern left blank */]>;
707 // Fixed-Point to FP:
709 def VSHTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 0,
710 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
711 IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits",
712 [/* For disassembly only; pattern left blank */]>;
714 def VUHTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 0,
715 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
716 IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits",
717 [/* For disassembly only; pattern left blank */]>;
719 def VSLTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 1,
720 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
721 IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits",
722 [/* For disassembly only; pattern left blank */]>;
724 def VULTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 1,
725 (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
726 IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits",
727 [/* For disassembly only; pattern left blank */]>;
729 def VSHTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 0,
730 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
731 IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits",
732 [/* For disassembly only; pattern left blank */]>;
734 def VUHTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 0,
735 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
736 IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits",
737 [/* For disassembly only; pattern left blank */]>;
739 def VSLTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 1,
740 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
741 IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits",
742 [/* For disassembly only; pattern left blank */]>;
744 def VULTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 1,
745 (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
746 IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits",
747 [/* For disassembly only; pattern left blank */]>;
749 } // End of 'let Constraints = "$a = $dst", isCodeGenOnly = 1 in'
751 //===----------------------------------------------------------------------===//
752 // FP FMA Operations.
// Multiply-accumulate family. All accumulate into a tied input register
// ($Ddin/$Sdin = destination), expressed with RegConstraint.
// vmla: d = d + (n * m)
755 def VMLAD : ADbI_vmlX<0b11100, 0b00, 0, 0,
756 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
757 IIC_fpMAC64, "vmla", ".f64\t$Dd, $Dn, $Dm",
758 [(set DPR:$Dd, (fadd (fmul DPR:$Dn, DPR:$Dm),
760 RegConstraint<"$Ddin = $Dd">;
762 def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
763 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
764 IIC_fpMAC32, "vmla", ".f32\t$Sd, $Sn, $Sm",
765 [(set SPR:$Sd, (fadd (fmul SPR:$Sn, SPR:$Sm),
767 RegConstraint<"$Sdin = $Sd">;
// Commuted-operand patterns, only when NEON is not preferred for scalar FP.
769 def : Pat<(fadd DPR:$dstin, (fmul DPR:$a, (f64 DPR:$b))),
770 (VMLAD DPR:$dstin, DPR:$a, DPR:$b)>, Requires<[DontUseNEONForFP]>;
771 def : Pat<(fadd SPR:$dstin, (fmul SPR:$a, SPR:$b)),
772 (VMLAS SPR:$dstin, SPR:$a, SPR:$b)>, Requires<[DontUseNEONForFP]>;
// vmls: d = d - (n * m), matched as d + (-(n * m)).
774 def VMLSD : ADbI_vmlX<0b11100, 0b00, 1, 0,
775 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
776 IIC_fpMAC64, "vmls", ".f64\t$Dd, $Dn, $Dm",
777 [(set DPR:$Dd, (fadd (fneg (fmul DPR:$Dn,DPR:$Dm)),
779 RegConstraint<"$Ddin = $Dd">;
781 def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
782 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
783 IIC_fpMAC32, "vmls", ".f32\t$Sd, $Sn, $Sm",
784 [(set SPR:$Sd, (fadd (fneg (fmul SPR:$Sn, SPR:$Sm)),
786 RegConstraint<"$Sdin = $Sd">;
788 def : Pat<(fsub DPR:$dstin, (fmul DPR:$a, (f64 DPR:$b))),
789 (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>, Requires<[DontUseNEONForFP]>;
790 def : Pat<(fsub SPR:$dstin, (fmul SPR:$a, SPR:$b)),
791 (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>, Requires<[DontUseNEONForFP]>;
// vnmla: d = -(n * m) - d.
793 def VNMLAD : ADbI_vmlX<0b11100, 0b01, 1, 0,
794 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
795 IIC_fpMAC64, "vnmla", ".f64\t$Dd, $Dn, $Dm",
796 [(set DPR:$Dd,(fsub (fneg (fmul DPR:$Dn,DPR:$Dm)),
798 RegConstraint<"$Ddin = $Dd">;
800 def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
801 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
802 IIC_fpMAC32, "vnmla", ".f32\t$Sd, $Sn, $Sm",
803 [(set SPR:$Sd, (fsub (fneg (fmul SPR:$Sn, SPR:$Sm)),
805 RegConstraint<"$Sdin = $Sd">;
807 def : Pat<(fsub (fneg (fmul DPR:$a, (f64 DPR:$b))), DPR:$dstin),
808 (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>, Requires<[DontUseNEONForFP]>;
809 def : Pat<(fsub (fneg (fmul SPR:$a, SPR:$b)), SPR:$dstin),
810 (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>, Requires<[DontUseNEONForFP]>;
// vnmls: d = (n * m) - d.
812 def VNMLSD : ADbI_vmlX<0b11100, 0b01, 0, 0,
813 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
814 IIC_fpMAC64, "vnmls", ".f64\t$Dd, $Dn, $Dm",
815 [(set DPR:$Dd, (fsub (fmul DPR:$Dn, DPR:$Dm),
817 RegConstraint<"$Ddin = $Dd">;
819 def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
820 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
821 IIC_fpMAC32, "vnmls", ".f32\t$Sd, $Sn, $Sm",
822 [(set SPR:$Sd, (fsub (fmul SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
823 RegConstraint<"$Sdin = $Sd">;
825 def : Pat<(fsub (fmul DPR:$a, (f64 DPR:$b)), DPR:$dstin),
826 (VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>, Requires<[DontUseNEONForFP]>;
827 def : Pat<(fsub (fmul SPR:$a, SPR:$b), SPR:$dstin),
828 (VNMLSS SPR:$dstin, SPR:$a, SPR:$b)>, Requires<[DontUseNEONForFP]>;
831 //===----------------------------------------------------------------------===//
832 // FP Conditional moves.
// Predicated move/negate pseudos: the first source is tied to the
// destination so the move only takes effect when the predicate passes.
// Patterns are commented out; these are created by custom cmov expansion.
835 let neverHasSideEffects = 1 in {
836 def VMOVDcc : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
837 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
838 IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm",
839 [/*(set DPR:$Dd, (ARMcmov DPR:$Dn, DPR:$Dm, imm:$cc))*/]>,
840 RegConstraint<"$Dn = $Dd">;
842 def VMOVScc : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
843 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
844 IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm",
845 [/*(set SPR:$Sd, (ARMcmov SPR:$Sn, SPR:$Sm, imm:$cc))*/]>,
846 RegConstraint<"$Sn = $Sd">;
848 def VNEGDcc : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
849 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
850 IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm",
851 [/*(set DPR:$Dd, (ARMcneg DPR:$Dn, DPR:$Dm, imm:$cc))*/]>,
852 RegConstraint<"$Dn = $Dd">;
854 def VNEGScc : ASuI<0b11101, 0b11, 0b0001, 0b01, 0,
855 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
856 IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
857 [/*(set SPR:$Sd, (ARMcneg SPR:$Sn, SPR:$Sm, imm:$cc))*/]>,
858 RegConstraint<"$Sn = $Sd">;
859 } // neverHasSideEffects
861 //===----------------------------------------------------------------------===//
865 // APSR is the application level alias of CPSR. This FPSCR N, Z, C, V flags
// FMSTAT ("vmrs apsr_nzcv, fpscr"): copy the FPSCR condition flags into the
// CPSR so integer conditional instructions can use FP compare results.
867 let Defs = [CPSR], Uses = [FPSCR] in
868 def FMSTAT : VFPAI<(outs), (ins), VFPMiscFrm, IIC_fpSTAT,
869 "vmrs", "\tapsr_nzcv, fpscr",
871 let Inst{27-20} = 0b11101111;
872 let Inst{19-16} = 0b0001;
873 let Inst{15-12} = 0b1111;
874 let Inst{11-8} = 0b1010;
876 let Inst{6-5} = 0b00;
878 let Inst{3-0} = 0b0000;
// VMRS: read the whole FPSCR into a GPR (int_arm_get_fpscr intrinsic).
// Marked hasSideEffects so it is not dead-code eliminated or reordered.
882 let hasSideEffects = 1, Uses = [FPSCR] in
883 def VMRS : VFPAI<(outs GPR:$Rt), (ins), VFPMiscFrm, IIC_fpSTAT,
884 "vmrs", "\t$Rt, fpscr",
885 [(set GPR:$Rt, (int_arm_get_fpscr))]> {
886 // Instruction operand.
889 // Encode instruction operand.
890 let Inst{15-12} = Rt;
892 let Inst{27-20} = 0b11101111;
893 let Inst{19-16} = 0b0001;
894 let Inst{11-8} = 0b1010;
896 let Inst{6-5} = 0b00;
898 let Inst{3-0} = 0b0000;
// VMSR: write a GPR into the FPSCR (int_arm_set_fpscr intrinsic).
901 let Defs = [FPSCR] in
902 def VMSR : VFPAI<(outs), (ins GPR:$src), VFPMiscFrm, IIC_fpSTAT,
903 "vmsr", "\tfpscr, $src",
904 [(int_arm_set_fpscr GPR:$src)]> {
905 // Instruction operand.
908 // Encode instruction operand.
909 let Inst{15-12} = src;
911 let Inst{27-20} = 0b11101110;
912 let Inst{19-16} = 0b0001;
913 let Inst{11-8} = 0b1010;
918 // Materialize FP immediates. VFP3 only.
// vmov with an encoded 8-bit immediate; only constants accepted by the
// vfp_f64imm / vfp_f32imm operands (defined near the top of this file)
// reach these. Rematerializable since they depend on no other registers.
// NOTE(review): the closing brace of this 'let isReMaterializable' region is
// beyond the end of this chunk.
919 let isReMaterializable = 1 in {
920 def FCONSTD : VFPAI<(outs DPR:$Dd), (ins vfp_f64imm:$imm),
921 VFPMiscFrm, IIC_fpUNA64,
922 "vmov", ".f64\t$Dd, $imm",
923 [(set DPR:$Dd, vfp_f64imm:$imm)]>, Requires<[HasVFP3]> {
924 // Instruction operands.
928 // Encode instruction operands.
929 let Inst{15-12} = Dd{3-0};
930 let Inst{22} = Dd{4};
// The 8 encoded immediate bits are drawn from the double's sign, exponent,
// and top mantissa bits.
931 let Inst{19} = imm{31};
932 let Inst{18-16} = imm{22-20};
933 let Inst{3-0} = imm{19-16};
935 // Encode remaining instruction bits.
936 let Inst{27-23} = 0b11101;
937 let Inst{21-20} = 0b11;
938 let Inst{11-9} = 0b101;
939 let Inst{8} = 1; // Double precision.
940 let Inst{7-4} = 0b0000;
943 def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
944 VFPMiscFrm, IIC_fpUNA32,
945 "vmov", ".f32\t$Sd, $imm",
946 [(set SPR:$Sd, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
947 // Instruction operands.
951 // Encode instruction operands.
952 let Inst{15-12} = Sd{4-1};
953 let Inst{22} = Sd{0};
954 let Inst{19} = imm{31}; // The immediate is handled as a double.
955 let Inst{18-16} = imm{22-20};
956 let Inst{3-0} = imm{19-16};
958 // Encode remaining instruction bits.
959 let Inst{27-23} = 0b11101;
960 let Inst{21-20} = 0b11;
961 let Inst{11-9} = 0b101;
962 let Inst{8} = 0; // Single precision.
963 let Inst{7-4} = 0b0000;