//===- ARMInstrVFP.td - VFP support for ARM ----------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the ARM VFP instruction set.
//
//===----------------------------------------------------------------------===//

def SDT_FTOI : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisFP<1>]>;
def SDT_ITOF : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>;
def SDT_CMPFP0 : SDTypeProfile<0, 1, [SDTCisFP<0>]>;
def SDT_VMOVDRR : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
                                       SDTCisVT<2, i32>]>;

def arm_ftoui : SDNode<"ARMISD::FTOUI", SDT_FTOI>;
def arm_ftosi : SDNode<"ARMISD::FTOSI", SDT_FTOI>;
def arm_sitof : SDNode<"ARMISD::SITOF", SDT_ITOF>;
def arm_uitof : SDNode<"ARMISD::UITOF", SDT_ITOF>;
def arm_fmstat : SDNode<"ARMISD::FMSTAT", SDTNone, [SDNPInGlue, SDNPOutGlue]>;
def arm_cmpfp : SDNode<"ARMISD::CMPFP", SDT_ARMCmp, [SDNPOutGlue]>;
def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0", SDT_CMPFP0, [SDNPOutGlue]>;
def arm_fmdrr : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;

//===----------------------------------------------------------------------===//
// Operand Definitions.
//===----------------------------------------------------------------------===//

def vfp_f32imm : Operand<f32>,
                 PatLeaf<(f32 fpimm), [{
      return ARM::getVFPf32Imm(N->getValueAPF()) != -1;
    }]> {
  let PrintMethod = "printVFPf32ImmOperand";
}

def vfp_f64imm : Operand<f64>,
                 PatLeaf<(f64 fpimm), [{
      return ARM::getVFPf64Imm(N->getValueAPF()) != -1;
    }]> {
  let PrintMethod = "printVFPf64ImmOperand";
}
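
// Note (added commentary, not from the original source): the predicates above
// accept only constants that fit the VFP3 8-bit "modified immediate" form,
// roughly +/-(1.0..1.9375) scaled by a small power of two. For example 1.0,
// 0.5 and -2.0 are typically encodable, while 0.0 and 0.3 are not and must be
// materialized another way.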

//===----------------------------------------------------------------------===//
// Load / store Instructions.
//===----------------------------------------------------------------------===//

let canFoldAsLoad = 1, isReMaterializable = 1 in {

def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
                 IIC_fpLoad64, "vldr", ".64\t$Dd, $addr",
                 [(set DPR:$Dd, (f64 (load addrmode5:$addr)))]>;

def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
                 IIC_fpLoad32, "vldr", ".32\t$Sd, $addr",
                 [(set SPR:$Sd, (load addrmode5:$addr))]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

} // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'

def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
                 IIC_fpStore64, "vstr", ".64\t$Dd, $addr",
                 [(store (f64 DPR:$Dd), addrmode5:$addr)]>;

def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
                 IIC_fpStore32, "vstr", ".32\t$Sd, $addr",
                 [(store SPR:$Sd, addrmode5:$addr)]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}
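
// Usage sketch (added for illustration, not from the original source): these
// select to plain register+immediate-offset loads and stores, for example
//   vldr  d0, [r0, #8]
//   vstr  s2, [sp, #-4]
// where the addrmode5 offset is a multiple of 4 in the range +/-1020.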

//===----------------------------------------------------------------------===//
// Load / store multiple Instructions.
//===----------------------------------------------------------------------===//

multiclass vfp_ldst_mult<string asm, bit L_bit,
                         InstrItinClass itin, InstrItinClass itin_upd> {
  // Double Precision
  def DIA :
    AXDI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;
  }
  def DIA_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
                               variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }
  def DDB_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
                               variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }

  // Single Precision
  def SIA :
    AXSI4<(outs), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SIA_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
                               variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SDB_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
                               variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
}

let neverHasSideEffects = 1 in {

let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
defm VLDM : vfp_ldst_mult<"vldm", 1, IIC_fpLoad_m, IIC_fpLoad_mu>;

let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpLoad_m, IIC_fpLoad_mu>;

} // neverHasSideEffects
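
// Note (added commentary, not from the original source): each defm above
// instantiates the six multiclass variants, so VLDM expands to VLDMDIA,
// VLDMDIA_UPD, VLDMDDB_UPD, VLDMSIA, VLDMSIA_UPD and VLDMSDB_UPD (likewise
// for VSTM); the _UPD forms model base-register writeback, i.e. the "$Rn!"
// assembly syntax.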

def : MnemonicAlias<"vldm", "vldmia">;
def : MnemonicAlias<"vstm", "vstmia">;

def : InstAlias<"vpush${p} $r", (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>,
      Requires<[HasVFP2]>;
def : InstAlias<"vpush${p} $r", (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>,
      Requires<[HasVFP2]>;
def : InstAlias<"vpop${p} $r",  (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>,
      Requires<[HasVFP2]>;
def : InstAlias<"vpop${p} $r",  (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>,
      Requires<[HasVFP2]>;
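
// Usage sketch (added for illustration, not from the original source): with
// these aliases "vpush {d8-d15}" assembles as "vstmdb sp!, {d8-d15}" and
// "vpop {d8-d15}" as "vldmia sp!, {d8-d15}".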

// FLDMX, FSTMX - mixing S/D registers for pre-ARMv6 cores

//===----------------------------------------------------------------------===//
// FP Binary Operations.
//===----------------------------------------------------------------------===//

def VADDD : ADbI<0b11100, 0b11, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                 IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>;

def VADDS : ASbIn<0b11100, 0b11, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VSUBD : ADbI<0b11100, 0b11, 1, 0,
                 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                 IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>;

def VSUBS : ASbIn<0b11100, 0b11, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VDIVD : ADbI<0b11101, 0b00, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                 IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>;

def VDIVS : ASbI<0b11101, 0b00, 0, 0,
                 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                 IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
                 [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>;

def VMULD : ADbI<0b11100, 0b10, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                 IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>;

def VMULS : ASbIn<0b11100, 0b10, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fmul SPR:$Sn, SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VNMULD : ADbI<0b11100, 0b10, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fneg (fmul DPR:$Dn, (f64 DPR:$Dm))))]>;

def VNMULS : ASbI<0b11100, 0b10, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpMUL32, "vnmul", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fneg (fmul SPR:$Sn, SPR:$Sm)))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

// Match reassociated forms only if not sign dependent rounding.
def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
          (VNMULD DPR:$a, DPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
def : Pat<(fmul (fneg SPR:$a), SPR:$b),
          (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
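
// Note (added commentary, not from the original source): VNMUL negates the
// already-rounded product, while (fneg a) * b rounds the negated exact value;
// under directed rounding modes the two can differ by an ulp, hence the
// NoHonorSignDependentRounding guard on these folds.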

// These are encoded as unary instructions.
let Defs = [FPSCR] in {
def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins DPR:$Dd, DPR:$Dm),
                  IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm",
                  [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm))]>;

def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins SPR:$Sd, SPR:$Sm),
                  IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm",
                  [(arm_cmpfp SPR:$Sd, SPR:$Sm)]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

// FIXME: Verify encoding after integrated assembler is working.
def VCMPD : ADuI<0b11101, 0b11, 0b0100, 0b01, 0,
                 (outs), (ins DPR:$Dd, DPR:$Dm),
                 IIC_fpCMP64, "vcmp", ".f64\t$Dd, $Dm",
                 [/* For disassembly only; pattern left blank */]>;

def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
                 (outs), (ins SPR:$Sd, SPR:$Sm),
                 IIC_fpCMP32, "vcmp", ".f32\t$Sd, $Sm",
                 [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
} // Defs = [FPSCR]

//===----------------------------------------------------------------------===//
// FP Unary Operations.
//===----------------------------------------------------------------------===//

def VABSD : ADuI<0b11101, 0b11, 0b0000, 0b11, 0,
                 (outs DPR:$Dd), (ins DPR:$Dm),
                 IIC_fpUNA64, "vabs", ".f64\t$Dd, $Dm",
                 [(set DPR:$Dd, (fabs (f64 DPR:$Dm)))]>;

def VABSS : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpUNA32, "vabs", ".f32\t$Sd, $Sm",
                  [(set SPR:$Sd, (fabs SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let Defs = [FPSCR] in {
def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins DPR:$Dd),
                   IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0",
                   [(arm_cmpfp0 (f64 DPR:$Dd))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins SPR:$Sd),
                   IIC_fpCMP32, "vcmpe", ".f32\t$Sd, #0",
                   [(arm_cmpfp0 SPR:$Sd)]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

// FIXME: Verify encoding after integrated assembler is working.
def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
                  (outs), (ins DPR:$Dd),
                  IIC_fpCMP64, "vcmp", ".f64\t$Dd, #0",
                  [/* For disassembly only; pattern left blank */]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
                  (outs), (ins SPR:$Sd),
                  IIC_fpCMP32, "vcmp", ".f32\t$Sd, #0",
                  [/* For disassembly only; pattern left blank */]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
} // Defs = [FPSCR]

def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
                  (outs DPR:$Dd), (ins SPR:$Sm),
                  IIC_fpCVTDS, "vcvt", ".f64.f32\t$Dd, $Sm",
                  [(set DPR:$Dd, (fextend SPR:$Sm))]> {
  // Instruction operands.
  bits<5> Dd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Dd{3-0};
  let Inst{22}    = Dd{4};
}

// Special case encoding: bits 11-8 is 0b1011.
def VCVTSD : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
                   IIC_fpCVTSD, "vcvt", ".f32.f64\t$Sd, $Dm",
                   [(set SPR:$Sd, (fround DPR:$Dm))]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let Inst{27-23} = 0b11101;
  let Inst{21-16} = 0b110111;
  let Inst{11-8}  = 0b1011;
  let Inst{7-6}   = 0b11;
  let Inst{4}     = 0;
}

// Between half-precision and single-precision.  For disassembly only.

// FIXME: Verify encoding after integrated assembler is working.
def VCVTBSH: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
                 /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$dst, $a",
                 [/* For disassembly only; pattern left blank */]>;

def : ARMPat<(f32_to_f16 SPR:$a),
             (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;

def VCVTBHS: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
                 /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$dst, $a",
                 [/* For disassembly only; pattern left blank */]>;

def : ARMPat<(f16_to_f32 GPR:$a),
             (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;

def VCVTTSH: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
                 /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$dst, $a",
                 [/* For disassembly only; pattern left blank */]>;

def VCVTTHS: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
                 /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$dst, $a",
                 [/* For disassembly only; pattern left blank */]>;
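
// Note (added commentary, not from the original source): the "vcvtb" forms
// operate on the bottom half-word of the 32-bit S register and "vcvtt" on the
// top half-word; only the bottom-half forms are wired up to the
// f16_to_f32/f32_to_f16 patterns above.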

def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
                 (outs DPR:$Dd), (ins DPR:$Dm),
                 IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm",
                 [(set DPR:$Dd, (fneg (f64 DPR:$Dm)))]>;

def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
                  [(set SPR:$Sd, (fneg SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm",
                  [(set DPR:$Dd, (fsqrt (f64 DPR:$Dm)))]>;

def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm",
                  [(set SPR:$Sd, (fsqrt SPR:$Sm))]>;

let neverHasSideEffects = 1 in {
def VMOVD : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
                 (outs DPR:$Dd), (ins DPR:$Dm),
                 IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm", []>;

def VMOVS : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
                 (outs SPR:$Sd), (ins SPR:$Sm),
                 IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm", []>;
} // neverHasSideEffects

//===----------------------------------------------------------------------===//
// FP <-> GPR Copies. Int <-> FP Conversions.
//===----------------------------------------------------------------------===//

def VMOVRS : AVConv2I<0b11100001, 0b1010,
                      (outs GPR:$Rt), (ins SPR:$Sn),
                      IIC_fpMOVSI, "vmov", "\t$Rt, $Sn",
                      [(set GPR:$Rt, (bitconvert SPR:$Sn))]> {
  // Instruction operands.
  bits<4> Rt;
  bits<5> Sn;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7}     = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5}   = 0b00;
  let Inst{3-0}   = 0b0000;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

def VMOVSR : AVConv4I<0b11100000, 0b1010,
                      (outs SPR:$Sn), (ins GPR:$Rt),
                      IIC_fpMOVIS, "vmov", "\t$Sn, $Rt",
                      [(set SPR:$Sn, (bitconvert GPR:$Rt))]> {
  // Instruction operands.
  bits<5> Sn;
  bits<4> Rt;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7}     = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5}   = 0b00;
  let Inst{3-0}   = 0b0000;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

let neverHasSideEffects = 1 in {
def VMOVRRD : AVConv3I<0b11000101, 0b1011,
                       (outs GPR:$Rt, GPR:$Rt2), (ins DPR:$Dm),
                       IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $Dm",
                       [/* FIXME: Can't write pattern for multiple result instr*/]> {
  // Instruction operands.
  bits<5> Dm;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

def VMOVRRS : AVConv3I<0b11000101, 0b1010,
                       (outs GPR:$wb, GPR:$dst2), (ins SPR:$src1, SPR:$src2),
                       IIC_fpMOVDI, "vmov", "\t$wb, $dst2, $src1, $src2",
                       [/* For disassembly only; pattern left blank */]> {
  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}
} // neverHasSideEffects

def VMOVDRR : AVConv5I<0b11000100, 0b1011,
                       (outs DPR:$Dm), (ins GPR:$Rt, GPR:$Rt2),
                       IIC_fpMOVID, "vmov", "\t$Dm, $Rt, $Rt2",
                       [(set DPR:$Dm, (arm_fmdrr GPR:$Rt, GPR:$Rt2))]> {
  // Instruction operands.
  bits<5> Dm;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

let neverHasSideEffects = 1 in
def VMOVSRR : AVConv5I<0b11000100, 0b1010,
                       (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
                       IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
                       [/* For disassembly only; pattern left blank */]> {
  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

// FMRX: SPR system reg -> GPR
// FMXR: GPR -> VFP system reg

class AVConv1IDs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Dd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Dd{3-0};
  let Inst{22}    = Dd{4};
}

class AVConv1InSs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                         bits<4> opcod4, dag oops, dag iops,
                         InstrItinClass itin, string opc, string asm,
                         list<dag> pattern>
  : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
              pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
}
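
// Note (added commentary, not from the original source): VFP register numbers
// are split across the encoding. A 5-bit S-register number stores its upper
// four bits in the Vd/Vm field and its low bit in the D/M bit, while a
// D-register number stores its low four bits in Vd/Vm and its high bit in
// D/M; that is why the classes above assign Sd{4-1}/Sd{0} and Dd{3-0}/Dd{4}
// to separate fields.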

def VSITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
                               (outs DPR:$Dd), (ins SPR:$Sm),
                               IIC_fpCVTID, "vcvt", ".f64.s32\t$Dd, $Sm",
                               [(set DPR:$Dd, (f64 (arm_sitof SPR:$Sm)))]> {
  let Inst{7} = 1; // s32
}

def VSITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
                                (outs SPR:$Sd), (ins SPR:$Sm),
                                IIC_fpCVTIS, "vcvt", ".f32.s32\t$Sd, $Sm",
                                [(set SPR:$Sd, (arm_sitof SPR:$Sm))]> {
  let Inst{7} = 1; // s32

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
                               (outs DPR:$Dd), (ins SPR:$Sm),
                               IIC_fpCVTID, "vcvt", ".f64.u32\t$Dd, $Sm",
                               [(set DPR:$Dd, (f64 (arm_uitof SPR:$Sm)))]> {
  let Inst{7} = 0; // u32
}

def VUITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
                                (outs SPR:$Sd), (ins SPR:$Sm),
                                IIC_fpCVTIS, "vcvt", ".f32.u32\t$Sd, $Sm",
                                [(set SPR:$Sd, (arm_uitof SPR:$Sm))]> {
  let Inst{7} = 0; // u32

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

// FP -> Int:

class AVConv1IsD_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
}

class AVConv1InsS_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                         bits<4> opcod4, dag oops, dag iops,
                         InstrItinClass itin, string opc, string asm,
                         list<dag> pattern>
  : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
              pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
}

// Always set Z bit in the instruction, i.e. "round towards zero" variants.
def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvt", ".s32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (arm_ftosi (f64 DPR:$Dm)))]> {
  let Inst{7} = 1; // Z bit
}

def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvt", ".s32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (arm_ftosi SPR:$Sm))]> {
  let Inst{7} = 1; // Z bit

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvt", ".u32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (arm_ftoui (f64 DPR:$Dm)))]> {
  let Inst{7} = 1; // Z bit
}

def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvt", ".u32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (arm_ftoui SPR:$Sm))]> {
  let Inst{7} = 1; // Z bit

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

// And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
let Uses = [FPSCR] in {
// FIXME: Verify encoding after integrated assembler is working.
def VTOSIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvtr", ".s32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (int_arm_vcvtr (f64 DPR:$Dm)))]> {
  let Inst{7} = 0; // Z bit
}

def VTOSIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvtr", ".s32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (int_arm_vcvtr SPR:$Sm))]> {
  let Inst{7} = 0; // Z bit
}

def VTOUIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvtr", ".u32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (int_arm_vcvtru (f64 DPR:$Dm)))]> {
  let Inst{7} = 0; // Z bit
}

def VTOUIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvtr", ".u32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (int_arm_vcvtru SPR:$Sm))]> {
  let Inst{7} = 0; // Z bit
}
} // Uses = [FPSCR]

// Convert between floating-point and fixed-point
// Data type for fixed-point naming convention:
//   S16 (U=0, sx=0) -> SH
//   U16 (U=1, sx=0) -> UH
//   S32 (U=0, sx=1) -> SL
//   U32 (U=1, sx=1) -> UL
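
// Example (added for illustration, not from the original source): under this
// convention VTOSHS below is "vcvt.s16.f32 Sd, Sd, #fbits" (FP to S16 fixed)
// and VULTOD is "vcvt.f64.u32 Dd, Dd, #fbits" (U32 fixed to FP).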

// FIXME: Marking these as codegen only seems wrong. They are real
//        instructions(?).
let Constraints = "$a = $dst", isCodeGenOnly = 1 in {

// FP to Fixed-Point:

def VTOSHS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 0,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOUHS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 0,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOSLS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 1,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOULS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 1,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOSHD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 0,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

def VTOUHD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 0,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

def VTOSLD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 1,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

def VTOULD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 1,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

// Fixed-Point to FP:

def VSHTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 0,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VUHTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 0,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VSLTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 1,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VULTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 1,
                       (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
                       IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VSHTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 0,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

def VUHTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 0,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

def VSLTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 1,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

def VULTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 1,
                       (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
                       IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits",
                       [/* For disassembly only; pattern left blank */]>;

} // End of 'let Constraints = "$a = $dst", isCodeGenOnly = 1 in'

//===----------------------------------------------------------------------===//
// FP Multiply-Accumulate Operations.
//===----------------------------------------------------------------------===//

def VMLAD : ADbI<0b11100, 0b00, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                 IIC_fpMAC64, "vmla", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP2,UseFPVMLx]>;

def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vmla", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
          (VMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,UseFPVMLx]>;
def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
          (VMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP, UseFPVMLx]>;
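
// Note (added commentary, not from the original source): the instruction
// patterns above match the (fmul, accumulator) operand order of fadd_mlx;
// the two Pats catch the commuted (accumulator, fmul) order so either form
// selects VMLA.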

def VMLSD : ADbI<0b11100, 0b00, 1, 0,
                 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                 IIC_fpMAC64, "vmls", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP2,UseFPVMLx]>;

def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vmls", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
          (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,UseFPVMLx]>;
def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
          (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;

def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpMAC64, "vnmla", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP2,UseFPVMLx]>;

def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vnmla", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
          (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,UseFPVMLx]>;
def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
          (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;

def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpMAC64, "vnmls", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
                                           (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP2,UseFPVMLx]>;

def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vnmls", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
          (VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,UseFPVMLx]>;
def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
          (VNMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;

//===----------------------------------------------------------------------===//
// FP Conditional moves.
//===----------------------------------------------------------------------===//

let neverHasSideEffects = 1 in {
def VMOVDcc : ARMPseudoInst<(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm, pred:$p),
                            Size4Bytes, IIC_fpUNA64,
                            [/*(set DPR:$Dd, (ARMcmov DPR:$Dn, DPR:$Dm, imm:$cc))*/]>,
              RegConstraint<"$Dn = $Dd">;

def VMOVScc : ARMPseudoInst<(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm, pred:$p),
                            Size4Bytes, IIC_fpUNA32,
                            [/*(set SPR:$Sd, (ARMcmov SPR:$Sn, SPR:$Sm, imm:$cc))*/]>,
              RegConstraint<"$Sn = $Sd">;
} // neverHasSideEffects

//===----------------------------------------------------------------------===//
// Move from VFP System Register to ARM core register.
//===----------------------------------------------------------------------===//

class MovFromVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
                 list<dag> pattern>:
  VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {

  // Instruction operand.
  bits<4> Rt;

  let Inst{27-20} = 0b11101111;
  let Inst{19-16} = opc19_16;
  let Inst{15-12} = Rt;
  let Inst{11-8}  = 0b1010;
  let Inst{7}     = 0;
  let Inst{6-5}   = 0b00;
  let Inst{4}     = 1;
  let Inst{3-0}   = 0b0000;
}

// APSR is the application level alias of CPSR. This instruction copies the
// FPSCR N, Z, C, V flags to APSR.
let Defs = [CPSR], Uses = [FPSCR], Rt = 0b1111 /* apsr_nzcv */ in
def FMSTAT : MovFromVFP<0b0001 /* fpscr */, (outs), (ins),
                        "vmrs", "\tapsr_nzcv, fpscr", [(arm_fmstat)]>;
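
// Note (added commentary, not from the original source): this is how VFP
// compare results reach conditional code, e.g.
//   vcmpe.f64 d0, d1
//   vmrs      apsr_nzcv, fpscr
//   bgt       .Lgreater
// where the vmrs copies the FPSCR flags into APSR for the branch to test.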

// Application level FPSCR -> GPR
let hasSideEffects = 1, Uses = [FPSCR] in
def VMRS : MovFromVFP<0b0001 /* fpscr */, (outs GPR:$Rt), (ins),
                      "vmrs", "\t$Rt, fpscr",
                      [(set GPR:$Rt, (int_arm_get_fpscr))]>;

// System level FPEXC, FPSID -> GPR
let Uses = [FPSCR] in {
def VMRS_FPEXC : MovFromVFP<0b1000 /* fpexc */, (outs GPR:$Rt), (ins),
                            "vmrs", "\t$Rt, fpexc", []>;
def VMRS_FPSID : MovFromVFP<0b0000 /* fpsid */, (outs GPR:$Rt), (ins),
                            "vmrs", "\t$Rt, fpsid", []>;
}

//===----------------------------------------------------------------------===//
// Move from ARM core register to VFP System Register.
//===----------------------------------------------------------------------===//

class MovToVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
               list<dag> pattern>:
  VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {

  // Instruction operand.
  bits<4> src;

  // Encode instruction operand.
  let Inst{15-12} = src;

  let Inst{27-20} = 0b11101110;
  let Inst{19-16} = opc19_16;
  let Inst{11-8}  = 0b1010;
  let Inst{7}     = 0;
  let Inst{4}     = 1;
}

let Defs = [FPSCR] in {
// Application level GPR -> FPSCR
def VMSR : MovToVFP<0b0001 /* fpscr */, (outs), (ins GPR:$src),
                    "vmsr", "\tfpscr, $src", [(int_arm_set_fpscr GPR:$src)]>;
// System level GPR -> FPEXC
def VMSR_FPEXC : MovToVFP<0b1000 /* fpexc */, (outs), (ins GPR:$src),
                          "vmsr", "\tfpexc, $src", []>;
// System level GPR -> FPSID
def VMSR_FPSID : MovToVFP<0b0000 /* fpsid */, (outs), (ins GPR:$src),
                          "vmsr", "\tfpsid, $src", []>;
}

//===----------------------------------------------------------------------===//
// Misc.
//===----------------------------------------------------------------------===//

// Materialize FP immediates. VFP3 only.
let isReMaterializable = 1 in {
def FCONSTD : VFPAI<(outs DPR:$Dd), (ins vfp_f64imm:$imm),
                    VFPMiscFrm, IIC_fpUNA64,
                    "vmov", ".f64\t$Dd, $imm",
                    [(set DPR:$Dd, vfp_f64imm:$imm)]>, Requires<[HasVFP3]> {
  // Instruction operands.
  bits<5>  Dd;
  bits<32> imm;

  // Encode instruction operands.
  let Inst{15-12} = Dd{3-0};
  let Inst{22}    = Dd{4};
  let Inst{19}    = imm{31};
  let Inst{18-16} = imm{22-20};
  let Inst{3-0}   = imm{19-16};

  // Encode remaining instruction bits.
  let Inst{27-23} = 0b11101;
  let Inst{21-20} = 0b11;
  let Inst{11-9}  = 0b101;
  let Inst{8}     = 1;          // Double precision.
  let Inst{7-4}   = 0b0000;
}

def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
                    VFPMiscFrm, IIC_fpUNA32,
                    "vmov", ".f32\t$Sd, $imm",
                    [(set SPR:$Sd, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
  // Instruction operands.
  bits<5>  Sd;
  bits<32> imm;

  // Encode instruction operands.
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
  let Inst{19}    = imm{31};   // The immediate is handled as a double.
  let Inst{18-16} = imm{22-20};
  let Inst{3-0}   = imm{19-16};

  // Encode remaining instruction bits.
  let Inst{27-23} = 0b11101;
  let Inst{21-20} = 0b11;
  let Inst{11-9}  = 0b101;
  let Inst{8}     = 0;          // Single precision.
  let Inst{7-4}   = 0b0000;