1 //===-- ARMInstrVFP.td - VFP support for ARM ---------------*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file describes the ARM VFP instruction set.
11 //===----------------------------------------------------------------------===//
13 def SDT_CMPFP0 : SDTypeProfile<0, 2, [SDTCisFP<0>, SDTCisVT<1, i32>]>;
14 def SDT_VMOVDRR : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
15 SDTCisSameAs<1, 2>]>;
16 def SDT_VMOVRRD : SDTypeProfile<2, 1, [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>,
17 SDTCisVT<2, f64>]>;
19 def SDT_VMOVSR : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisVT<1, i32>]>;
21 def arm_fmstat : SDNode<"ARMISD::FMSTAT", SDTNone, [SDNPInGlue, SDNPOutGlue]>;
22 def arm_cmpfp : SDNode<"ARMISD::CMPFP", SDT_ARMFCmp, [SDNPOutGlue]>;
23 def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0", SDT_CMPFP0, [SDNPOutGlue]>;
24 def arm_fmdrr : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
25 def arm_fmrrd : SDNode<"ARMISD::VMOVRRD", SDT_VMOVRRD>;
26 def arm_vmovsr : SDNode<"ARMISD::VMOVSR", SDT_VMOVSR>;
28 def SDT_VMOVhr : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, i32>] >;
29 def SDT_VMOVrh : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisFP<1>] >;
30 def arm_vmovhr : SDNode<"ARMISD::VMOVhr", SDT_VMOVhr>;
31 def arm_vmovrh : SDNode<"ARMISD::VMOVrh", SDT_VMOVrh>;
33 //===----------------------------------------------------------------------===//
34 // Operand Definitions.
37 // 8-bit floating-point immediate encodings.
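// These are the VFP "modified immediate" constants: values of the form
// (-1)^s * (16+m)/16 * 2^e with m in [0,15] and e in [-3,4] (magnitudes from
// 0.125 to 31.0). The ARM_AM::getFP16Imm/getFP32Imm/getFP64Imm helpers used
// below return the 8-bit encoding, or -1 if a constant is not representable.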
38 def FPImmOperand : AsmOperandClass {
39 let Name = "FPImm";
40 let ParserMethod = "parseFPImm";
41 }
43 def vfp_f16imm : Operand<f16>,
44 PatLeaf<(f16 fpimm), [{
45 return ARM_AM::getFP16Imm(N->getValueAPF()) != -1;
46 }], SDNodeXForm<fpimm, [{
47 APFloat InVal = N->getValueAPF();
48 uint32_t enc = ARM_AM::getFP16Imm(InVal);
49 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
50 }]>> {
51 let PrintMethod = "printFPImmOperand";
52 let ParserMatchClass = FPImmOperand;
53 }
55 def vfp_f32imm_xform : SDNodeXForm<fpimm, [{
56 APFloat InVal = N->getValueAPF();
57 uint32_t enc = ARM_AM::getFP32Imm(InVal);
58 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
59 }]>;
61 def gi_vfp_f32imm : GICustomOperandRenderer<"renderVFPF32Imm">,
62 GISDNodeXFormEquiv<vfp_f32imm_xform>;
64 def vfp_f32imm : Operand<f32>,
65 PatLeaf<(f32 fpimm), [{
66 return ARM_AM::getFP32Imm(N->getValueAPF()) != -1;
67 }], vfp_f32imm_xform> {
68 let PrintMethod = "printFPImmOperand";
69 let ParserMatchClass = FPImmOperand;
70 let GISelPredicateCode = [{
71 const auto &MO = MI.getOperand(1);
72 if (!MO.isFPImm())
73   return false;
74 return ARM_AM::getFP32Imm(MO.getFPImm()->getValueAPF()) != -1;
75 }];
76 }
78 def vfp_f64imm_xform : SDNodeXForm<fpimm, [{
79 APFloat InVal = N->getValueAPF();
80 uint32_t enc = ARM_AM::getFP64Imm(InVal);
81 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
82 }]>;
84 def gi_vfp_f64imm : GICustomOperandRenderer<"renderVFPF64Imm">,
85 GISDNodeXFormEquiv<vfp_f64imm_xform>;
87 def vfp_f64imm : Operand<f64>,
88 PatLeaf<(f64 fpimm), [{
89 return ARM_AM::getFP64Imm(N->getValueAPF()) != -1;
90 }], vfp_f64imm_xform> {
91 let PrintMethod = "printFPImmOperand";
92 let ParserMatchClass = FPImmOperand;
93 let GISelPredicateCode = [{
94 const auto &MO = MI.getOperand(1);
95 if (!MO.isFPImm())
96   return false;
97 return ARM_AM::getFP64Imm(MO.getFPImm()->getValueAPF()) != -1;
98 }];
99 }
101 def alignedload16 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
102 return cast<LoadSDNode>(N)->getAlignment() >= 2;
103 }]>;
105 def alignedload32 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
106 return cast<LoadSDNode>(N)->getAlignment() >= 4;
107 }]>;
109 def alignedstore16 : PatFrag<(ops node:$val, node:$ptr),
110 (store node:$val, node:$ptr), [{
111 return cast<StoreSDNode>(N)->getAlignment() >= 2;
112 }]>;
114 def alignedstore32 : PatFrag<(ops node:$val, node:$ptr),
115 (store node:$val, node:$ptr), [{
116 return cast<StoreSDNode>(N)->getAlignment() >= 4;
117 }]>;
119 // The VCVT to/from fixed-point instructions encode the 'fbits' operand
120 // (the number of fixed bits) differently than it appears in the assembly
121 // source. It's encoded as "Size - fbits" where Size is the size of the
122 // fixed-point representation (32 or 16) and fbits is the value appearing
123 // in the assembly source, an integer in [0,16] or (0,32], depending on size.
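// For example, "vcvt.s32.f32 s0, s0, #8" has Size = 32 and fbits = 8, so the
// value encoded in the instruction's immediate field is 32 - 8 = 24.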
124 def fbits32_asm_operand : AsmOperandClass { let Name = "FBits32"; }
125 def fbits32 : Operand<i32> {
126 let PrintMethod = "printFBits32";
127 let ParserMatchClass = fbits32_asm_operand;
128 }
130 def fbits16_asm_operand : AsmOperandClass { let Name = "FBits16"; }
131 def fbits16 : Operand<i32> {
132 let PrintMethod = "printFBits16";
133 let ParserMatchClass = fbits16_asm_operand;
134 }
136 //===----------------------------------------------------------------------===//
137 // Load / store Instructions.
140 let canFoldAsLoad = 1, isReMaterializable = 1 in {
142 def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
143 IIC_fpLoad64, "vldr", "\t$Dd, $addr",
144 [(set DPR:$Dd, (f64 (alignedload32 addrmode5:$addr)))]>,
145 Requires<[HasFPRegs]>;
147 def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
148 IIC_fpLoad32, "vldr", "\t$Sd, $addr",
149 [(set SPR:$Sd, (alignedload32 addrmode5:$addr))]>,
150 Requires<[HasFPRegs]> {
151 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
153 let D = VFPNeonDomain;
156 let isUnpredicable = 1 in
157 def VLDRH : AHI5<0b1101, 0b01, (outs HPR:$Sd), (ins addrmode5fp16:$addr),
158 IIC_fpLoad16, "vldr", ".16\t$Sd, $addr",
159 [(set HPR:$Sd, (alignedload16 addrmode5fp16:$addr))]>,
160 Requires<[HasFPRegs16]>;
162 } // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'
164 def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
165 IIC_fpStore64, "vstr", "\t$Dd, $addr",
166 [(alignedstore32 (f64 DPR:$Dd), addrmode5:$addr)]>,
167 Requires<[HasFPRegs]>;
169 def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
170 IIC_fpStore32, "vstr", "\t$Sd, $addr",
171 [(alignedstore32 SPR:$Sd, addrmode5:$addr)]>,
172 Requires<[HasFPRegs]> {
173 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
175 let D = VFPNeonDomain;
178 let isUnpredicable = 1 in
179 def VSTRH : AHI5<0b1101, 0b00, (outs), (ins HPR:$Sd, addrmode5fp16:$addr),
180 IIC_fpStore16, "vstr", ".16\t$Sd, $addr",
181 [(alignedstore16 HPR:$Sd, addrmode5fp16:$addr)]>,
182 Requires<[HasFPRegs16]>;
184 //===----------------------------------------------------------------------===//
185 // Load / store multiple Instructions.
188 multiclass vfp_ldst_mult<string asm, bit L_bit,
189 InstrItinClass itin, InstrItinClass itin_upd> {
190 let Predicates = [HasFPRegs] in {
193 AXDI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
195 !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
196 let Inst{24-23} = 0b01; // Increment After
197 let Inst{21} = 0; // No writeback
198 let Inst{20} = L_bit;
201 AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
203 IndexModeUpd, itin_upd,
204 !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
205 let Inst{24-23} = 0b01; // Increment After
206 let Inst{21} = 1; // Writeback
207 let Inst{20} = L_bit;
210 AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
212 IndexModeUpd, itin_upd,
213 !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
214 let Inst{24-23} = 0b10; // Decrement Before
215 let Inst{21} = 1; // Writeback
216 let Inst{20} = L_bit;
221 AXSI4<(outs), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
223 !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
224 let Inst{24-23} = 0b01; // Increment After
225 let Inst{21} = 0; // No writeback
226 let Inst{20} = L_bit;
228 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
230 let D = VFPNeonDomain;
233 AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
235 IndexModeUpd, itin_upd,
236 !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
237 let Inst{24-23} = 0b01; // Increment After
238 let Inst{21} = 1; // Writeback
239 let Inst{20} = L_bit;
241 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
243 let D = VFPNeonDomain;
246 AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
248 IndexModeUpd, itin_upd,
249 !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
250 let Inst{24-23} = 0b10; // Decrement Before
251 let Inst{21} = 1; // Writeback
252 let Inst{20} = L_bit;
254 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
256 let D = VFPNeonDomain;
261 let hasSideEffects = 0 in {
263 let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
264 defm VLDM : vfp_ldst_mult<"vldm", 1, IIC_fpLoad_m, IIC_fpLoad_mu>;
266 let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
267 defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpStore_m, IIC_fpStore_mu>;
271 def : MnemonicAlias<"vldm", "vldmia">;
272 def : MnemonicAlias<"vstm", "vstmia">;
275 //===----------------------------------------------------------------------===//
276 // Lazy load / store multiple Instructions
279 def VLLDM : AXSI4<(outs), (ins GPRnopc:$Rn, pred:$p), IndexModeNone,
280 IIC_fpLoad_m, "vlldm${p}\t$Rn", "", []>,
281 Requires<[HasV8MMainline, Has8MSecExt]> {
282 let Inst{24-23} = 0b00;
292 def VLSTM : AXSI4<(outs), (ins GPRnopc:$Rn, pred:$p), IndexModeNone,
293 IIC_fpStore_m, "vlstm${p}\t$Rn", "", []>,
294 Requires<[HasV8MMainline, Has8MSecExt]> {
295 let Inst{24-23} = 0b00;
304 def : InstAlias<"vpush${p} $r", (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r), 0>,
305 Requires<[HasFPRegs]>;
306 def : InstAlias<"vpush${p} $r", (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r), 0>,
307 Requires<[HasFPRegs]>;
308 def : InstAlias<"vpop${p} $r", (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r), 0>,
309 Requires<[HasFPRegs]>;
310 def : InstAlias<"vpop${p} $r", (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r), 0>,
311 Requires<[HasFPRegs]>;
312 defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
313 (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>;
314 defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
315 (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>;
316 defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
317 (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>;
318 defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
319 (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>;
321 // FLDMX, FSTMX - Load and store multiple unknown precision registers for disassembly only.
323 // These instructions are deprecated, so we don't want them to be selected.
324 // However, there is no UAL syntax for them, so we keep them around for
325 // (dis)assembly only.
326 multiclass vfp_ldstx_mult<string asm, bit L_bit> {
327 let Predicates = [HasFPRegs] in {
330 AXXI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
331 IndexModeNone, !strconcat(asm, "iax${p}\t$Rn, $regs"), "", []> {
332 let Inst{24-23} = 0b01; // Increment After
333 let Inst{21} = 0; // No writeback
334 let Inst{20} = L_bit;
337 AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
338 IndexModeUpd, !strconcat(asm, "iax${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
339 let Inst{24-23} = 0b01; // Increment After
340 let Inst{21} = 1; // Writeback
341 let Inst{20} = L_bit;
344 AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
345 IndexModeUpd, !strconcat(asm, "dbx${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
346 let Inst{24-23} = 0b10; // Decrement Before
347 let Inst{21} = 1; // Writeback
348 let Inst{20} = L_bit;
353 defm FLDM : vfp_ldstx_mult<"fldm", 1>;
354 defm FSTM : vfp_ldstx_mult<"fstm", 0>;
356 def : VFP2MnemonicAlias<"fldmeax", "fldmdbx">;
357 def : VFP2MnemonicAlias<"fldmfdx", "fldmiax">;
359 def : VFP2MnemonicAlias<"fstmeax", "fstmiax">;
360 def : VFP2MnemonicAlias<"fstmfdx", "fstmdbx">;
362 //===----------------------------------------------------------------------===//
363 // FP Binary Operations.
366 let TwoOperandAliasConstraint = "$Dn = $Dd" in
367 def VADDD : ADbI<0b11100, 0b11, 0, 0,
368 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
369 IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
370 [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>,
371 Sched<[WriteFPALU64]>;
373 let TwoOperandAliasConstraint = "$Sn = $Sd" in
374 def VADDS : ASbIn<0b11100, 0b11, 0, 0,
375 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
376 IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
377 [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]>,
378 Sched<[WriteFPALU32]> {
379 // Some single precision VFP instructions may be executed on both NEON and
380 // VFP pipelines on A8.
381 let D = VFPNeonA8Domain;
384 let TwoOperandAliasConstraint = "$Sn = $Sd" in
385 def VADDH : AHbI<0b11100, 0b11, 0, 0,
386 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
387 IIC_fpALU16, "vadd", ".f16\t$Sd, $Sn, $Sm",
388 [(set HPR:$Sd, (fadd HPR:$Sn, HPR:$Sm))]>,
389 Sched<[WriteFPALU32]>;
391 let TwoOperandAliasConstraint = "$Dn = $Dd" in
392 def VSUBD : ADbI<0b11100, 0b11, 1, 0,
393 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
394 IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
395 [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>,
396 Sched<[WriteFPALU64]>;
398 let TwoOperandAliasConstraint = "$Sn = $Sd" in
399 def VSUBS : ASbIn<0b11100, 0b11, 1, 0,
400 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
401 IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
402 [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]>,
403 Sched<[WriteFPALU32]>{
404 // Some single precision VFP instructions may be executed on both NEON and
405 // VFP pipelines on A8.
406 let D = VFPNeonA8Domain;
409 let TwoOperandAliasConstraint = "$Sn = $Sd" in
410 def VSUBH : AHbI<0b11100, 0b11, 1, 0,
411 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
412 IIC_fpALU16, "vsub", ".f16\t$Sd, $Sn, $Sm",
413 [(set HPR:$Sd, (fsub HPR:$Sn, HPR:$Sm))]>,
414 Sched<[WriteFPALU32]>;
416 let TwoOperandAliasConstraint = "$Dn = $Dd" in
417 def VDIVD : ADbI<0b11101, 0b00, 0, 0,
418 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
419 IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
420 [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>,
421 Sched<[WriteFPDIV64]>;
423 let TwoOperandAliasConstraint = "$Sn = $Sd" in
424 def VDIVS : ASbI<0b11101, 0b00, 0, 0,
425 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
426 IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
427 [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>,
428 Sched<[WriteFPDIV32]>;
430 let TwoOperandAliasConstraint = "$Sn = $Sd" in
431 def VDIVH : AHbI<0b11101, 0b00, 0, 0,
432 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
433 IIC_fpDIV16, "vdiv", ".f16\t$Sd, $Sn, $Sm",
434 [(set HPR:$Sd, (fdiv HPR:$Sn, HPR:$Sm))]>,
435 Sched<[WriteFPDIV32]>;
437 let TwoOperandAliasConstraint = "$Dn = $Dd" in
438 def VMULD : ADbI<0b11100, 0b10, 0, 0,
439 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
440 IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
441 [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>,
442 Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;
444 let TwoOperandAliasConstraint = "$Sn = $Sd" in
445 def VMULS : ASbIn<0b11100, 0b10, 0, 0,
446 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
447 IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
448 [(set SPR:$Sd, (fmul SPR:$Sn, SPR:$Sm))]>,
449 Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]> {
450 // Some single precision VFP instructions may be executed on both NEON and
451 // VFP pipelines on A8.
452 let D = VFPNeonA8Domain;
455 let TwoOperandAliasConstraint = "$Sn = $Sd" in
456 def VMULH : AHbI<0b11100, 0b10, 0, 0,
457 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
458 IIC_fpMUL16, "vmul", ".f16\t$Sd, $Sn, $Sm",
459 [(set HPR:$Sd, (fmul HPR:$Sn, HPR:$Sm))]>,
460 Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]>;
462 def VNMULD : ADbI<0b11100, 0b10, 1, 0,
463 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
464 IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
465 [(set DPR:$Dd, (fneg (fmul DPR:$Dn, (f64 DPR:$Dm))))]>,
466 Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;
468 def VNMULS : ASbI<0b11100, 0b10, 1, 0,
469 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
470 IIC_fpMUL32, "vnmul", ".f32\t$Sd, $Sn, $Sm",
471 [(set SPR:$Sd, (fneg (fmul SPR:$Sn, SPR:$Sm)))]>,
472 Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]> {
473 // Some single precision VFP instructions may be executed on both NEON and
474 // VFP pipelines on A8.
475 let D = VFPNeonA8Domain;
478 def VNMULH : AHbI<0b11100, 0b10, 1, 0,
479 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
480 IIC_fpMUL16, "vnmul", ".f16\t$Sd, $Sn, $Sm",
481 [(set HPR:$Sd, (fneg (fmul HPR:$Sn, HPR:$Sm)))]>,
482 Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]>;
484 multiclass vsel_inst<string op, bits<2> opc, int CC> {
485 let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
486 Uses = [CPSR], AddedComplexity = 4, isUnpredicable = 1 in {
487 def H : AHbInp<0b11100, opc, 0,
488 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
489 NoItinerary, !strconcat("vsel", op, ".f16\t$Sd, $Sn, $Sm"),
490 [(set HPR:$Sd, (ARMcmov HPR:$Sm, HPR:$Sn, CC))]>,
491 Requires<[HasFullFP16]>;
493 def S : ASbInp<0b11100, opc, 0,
494 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
495 NoItinerary, !strconcat("vsel", op, ".f32\t$Sd, $Sn, $Sm"),
496 [(set SPR:$Sd, (ARMcmov SPR:$Sm, SPR:$Sn, CC))]>,
497 Requires<[HasFPARMv8]>;
499 def D : ADbInp<0b11100, opc, 0,
500 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
501 NoItinerary, !strconcat("vsel", op, ".f64\t$Dd, $Dn, $Dm"),
502 [(set DPR:$Dd, (ARMcmov (f64 DPR:$Dm), (f64 DPR:$Dn), CC))]>,
503 Requires<[HasFPARMv8, HasDPVFP]>;
507 // The CC constants here match ARMCC::CondCodes.
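// (eq = 0, vs = 6, ge = 10, gt = 12 in ARMCC::CondCodes, matching the integer
// arguments passed to the defm lines below.)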
508 defm VSELGT : vsel_inst<"gt", 0b11, 12>;
509 defm VSELGE : vsel_inst<"ge", 0b10, 10>;
510 defm VSELEQ : vsel_inst<"eq", 0b00, 0>;
511 defm VSELVS : vsel_inst<"vs", 0b01, 6>;
513 multiclass vmaxmin_inst<string op, bit opc, SDNode SD> {
514 let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
515 isUnpredicable = 1 in {
516 def H : AHbInp<0b11101, 0b00, opc,
517 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
518 NoItinerary, !strconcat(op, ".f16\t$Sd, $Sn, $Sm"),
519 [(set HPR:$Sd, (SD HPR:$Sn, HPR:$Sm))]>,
520 Requires<[HasFullFP16]>;
522 def S : ASbInp<0b11101, 0b00, opc,
523 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
524 NoItinerary, !strconcat(op, ".f32\t$Sd, $Sn, $Sm"),
525 [(set SPR:$Sd, (SD SPR:$Sn, SPR:$Sm))]>,
526 Requires<[HasFPARMv8]>;
528 def D : ADbInp<0b11101, 0b00, opc,
529 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
530 NoItinerary, !strconcat(op, ".f64\t$Dd, $Dn, $Dm"),
531 [(set DPR:$Dd, (f64 (SD (f64 DPR:$Dn), (f64 DPR:$Dm))))]>,
532 Requires<[HasFPARMv8, HasDPVFP]>;
536 defm VFP_VMAXNM : vmaxmin_inst<"vmaxnm", 0, fmaxnum>;
537 defm VFP_VMINNM : vmaxmin_inst<"vminnm", 1, fminnum>;
539 // Match reassociated forms only when sign-dependent rounding is not honored.
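// ((-a) * b and -(a * b) can round differently under directed rounding modes,
// which is why the fold into VNMUL is gated on that predicate.)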
540 def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
541 (VNMULD DPR:$a, DPR:$b)>,
542 Requires<[NoHonorSignDependentRounding,HasDPVFP]>;
543 def : Pat<(fmul (fneg SPR:$a), SPR:$b),
544 (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
546 // These are encoded as unary instructions.
547 let Defs = [FPSCR_NZCV] in {
548 def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
549 (outs), (ins DPR:$Dd, DPR:$Dm),
550 IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm",
551 [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm), (i32 1))]>;
553 def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0,
554 (outs), (ins SPR:$Sd, SPR:$Sm),
555 IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm",
556 [(arm_cmpfp SPR:$Sd, SPR:$Sm, (i32 1))]> {
557 // Some single precision VFP instructions may be executed on both NEON and
558 // VFP pipelines on A8.
559 let D = VFPNeonA8Domain;
562 def VCMPEH : AHuI<0b11101, 0b11, 0b0100, 0b11, 0,
563 (outs), (ins HPR:$Sd, HPR:$Sm),
564 IIC_fpCMP16, "vcmpe", ".f16\t$Sd, $Sm",
565 [(arm_cmpfp HPR:$Sd, HPR:$Sm, (i32 1))]>;
567 def VCMPD : ADuI<0b11101, 0b11, 0b0100, 0b01, 0,
568 (outs), (ins DPR:$Dd, DPR:$Dm),
569 IIC_fpCMP64, "vcmp", ".f64\t$Dd, $Dm",
570 [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm), (i32 0))]>;
572 def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
573 (outs), (ins SPR:$Sd, SPR:$Sm),
574 IIC_fpCMP32, "vcmp", ".f32\t$Sd, $Sm",
575 [(arm_cmpfp SPR:$Sd, SPR:$Sm, (i32 0))]> {
576 // Some single precision VFP instructions may be executed on both NEON and
577 // VFP pipelines on A8.
578 let D = VFPNeonA8Domain;
581 def VCMPH : AHuI<0b11101, 0b11, 0b0100, 0b01, 0,
582 (outs), (ins HPR:$Sd, HPR:$Sm),
583 IIC_fpCMP16, "vcmp", ".f16\t$Sd, $Sm",
584 [(arm_cmpfp HPR:$Sd, HPR:$Sm, (i32 0))]>;
585 } // Defs = [FPSCR_NZCV]
587 //===----------------------------------------------------------------------===//
588 // FP Unary Operations.
591 def VABSD : ADuI<0b11101, 0b11, 0b0000, 0b11, 0,
592 (outs DPR:$Dd), (ins DPR:$Dm),
593 IIC_fpUNA64, "vabs", ".f64\t$Dd, $Dm",
594 [(set DPR:$Dd, (fabs (f64 DPR:$Dm)))]>;
596 def VABSS : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,
597 (outs SPR:$Sd), (ins SPR:$Sm),
598 IIC_fpUNA32, "vabs", ".f32\t$Sd, $Sm",
599 [(set SPR:$Sd, (fabs SPR:$Sm))]> {
600 // Some single precision VFP instructions may be executed on both NEON and
601 // VFP pipelines on A8.
602 let D = VFPNeonA8Domain;
605 def VABSH : AHuI<0b11101, 0b11, 0b0000, 0b11, 0,
606 (outs HPR:$Sd), (ins HPR:$Sm),
607 IIC_fpUNA16, "vabs", ".f16\t$Sd, $Sm",
608 [(set HPR:$Sd, (fabs (f16 HPR:$Sm)))]>;
610 let Defs = [FPSCR_NZCV] in {
611 def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
612 (outs), (ins DPR:$Dd),
613 IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0",
614 [(arm_cmpfp0 (f64 DPR:$Dd), (i32 1))]> {
615 let Inst{3-0} = 0b0000;
619 def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0,
620 (outs), (ins SPR:$Sd),
621 IIC_fpCMP32, "vcmpe", ".f32\t$Sd, #0",
622 [(arm_cmpfp0 SPR:$Sd, (i32 1))]> {
623 let Inst{3-0} = 0b0000;
626 // Some single precision VFP instructions may be executed on both NEON and
627 // VFP pipelines on A8.
628 let D = VFPNeonA8Domain;
631 def VCMPEZH : AHuI<0b11101, 0b11, 0b0101, 0b11, 0,
632 (outs), (ins HPR:$Sd),
633 IIC_fpCMP16, "vcmpe", ".f16\t$Sd, #0",
634 [(arm_cmpfp0 HPR:$Sd, (i32 1))]> {
635 let Inst{3-0} = 0b0000;
639 def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
640 (outs), (ins DPR:$Dd),
641 IIC_fpCMP64, "vcmp", ".f64\t$Dd, #0",
642 [(arm_cmpfp0 (f64 DPR:$Dd), (i32 0))]> {
643 let Inst{3-0} = 0b0000;
647 def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
648 (outs), (ins SPR:$Sd),
649 IIC_fpCMP32, "vcmp", ".f32\t$Sd, #0",
650 [(arm_cmpfp0 SPR:$Sd, (i32 0))]> {
651 let Inst{3-0} = 0b0000;
654 // Some single precision VFP instructions may be executed on both NEON and
655 // VFP pipelines on A8.
656 let D = VFPNeonA8Domain;
659 def VCMPZH : AHuI<0b11101, 0b11, 0b0101, 0b01, 0,
660 (outs), (ins HPR:$Sd),
661 IIC_fpCMP16, "vcmp", ".f16\t$Sd, #0",
662 [(arm_cmpfp0 HPR:$Sd, (i32 0))]> {
663 let Inst{3-0} = 0b0000;
666 } // Defs = [FPSCR_NZCV]
668 def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
669 (outs DPR:$Dd), (ins SPR:$Sm),
670 IIC_fpCVTDS, "vcvt", ".f64.f32\t$Dd, $Sm",
671 [(set DPR:$Dd, (fpextend SPR:$Sm))]>,
672 Sched<[WriteFPCVT]> {
673 // Instruction operands.
677 // Encode instruction operands.
678 let Inst{3-0} = Sm{4-1};
680 let Inst{15-12} = Dd{3-0};
681 let Inst{22} = Dd{4};
683 let Predicates = [HasVFP2, HasDPVFP];
686 // Special case encoding: bits 11-8 is 0b1011.
687 def VCVTSD : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
688 IIC_fpCVTSD, "vcvt", ".f32.f64\t$Sd, $Dm",
689 [(set SPR:$Sd, (fpround DPR:$Dm))]>,
690 Sched<[WriteFPCVT]> {
691 // Instruction operands.
695 // Encode instruction operands.
696 let Inst{3-0} = Dm{3-0};
698 let Inst{15-12} = Sd{4-1};
699 let Inst{22} = Sd{0};
701 let Inst{27-23} = 0b11101;
702 let Inst{21-16} = 0b110111;
703 let Inst{11-8} = 0b1011;
704 let Inst{7-6} = 0b11;
707 let Predicates = [HasVFP2, HasDPVFP];
710 // Between half, single and double-precision.
711 def VCVTBHS: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
712 /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$Sd, $Sm",
713 [/* Intentionally left blank, see patterns below */]>,
717 def : FP16Pat<(f32 (fpextend HPR:$Sm)),
718 (VCVTBHS (COPY_TO_REGCLASS HPR:$Sm, SPR))>;
719 def : FP16Pat<(f16_to_fp GPR:$a),
720 (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;
722 def VCVTBSH: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
723 /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$Sd, $Sm",
724 [/* Intentionally left blank, see patterns below */]>,
728 def : FP16Pat<(f16 (fpround SPR:$Sm)),
729 (COPY_TO_REGCLASS (VCVTBSH SPR:$Sm), HPR)>;
730 def : FP16Pat<(fp_to_f16 SPR:$a),
731 (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;
733 def VCVTTHS: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
734 /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$Sd, $Sm",
735 [/* For disassembly only; pattern left blank */]>,
739 def VCVTTSH: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
740 /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$Sd, $Sm",
741 [/* For disassembly only; pattern left blank */]>,
745 def VCVTBHD : ADuI<0b11101, 0b11, 0b0010, 0b01, 0,
746 (outs DPR:$Dd), (ins SPR:$Sm),
747 NoItinerary, "vcvtb", ".f64.f16\t$Dd, $Sm",
748 [/* Intentionally left blank, see patterns below */]>,
749 Requires<[HasFPARMv8, HasDPVFP]>,
750 Sched<[WriteFPCVT]> {
751 // Instruction operands.
754 // Encode instruction operands.
755 let Inst{3-0} = Sm{4-1};
759 def : FullFP16Pat<(f64 (fpextend HPR:$Sm)),
760 (VCVTBHD (COPY_TO_REGCLASS HPR:$Sm, SPR))>,
761 Requires<[HasFPARMv8, HasDPVFP]>;
762 def : FP16Pat<(f64 (f16_to_fp GPR:$a)),
763 (VCVTBHD (COPY_TO_REGCLASS GPR:$a, SPR))>,
764 Requires<[HasFPARMv8, HasDPVFP]>;
766 def VCVTBDH : ADuI<0b11101, 0b11, 0b0011, 0b01, 0,
767 (outs SPR:$Sd), (ins DPR:$Dm),
768 NoItinerary, "vcvtb", ".f16.f64\t$Sd, $Dm",
769 [/* Intentionally left blank, see patterns below */]>,
770 Requires<[HasFPARMv8, HasDPVFP]> {
771 // Instruction operands.
775 // Encode instruction operands.
776 let Inst{3-0} = Dm{3-0};
778 let Inst{15-12} = Sd{4-1};
779 let Inst{22} = Sd{0};
782 def : FullFP16Pat<(f16 (fpround DPR:$Dm)),
783 (COPY_TO_REGCLASS (VCVTBDH DPR:$Dm), HPR)>,
784 Requires<[HasFPARMv8, HasDPVFP]>;
785 def : FP16Pat<(fp_to_f16 (f64 DPR:$a)),
786 (i32 (COPY_TO_REGCLASS (VCVTBDH DPR:$a), GPR))>,
787 Requires<[HasFPARMv8, HasDPVFP]>;
789 def VCVTTHD : ADuI<0b11101, 0b11, 0b0010, 0b11, 0,
790 (outs DPR:$Dd), (ins SPR:$Sm),
791 NoItinerary, "vcvtt", ".f64.f16\t$Dd, $Sm",
792 []>, Requires<[HasFPARMv8, HasDPVFP]> {
793 // Instruction operands.
796 // Encode instruction operands.
797 let Inst{3-0} = Sm{4-1};
801 def VCVTTDH : ADuI<0b11101, 0b11, 0b0011, 0b11, 0,
802 (outs SPR:$Sd), (ins DPR:$Dm),
803 NoItinerary, "vcvtt", ".f16.f64\t$Sd, $Dm",
804 []>, Requires<[HasFPARMv8, HasDPVFP]> {
805 // Instruction operands.
809 // Encode instruction operands.
810 let Inst{15-12} = Sd{4-1};
811 let Inst{22} = Sd{0};
812 let Inst{3-0} = Dm{3-0};
816 multiclass vcvt_inst<string opc, bits<2> rm,
817 SDPatternOperator node = null_frag> {
818 let PostEncoderMethod = "", DecoderNamespace = "VFPV8" in {
819 def SH : AHuInp<0b11101, 0b11, 0b1100, 0b11, 0,
820 (outs SPR:$Sd), (ins HPR:$Sm),
821 NoItinerary, !strconcat("vcvt", opc, ".s32.f16\t$Sd, $Sm"),
823 Requires<[HasFullFP16]> {
824 let Inst{17-16} = rm;
827 def UH : AHuInp<0b11101, 0b11, 0b1100, 0b01, 0,
828 (outs SPR:$Sd), (ins HPR:$Sm),
829 NoItinerary, !strconcat("vcvt", opc, ".u32.f16\t$Sd, $Sm"),
831 Requires<[HasFullFP16]> {
832 let Inst{17-16} = rm;
835 def SS : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
836 (outs SPR:$Sd), (ins SPR:$Sm),
837 NoItinerary, !strconcat("vcvt", opc, ".s32.f32\t$Sd, $Sm"),
839 Requires<[HasFPARMv8]> {
840 let Inst{17-16} = rm;
843 def US : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
844 (outs SPR:$Sd), (ins SPR:$Sm),
845 NoItinerary, !strconcat("vcvt", opc, ".u32.f32\t$Sd, $Sm"),
847 Requires<[HasFPARMv8]> {
848 let Inst{17-16} = rm;
851 def SD : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
852 (outs SPR:$Sd), (ins DPR:$Dm),
853 NoItinerary, !strconcat("vcvt", opc, ".s32.f64\t$Sd, $Dm"),
855 Requires<[HasFPARMv8, HasDPVFP]> {
858 let Inst{17-16} = rm;
860 // Encode instruction operands.
861 let Inst{3-0} = Dm{3-0};
866 def UD : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
867 (outs SPR:$Sd), (ins DPR:$Dm),
868 NoItinerary, !strconcat("vcvt", opc, ".u32.f64\t$Sd, $Dm"),
870 Requires<[HasFPARMv8, HasDPVFP]> {
873 let Inst{17-16} = rm;
875 // Encode instruction operands
876 let Inst{3-0} = Dm{3-0};
882 let Predicates = [HasFPARMv8] in {
883 let Predicates = [HasFullFP16] in {
884 def : Pat<(i32 (fp_to_sint (node HPR:$a))),
886 (!cast<Instruction>(NAME#"SH") HPR:$a),
889 def : Pat<(i32 (fp_to_uint (node HPR:$a))),
891 (!cast<Instruction>(NAME#"UH") HPR:$a),
894 def : Pat<(i32 (fp_to_sint (node SPR:$a))),
896 (!cast<Instruction>(NAME#"SS") SPR:$a),
898 def : Pat<(i32 (fp_to_uint (node SPR:$a))),
900 (!cast<Instruction>(NAME#"US") SPR:$a),
903 let Predicates = [HasFPARMv8, HasDPVFP] in {
904 def : Pat<(i32 (fp_to_sint (node (f64 DPR:$a)))),
906 (!cast<Instruction>(NAME#"SD") DPR:$a),
908 def : Pat<(i32 (fp_to_uint (node (f64 DPR:$a)))),
910 (!cast<Instruction>(NAME#"UD") DPR:$a),
915 defm VCVTA : vcvt_inst<"a", 0b00, fround>;
916 defm VCVTN : vcvt_inst<"n", 0b01>;
917 defm VCVTP : vcvt_inst<"p", 0b10, fceil>;
918 defm VCVTM : vcvt_inst<"m", 0b11, ffloor>;
920 def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
921 (outs DPR:$Dd), (ins DPR:$Dm),
922 IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm",
923 [(set DPR:$Dd, (fneg (f64 DPR:$Dm)))]>;
925 def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,
926 (outs SPR:$Sd), (ins SPR:$Sm),
927 IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
928 [(set SPR:$Sd, (fneg SPR:$Sm))]> {
929 // Some single precision VFP instructions may be executed on both NEON and
930 // VFP pipelines on A8.
931 let D = VFPNeonA8Domain;
934 def VNEGH : AHuI<0b11101, 0b11, 0b0001, 0b01, 0,
935 (outs HPR:$Sd), (ins HPR:$Sm),
936 IIC_fpUNA16, "vneg", ".f16\t$Sd, $Sm",
937 [(set HPR:$Sd, (fneg HPR:$Sm))]>;
939 multiclass vrint_inst_zrx<string opc, bit op, bit op2, SDPatternOperator node> {
940 def H : AHuI<0b11101, 0b11, 0b0110, 0b11, 0,
941 (outs HPR:$Sd), (ins HPR:$Sm),
942 NoItinerary, !strconcat("vrint", opc), ".f16\t$Sd, $Sm",
943 [(set (f16 HPR:$Sd), (node (f16 HPR:$Sm)))]>,
944 Requires<[HasFullFP16]> {
949 def S : ASuI<0b11101, 0b11, 0b0110, 0b11, 0,
950 (outs SPR:$Sd), (ins SPR:$Sm),
951 NoItinerary, !strconcat("vrint", opc), ".f32\t$Sd, $Sm",
952 [(set (f32 SPR:$Sd), (node (f32 SPR:$Sm)))]>,
953 Requires<[HasFPARMv8]> {
957 def D : ADuI<0b11101, 0b11, 0b0110, 0b11, 0,
958 (outs DPR:$Dd), (ins DPR:$Dm),
959 NoItinerary, !strconcat("vrint", opc), ".f64\t$Dd, $Dm",
960 [(set (f64 DPR:$Dd), (node (f64 DPR:$Dm)))]>,
961 Requires<[HasFPARMv8, HasDPVFP]> {
966 def : InstAlias<!strconcat("vrint", opc, "$p.f16.f16\t$Sd, $Sm"),
967 (!cast<Instruction>(NAME#"H") SPR:$Sd, SPR:$Sm, pred:$p), 0>,
968 Requires<[HasFullFP16]>;
969 def : InstAlias<!strconcat("vrint", opc, "$p.f32.f32\t$Sd, $Sm"),
970 (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm, pred:$p), 0>,
971 Requires<[HasFPARMv8]>;
972 def : InstAlias<!strconcat("vrint", opc, "$p.f64.f64\t$Dd, $Dm"),
973 (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm, pred:$p), 0>,
974 Requires<[HasFPARMv8,HasDPVFP]>;
977 defm VRINTZ : vrint_inst_zrx<"z", 0, 1, ftrunc>;
978 defm VRINTR : vrint_inst_zrx<"r", 0, 0, fnearbyint>;
979 defm VRINTX : vrint_inst_zrx<"x", 1, 0, frint>;
981 multiclass vrint_inst_anpm<string opc, bits<2> rm,
982 SDPatternOperator node = null_frag> {
983 let PostEncoderMethod = "", DecoderNamespace = "VFPV8",
984 isUnpredicable = 1 in {
985 def H : AHuInp<0b11101, 0b11, 0b1000, 0b01, 0,
986 (outs HPR:$Sd), (ins HPR:$Sm),
987 NoItinerary, !strconcat("vrint", opc, ".f16\t$Sd, $Sm"),
988 [(set (f16 HPR:$Sd), (node (f16 HPR:$Sm)))]>,
989 Requires<[HasFullFP16]> {
990 let Inst{17-16} = rm;
992 def S : ASuInp<0b11101, 0b11, 0b1000, 0b01, 0,
993 (outs SPR:$Sd), (ins SPR:$Sm),
994 NoItinerary, !strconcat("vrint", opc, ".f32\t$Sd, $Sm"),
995 [(set (f32 SPR:$Sd), (node (f32 SPR:$Sm)))]>,
996 Requires<[HasFPARMv8]> {
997 let Inst{17-16} = rm;
999 def D : ADuInp<0b11101, 0b11, 0b1000, 0b01, 0,
1000 (outs DPR:$Dd), (ins DPR:$Dm),
1001 NoItinerary, !strconcat("vrint", opc, ".f64\t$Dd, $Dm"),
1002 [(set (f64 DPR:$Dd), (node (f64 DPR:$Dm)))]>,
1003 Requires<[HasFPARMv8, HasDPVFP]> {
1004 let Inst{17-16} = rm;
1008 def : InstAlias<!strconcat("vrint", opc, ".f32.f32\t$Sd, $Sm"),
1009 (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm), 0>,
1010 Requires<[HasFPARMv8]>;
1011 def : InstAlias<!strconcat("vrint", opc, ".f64.f64\t$Dd, $Dm"),
1012 (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm), 0>,
1013 Requires<[HasFPARMv8,HasDPVFP]>;
1016 defm VRINTA : vrint_inst_anpm<"a", 0b00, fround>;
1017 defm VRINTN : vrint_inst_anpm<"n", 0b01, int_arm_neon_vrintn>;
1018 defm VRINTP : vrint_inst_anpm<"p", 0b10, fceil>;
1019 defm VRINTM : vrint_inst_anpm<"m", 0b11, ffloor>;
1021 def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
1022 (outs DPR:$Dd), (ins DPR:$Dm),
1023 IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm",
1024 [(set DPR:$Dd, (fsqrt (f64 DPR:$Dm)))]>,
1025 Sched<[WriteFPSQRT64]>;
1027 def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
1028 (outs SPR:$Sd), (ins SPR:$Sm),
1029 IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm",
1030 [(set SPR:$Sd, (fsqrt SPR:$Sm))]>,
1031 Sched<[WriteFPSQRT32]>;
1033 def VSQRTH : AHuI<0b11101, 0b11, 0b0001, 0b11, 0,
1034 (outs HPR:$Sd), (ins HPR:$Sm),
1035 IIC_fpSQRT16, "vsqrt", ".f16\t$Sd, $Sm",
1036 [(set HPR:$Sd, (fsqrt (f16 HPR:$Sm)))]>;
1038 let hasSideEffects = 0 in {
1039 let isMoveReg = 1 in {
1040 def VMOVD : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
1041 (outs DPR:$Dd), (ins DPR:$Dm),
1042 IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm", []>,
1043 Requires<[HasFPRegs64]>;
1045 def VMOVS : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
1046 (outs SPR:$Sd), (ins SPR:$Sm),
1047 IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm", []>,
1048 Requires<[HasFPRegs]>;
1051 let PostEncoderMethod = "", DecoderNamespace = "VFPV8", isUnpredicable = 1 in {
1052 def VMOVH : ASuInp<0b11101, 0b11, 0b0000, 0b01, 0,
1053 (outs SPR:$Sd), (ins SPR:$Sm),
1054 IIC_fpUNA16, "vmovx.f16\t$Sd, $Sm", []>,
1055 Requires<[HasFullFP16]>;
1057 def VINSH : ASuInp<0b11101, 0b11, 0b0000, 0b11, 0,
1058 (outs SPR:$Sd), (ins SPR:$Sm),
1059 IIC_fpUNA16, "vins.f16\t$Sd, $Sm", []>,
1060 Requires<[HasFullFP16]>;
1061 } // PostEncoderMethod
1064 //===----------------------------------------------------------------------===//
1065 // FP <-> GPR Copies. Int <-> FP Conversions.
1068 let isMoveReg = 1 in {
1069 def VMOVRS : AVConv2I<0b11100001, 0b1010,
1070 (outs GPR:$Rt), (ins SPR:$Sn),
1071 IIC_fpMOVSI, "vmov", "\t$Rt, $Sn",
1072 [(set GPR:$Rt, (bitconvert SPR:$Sn))]>,
1073 Requires<[HasFPRegs]>,
1074 Sched<[WriteFPMOV]> {
1075 // Instruction operands.
1079 // Encode instruction operands.
1080 let Inst{19-16} = Sn{4-1};
1081 let Inst{7} = Sn{0};
1082 let Inst{15-12} = Rt;
1084 let Inst{6-5} = 0b00;
1085 let Inst{3-0} = 0b0000;
1087 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
1089 let D = VFPNeonDomain;
1092 // Bitcast i32 -> f32. NEON prefers to use VMOVDRR.
1093 def VMOVSR : AVConv4I<0b11100000, 0b1010,
1094 (outs SPR:$Sn), (ins GPR:$Rt),
1095 IIC_fpMOVIS, "vmov", "\t$Sn, $Rt",
1096 [(set SPR:$Sn, (bitconvert GPR:$Rt))]>,
1097 Requires<[HasFPRegs, UseVMOVSR]>,
1098 Sched<[WriteFPMOV]> {
1099 // Instruction operands.
1103 // Encode instruction operands.
1104 let Inst{19-16} = Sn{4-1};
1105 let Inst{7} = Sn{0};
1106 let Inst{15-12} = Rt;
1108 let Inst{6-5} = 0b00;
1109 let Inst{3-0} = 0b0000;
1111 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
1113 let D = VFPNeonDomain;
1116 def : Pat<(arm_vmovsr GPR:$Rt), (VMOVSR GPR:$Rt)>, Requires<[HasVFP2, UseVMOVSR]>;
1118 let hasSideEffects = 0 in {
1119 def VMOVRRD : AVConv3I<0b11000101, 0b1011,
1120 (outs GPR:$Rt, GPR:$Rt2), (ins DPR:$Dm),
1121 IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $Dm",
1122 [(set GPR:$Rt, GPR:$Rt2, (arm_fmrrd DPR:$Dm))]>,
1123 Requires<[HasFPRegs]>,
1124 Sched<[WriteFPMOV]> {
1125 // Instruction operands.
1130 // Encode instruction operands.
1131 let Inst{3-0} = Dm{3-0};
1132 let Inst{5} = Dm{4};
1133 let Inst{15-12} = Rt;
1134 let Inst{19-16} = Rt2;
1136 let Inst{7-6} = 0b00;
1138 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
1140 let D = VFPNeonDomain;
1142 // This instruction is equivalent to
1143 // $Rt = EXTRACT_SUBREG $Dm, ssub_0
1144 // $Rt2 = EXTRACT_SUBREG $Dm, ssub_1
1145 let isExtractSubreg = 1;
1148 def VMOVRRS : AVConv3I<0b11000101, 0b1010,
1149 (outs GPR:$Rt, GPR:$Rt2), (ins SPR:$src1, SPR:$src2),
1150 IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $src1, $src2",
1151 [/* For disassembly only; pattern left blank */]>,
1152 Requires<[HasFPRegs]>,
1153 Sched<[WriteFPMOV]> {
1158 // Encode instruction operands.
1159 let Inst{3-0} = src1{4-1};
1160 let Inst{5} = src1{0};
1161 let Inst{15-12} = Rt;
1162 let Inst{19-16} = Rt2;
1164 let Inst{7-6} = 0b00;
1166 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
1168 let D = VFPNeonDomain;
1169 let DecoderMethod = "DecodeVMOVRRS";
1173 // FMDHR: GPR -> SPR
1174 // FMDLR: GPR -> SPR
1176 def VMOVDRR : AVConv5I<0b11000100, 0b1011,
1177 (outs DPR:$Dm), (ins GPR:$Rt, GPR:$Rt2),
1178 IIC_fpMOVID, "vmov", "\t$Dm, $Rt, $Rt2",
1179 [(set DPR:$Dm, (arm_fmdrr GPR:$Rt, GPR:$Rt2))]>,
1180 Requires<[HasFPRegs]>,
1181 Sched<[WriteFPMOV]> {
1182 // Instruction operands.
1187 // Encode instruction operands.
1188 let Inst{3-0} = Dm{3-0};
1189 let Inst{5} = Dm{4};
1190 let Inst{15-12} = Rt;
1191 let Inst{19-16} = Rt2;
1193 let Inst{7-6} = 0b00;
1195 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
1197 let D = VFPNeonDomain;
1199 // This instruction is equivalent to
1200 // $Dm = REG_SEQUENCE $Rt, ssub_0, $Rt2, ssub_1
1201 let isRegSequence = 1;
1204 // Hoist an fabs or a fneg of a value coming from integer registers
1205 // and do the fabs/fneg on the integer value. This is never a loss
1206 // and could enable the conversion to float to be removed completely.
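// The sign of the f64 built by VMOVDRR lives in bit 31 of the high word, so
// the patterns below clear that bit (BFC/t2BFC) for fabs and toggle it
// (EOR with 0x80000000) for fneg.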
1207 def : Pat<(fabs (arm_fmdrr GPR:$Rl, GPR:$Rh)),
1208 (VMOVDRR GPR:$Rl, (BFC GPR:$Rh, (i32 0x7FFFFFFF)))>,
1209 Requires<[IsARM, HasV6T2]>;
1210 def : Pat<(fabs (arm_fmdrr GPR:$Rl, GPR:$Rh)),
1211 (VMOVDRR GPR:$Rl, (t2BFC GPR:$Rh, (i32 0x7FFFFFFF)))>,
1212 Requires<[IsThumb2, HasV6T2]>;
1213 def : Pat<(fneg (arm_fmdrr GPR:$Rl, GPR:$Rh)),
1214 (VMOVDRR GPR:$Rl, (EORri GPR:$Rh, (i32 0x80000000)))>,
1216 def : Pat<(fneg (arm_fmdrr GPR:$Rl, GPR:$Rh)),
1217 (VMOVDRR GPR:$Rl, (t2EORri GPR:$Rh, (i32 0x80000000)))>,
1218 Requires<[IsThumb2]>;
1220 let hasSideEffects = 0 in
1221 def VMOVSRR : AVConv5I<0b11000100, 0b1010,
1222 (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
1223 IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
1224 [/* For disassembly only; pattern left blank */]>,
1225 Requires<[HasFPRegs]>,
1226 Sched<[WriteFPMOV]> {
1227 // Instruction operands.
1232 // Encode instruction operands.
1233 let Inst{3-0} = dst1{4-1};
1234 let Inst{5} = dst1{0};
1235 let Inst{15-12} = src1;
1236 let Inst{19-16} = src2;
1238 let Inst{7-6} = 0b00;
1240 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
1242 let D = VFPNeonDomain;
1244 let DecoderMethod = "DecodeVMOVSRR";
1247 // Move H->R, clearing top 16 bits
1248 def VMOVRH : AVConv2I<0b11100001, 0b1001,
1249 (outs rGPR:$Rt), (ins HPR:$Sn),
1250 IIC_fpMOVSI, "vmov", ".f16\t$Rt, $Sn",
1251 [(set rGPR:$Rt, (arm_vmovrh HPR:$Sn))]>,
1252 Requires<[HasFPRegs16]>,
1253 Sched<[WriteFPMOV]> {
1254 // Instruction operands.
1258 // Encode instruction operands.
1259 let Inst{19-16} = Sn{4-1};
1260 let Inst{7} = Sn{0};
1261 let Inst{15-12} = Rt;
1263 let Inst{6-5} = 0b00;
1264 let Inst{3-0} = 0b0000;
1266 let isUnpredicable = 1;
1269 // Move R->H, clearing top 16 bits
1270 def VMOVHR : AVConv4I<0b11100000, 0b1001,
1271 (outs HPR:$Sn), (ins rGPR:$Rt),
1272 IIC_fpMOVIS, "vmov", ".f16\t$Sn, $Rt",
1273 [(set HPR:$Sn, (arm_vmovhr rGPR:$Rt))]>,
1274 Requires<[HasFPRegs16]>,
1275 Sched<[WriteFPMOV]> {
1276 // Instruction operands.
1280 // Encode instruction operands.
1281 let Inst{19-16} = Sn{4-1};
1282 let Inst{7} = Sn{0};
1283 let Inst{15-12} = Rt;
1285 let Inst{6-5} = 0b00;
1286 let Inst{3-0} = 0b0000;
1288 let isUnpredicable = 1;
1291 // FMRDH: SPR -> GPR
1292 // FMRDL: SPR -> GPR
1293 // FMRRS: SPR -> GPR
1294 // FMRX: SPR system reg -> GPR
1295 // FMSRR: GPR -> SPR
1296 // FMXR: GPR -> VFP system reg
1301 class AVConv1IDs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1302 bits<4> opcod4, dag oops, dag iops,
1303 InstrItinClass itin, string opc, string asm,
1305 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1307 // Instruction operands.
1311 // Encode instruction operands.
1312 let Inst{3-0} = Sm{4-1};
1313 let Inst{5} = Sm{0};
1314 let Inst{15-12} = Dd{3-0};
1315 let Inst{22} = Dd{4};
1317 let Predicates = [HasVFP2, HasDPVFP];
1320 class AVConv1InSs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1321 bits<4> opcod4, dag oops, dag iops,InstrItinClass itin,
1322 string opc, string asm, list<dag> pattern>
1323 : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1325 // Instruction operands.
1329 // Encode instruction operands.
1330 let Inst{3-0} = Sm{4-1};
1331 let Inst{5} = Sm{0};
1332 let Inst{15-12} = Sd{4-1};
1333 let Inst{22} = Sd{0};
1336 class AVConv1IHs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1337 bits<4> opcod4, dag oops, dag iops,
1338 InstrItinClass itin, string opc, string asm,
1340 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1342 // Instruction operands.
1346 // Encode instruction operands.
1347 let Inst{3-0} = Sm{4-1};
1348 let Inst{5} = Sm{0};
1349 let Inst{15-12} = Sd{4-1};
1350 let Inst{22} = Sd{0};
1352 let Predicates = [HasFullFP16];
1355 def VSITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
1356 (outs DPR:$Dd), (ins SPR:$Sm),
1357 IIC_fpCVTID, "vcvt", ".f64.s32\t$Dd, $Sm",
1359 Sched<[WriteFPCVT]> {
1360 let Inst{7} = 1; // s32
1363 let Predicates=[HasVFP2, HasDPVFP] in {
1364 def : VFPPat<(f64 (sint_to_fp GPR:$a)),
1365 (VSITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;
1367 def : VFPPat<(f64 (sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
1368 (VSITOD (VLDRS addrmode5:$a))>;
1371 def VSITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
1372 (outs SPR:$Sd),(ins SPR:$Sm),
1373 IIC_fpCVTIS, "vcvt", ".f32.s32\t$Sd, $Sm",
1375 Sched<[WriteFPCVT]> {
1376 let Inst{7} = 1; // s32
1378 // Some single precision VFP instructions may be executed on both NEON and
1379 // VFP pipelines on A8.
1380 let D = VFPNeonA8Domain;
1383 def : VFPNoNEONPat<(f32 (sint_to_fp GPR:$a)),
1384 (VSITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;
1386 def : VFPNoNEONPat<(f32 (sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
1387 (VSITOS (VLDRS addrmode5:$a))>;
1389 def VSITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
1390 (outs HPR:$Sd), (ins SPR:$Sm),
1391 IIC_fpCVTIH, "vcvt", ".f16.s32\t$Sd, $Sm",
1393 Sched<[WriteFPCVT]> {
1394 let Inst{7} = 1; // s32
1395 let isUnpredicable = 1;
1398 def : VFPNoNEONPat<(f16 (sint_to_fp GPR:$a)),
1399 (VSITOH (COPY_TO_REGCLASS GPR:$a, SPR))>;
1401 def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
1402 (outs DPR:$Dd), (ins SPR:$Sm),
1403 IIC_fpCVTID, "vcvt", ".f64.u32\t$Dd, $Sm",
1405 Sched<[WriteFPCVT]> {
1406 let Inst{7} = 0; // u32
1409 let Predicates=[HasVFP2, HasDPVFP] in {
1410 def : VFPPat<(f64 (uint_to_fp GPR:$a)),
1411 (VUITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;
1413 def : VFPPat<(f64 (uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
1414 (VUITOD (VLDRS addrmode5:$a))>;
1417 def VUITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
1418 (outs SPR:$Sd), (ins SPR:$Sm),
1419 IIC_fpCVTIS, "vcvt", ".f32.u32\t$Sd, $Sm",
1421 Sched<[WriteFPCVT]> {
1422 let Inst{7} = 0; // u32
1424 // Some single precision VFP instructions may be executed on both NEON and
1425 // VFP pipelines on A8.
1426 let D = VFPNeonA8Domain;
1429 def : VFPNoNEONPat<(f32 (uint_to_fp GPR:$a)),
1430 (VUITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;
1432 def : VFPNoNEONPat<(f32 (uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
1433 (VUITOS (VLDRS addrmode5:$a))>;
1435 def VUITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
1436 (outs HPR:$Sd), (ins SPR:$Sm),
1437 IIC_fpCVTIH, "vcvt", ".f16.u32\t$Sd, $Sm",
1439 Sched<[WriteFPCVT]> {
1440 let Inst{7} = 0; // u32
1441 let isUnpredicable = 1;
1444 def : VFPNoNEONPat<(f16 (uint_to_fp GPR:$a)),
1445 (VUITOH (COPY_TO_REGCLASS GPR:$a, SPR))>;
1449 class AVConv1IsD_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1450 bits<4> opcod4, dag oops, dag iops,
1451 InstrItinClass itin, string opc, string asm,
1453 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1455 // Instruction operands.
1459 // Encode instruction operands.
1460 let Inst{3-0} = Dm{3-0};
1461 let Inst{5} = Dm{4};
1462 let Inst{15-12} = Sd{4-1};
1463 let Inst{22} = Sd{0};
1465 let Predicates = [HasVFP2, HasDPVFP];
1468 class AVConv1InsS_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1469 bits<4> opcod4, dag oops, dag iops,
1470 InstrItinClass itin, string opc, string asm,
1472 : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1474 // Instruction operands.
1478 // Encode instruction operands.
1479 let Inst{3-0} = Sm{4-1};
1480 let Inst{5} = Sm{0};
1481 let Inst{15-12} = Sd{4-1};
1482 let Inst{22} = Sd{0};
1485 class AVConv1IsH_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1486 bits<4> opcod4, dag oops, dag iops,
1487 InstrItinClass itin, string opc, string asm,
1489 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1491 // Instruction operands.
1495 // Encode instruction operands.
1496 let Inst{3-0} = Sm{4-1};
1497 let Inst{5} = Sm{0};
1498 let Inst{15-12} = Sd{4-1};
1499 let Inst{22} = Sd{0};
1501 let Predicates = [HasFullFP16];
1504 // Always set Z bit in the instruction, i.e. "round towards zero" variants.
1505 def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
1506 (outs SPR:$Sd), (ins DPR:$Dm),
1507 IIC_fpCVTDI, "vcvt", ".s32.f64\t$Sd, $Dm",
1509 Sched<[WriteFPCVT]> {
1510 let Inst{7} = 1; // Z bit
1513 let Predicates=[HasVFP2, HasDPVFP] in {
1514 def : VFPPat<(i32 (fp_to_sint (f64 DPR:$a))),
1515 (COPY_TO_REGCLASS (VTOSIZD DPR:$a), GPR)>;
1517 def : VFPPat<(alignedstore32 (i32 (fp_to_sint (f64 DPR:$a))), addrmode5:$ptr),
1518 (VSTRS (VTOSIZD DPR:$a), addrmode5:$ptr)>;
1521 def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
1522 (outs SPR:$Sd), (ins SPR:$Sm),
1523 IIC_fpCVTSI, "vcvt", ".s32.f32\t$Sd, $Sm",
1525 Sched<[WriteFPCVT]> {
1526 let Inst{7} = 1; // Z bit
1528 // Some single precision VFP instructions may be executed on both NEON and
1529 // VFP pipelines on A8.
1530 let D = VFPNeonA8Domain;
1533 def : VFPNoNEONPat<(i32 (fp_to_sint SPR:$a)),
1534 (COPY_TO_REGCLASS (VTOSIZS SPR:$a), GPR)>;
1536 def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_sint (f32 SPR:$a))),
1538 (VSTRS (VTOSIZS SPR:$a), addrmode5:$ptr)>;
1540 def VTOSIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001,
1541 (outs SPR:$Sd), (ins HPR:$Sm),
1542 IIC_fpCVTHI, "vcvt", ".s32.f16\t$Sd, $Sm",
1544 Sched<[WriteFPCVT]> {
1545 let Inst{7} = 1; // Z bit
1546 let isUnpredicable = 1;
1549 def : VFPNoNEONPat<(i32 (fp_to_sint HPR:$a)),
1550 (COPY_TO_REGCLASS (VTOSIZH HPR:$a), GPR)>;
1552 def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
1553 (outs SPR:$Sd), (ins DPR:$Dm),
1554 IIC_fpCVTDI, "vcvt", ".u32.f64\t$Sd, $Dm",
1556 Sched<[WriteFPCVT]> {
1557 let Inst{7} = 1; // Z bit
1560 let Predicates=[HasVFP2, HasDPVFP] in {
1561 def : VFPPat<(i32 (fp_to_uint (f64 DPR:$a))),
1562 (COPY_TO_REGCLASS (VTOUIZD DPR:$a), GPR)>;
1564 def : VFPPat<(alignedstore32 (i32 (fp_to_uint (f64 DPR:$a))), addrmode5:$ptr),
1565 (VSTRS (VTOUIZD DPR:$a), addrmode5:$ptr)>;
1568 def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
1569 (outs SPR:$Sd), (ins SPR:$Sm),
1570 IIC_fpCVTSI, "vcvt", ".u32.f32\t$Sd, $Sm",
1572 Sched<[WriteFPCVT]> {
1573 let Inst{7} = 1; // Z bit
1575 // Some single precision VFP instructions may be executed on both NEON and
1576 // VFP pipelines on A8.
1577 let D = VFPNeonA8Domain;
1580 def : VFPNoNEONPat<(i32 (fp_to_uint SPR:$a)),
1581 (COPY_TO_REGCLASS (VTOUIZS SPR:$a), GPR)>;
1583 def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_uint (f32 SPR:$a))),
1585 (VSTRS (VTOUIZS SPR:$a), addrmode5:$ptr)>;
1587 def VTOUIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001,
1588 (outs SPR:$Sd), (ins HPR:$Sm),
1589 IIC_fpCVTHI, "vcvt", ".u32.f16\t$Sd, $Sm",
1591 Sched<[WriteFPCVT]> {
1592 let Inst{7} = 1; // Z bit
1593 let isUnpredicable = 1;
1596 def : VFPNoNEONPat<(i32 (fp_to_uint HPR:$a)),
1597 (COPY_TO_REGCLASS (VTOUIZH HPR:$a), GPR)>;
1599 // And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
1600 let Uses = [FPSCR] in {
1601 def VTOSIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
1602 (outs SPR:$Sd), (ins DPR:$Dm),
1603 IIC_fpCVTDI, "vcvtr", ".s32.f64\t$Sd, $Dm",
1604 [(set SPR:$Sd, (int_arm_vcvtr (f64 DPR:$Dm)))]>,
1605 Sched<[WriteFPCVT]> {
1606 let Inst{7} = 0; // Z bit
1609 def VTOSIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
1610 (outs SPR:$Sd), (ins SPR:$Sm),
1611 IIC_fpCVTSI, "vcvtr", ".s32.f32\t$Sd, $Sm",
1612 [(set SPR:$Sd, (int_arm_vcvtr SPR:$Sm))]>,
1613 Sched<[WriteFPCVT]> {
1614 let Inst{7} = 0; // Z bit
1617 def VTOSIRH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001,
1618 (outs SPR:$Sd), (ins SPR:$Sm),
1619 IIC_fpCVTHI, "vcvtr", ".s32.f16\t$Sd, $Sm",
1621 Sched<[WriteFPCVT]> {
1622 let Inst{7} = 0; // Z bit
1623 let isUnpredicable = 1;
1626 def VTOUIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
1627 (outs SPR:$Sd), (ins DPR:$Dm),
1628 IIC_fpCVTDI, "vcvtr", ".u32.f64\t$Sd, $Dm",
1629 [(set SPR:$Sd, (int_arm_vcvtru (f64 DPR:$Dm)))]>,
1630 Sched<[WriteFPCVT]> {
1631 let Inst{7} = 0; // Z bit
1634 def VTOUIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
1635 (outs SPR:$Sd), (ins SPR:$Sm),
1636 IIC_fpCVTSI, "vcvtr", ".u32.f32\t$Sd, $Sm",
1637 [(set SPR:$Sd, (int_arm_vcvtru SPR:$Sm))]>,
1638 Sched<[WriteFPCVT]> {
1639 let Inst{7} = 0; // Z bit
1642 def VTOUIRH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001,
1643 (outs SPR:$Sd), (ins SPR:$Sm),
1644 IIC_fpCVTHI, "vcvtr", ".u32.f16\t$Sd, $Sm",
1646 Sched<[WriteFPCVT]> {
1647 let Inst{7} = 0; // Z bit
1648 let isUnpredicable = 1;
1652 // v8.3-a Javascript Convert to Signed fixed-point
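// (Rounds toward zero; out-of-range inputs produce the low 32 bits of the
// integer result, matching JavaScript's ToInt32 conversion.)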
1653 def VJCVT : AVConv1IsD_Encode<0b11101, 0b11, 0b1001, 0b1011,
1654 (outs SPR:$Sd), (ins DPR:$Dm),
1655 IIC_fpCVTDI, "vjcvt", ".s32.f64\t$Sd, $Dm",
1657 Requires<[HasFPARMv8, HasV8_3a]> {
1658 let Inst{7} = 1; // Z bit
1661 // Convert between floating-point and fixed-point
1662 // Data type for fixed-point naming convention:
1663 // S16 (U=0, sx=0) -> SH
1664 // U16 (U=1, sx=0) -> UH
1665 // S32 (U=0, sx=1) -> SL
1666 // U32 (U=1, sx=1) -> UL
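// For example, VTOSLS below converts f32 to signed 32-bit (SL) fixed-point,
// and VSLTOS converts signed 32-bit fixed-point back to f32.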
1668 let Constraints = "$a = $dst" in {
1670 // FP to Fixed-Point:
1672 // Single Precision register
1673 class AVConv1XInsS_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
1674 bit op5, dag oops, dag iops, InstrItinClass itin,
1675 string opc, string asm, list<dag> pattern>
1676 : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
1678 // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
1679 let Inst{22} = dst{0};
1680 let Inst{15-12} = dst{4-1};
1683 // Double Precision register
1684 class AVConv1XInsD_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
1685 bit op5, dag oops, dag iops, InstrItinClass itin,
1686 string opc, string asm, list<dag> pattern>
1687 : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
1689 // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
1690 let Inst{22} = dst{4};
1691 let Inst{15-12} = dst{3-0};
1693 let Predicates = [HasVFP2, HasDPVFP];
1696 let isUnpredicable = 1 in {
1698 def VTOSHH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1001, 0,
1699 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1700 IIC_fpCVTHI, "vcvt", ".s16.f16\t$dst, $a, $fbits", []>,
1701 Requires<[HasFullFP16]>,
1702 Sched<[WriteFPCVT]>;
1704 def VTOUHH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1001, 0,
1705 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1706 IIC_fpCVTHI, "vcvt", ".u16.f16\t$dst, $a, $fbits", []>,
1707 Requires<[HasFullFP16]>,
1708 Sched<[WriteFPCVT]>;
1710 def VTOSLH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1001, 1,
1711 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1712 IIC_fpCVTHI, "vcvt", ".s32.f16\t$dst, $a, $fbits", []>,
1713 Requires<[HasFullFP16]>,
1714 Sched<[WriteFPCVT]>;
1716 def VTOULH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1001, 1,
1717 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1718 IIC_fpCVTHI, "vcvt", ".u32.f16\t$dst, $a, $fbits", []>,
1719 Requires<[HasFullFP16]>,
1720 Sched<[WriteFPCVT]>;
1722 } // End of 'let isUnpredicable = 1 in'
1724 def VTOSHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 0,
1725 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1726 IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits", []>,
1727 Sched<[WriteFPCVT]> {
1728 // Some single precision VFP instructions may be executed on both NEON and
1729 // VFP pipelines on A8.
1730 let D = VFPNeonA8Domain;
1733 def VTOUHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 0,
1734 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1735 IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits", []> {
1736 // Some single precision VFP instructions may be executed on both NEON and
1737 // VFP pipelines on A8.
1738 let D = VFPNeonA8Domain;
1741 def VTOSLS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 1,
1742 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1743 IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits", []> {
1744 // Some single precision VFP instructions may be executed on both NEON and
1745 // VFP pipelines on A8.
1746 let D = VFPNeonA8Domain;
1749 def VTOULS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 1,
1750 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1751 IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits", []> {
1752 // Some single precision VFP instructions may be executed on both NEON and
1753 // VFP pipelines on A8.
1754 let D = VFPNeonA8Domain;
1757 def VTOSHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 0,
1758 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
1759 IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits", []>,
1760 Sched<[WriteFPCVT]>;
1762 def VTOUHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 0,
1763 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
1764 IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits", []>,
1765 Sched<[WriteFPCVT]>;
1767 def VTOSLD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 1,
1768 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
1769 IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits", []>,
1770 Sched<[WriteFPCVT]>;
1772 def VTOULD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 1,
1773 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
1774 IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits", []>,
1775 Sched<[WriteFPCVT]>;
1777 // Fixed-Point to FP:
1779 let isUnpredicable = 1 in {
1781 def VSHTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1001, 0,
1782 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1783 IIC_fpCVTIH, "vcvt", ".f16.s16\t$dst, $a, $fbits", []>,
1784 Requires<[HasFullFP16]>,
1785 Sched<[WriteFPCVT]>;
1787 def VUHTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1001, 0,
1788 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1789 IIC_fpCVTIH, "vcvt", ".f16.u16\t$dst, $a, $fbits", []>,
1790 Requires<[HasFullFP16]>,
1791 Sched<[WriteFPCVT]>;
1793 def VSLTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1001, 1,
1794 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1795 IIC_fpCVTIH, "vcvt", ".f16.s32\t$dst, $a, $fbits", []>,
1796 Requires<[HasFullFP16]>,
1797 Sched<[WriteFPCVT]>;
1799 def VULTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1001, 1,
1800 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1801 IIC_fpCVTIH, "vcvt", ".f16.u32\t$dst, $a, $fbits", []>,
1802 Requires<[HasFullFP16]>,
1803 Sched<[WriteFPCVT]>;
1805 } // End of 'let isUnpredicable = 1 in'
1807 def VSHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 0,
1808 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1809 IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits", []>,
1810 Sched<[WriteFPCVT]> {
1811 // Some single precision VFP instructions may be executed on both NEON and
1812 // VFP pipelines on A8.
1813 let D = VFPNeonA8Domain;
1816 def VUHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 0,
1817 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1818 IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits", []>,
1819 Sched<[WriteFPCVT]> {
1820 // Some single precision VFP instructions may be executed on both NEON and
1821 // VFP pipelines on A8.
1822 let D = VFPNeonA8Domain;
1825 def VSLTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 1,
1826 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1827 IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits", []>,
1828 Sched<[WriteFPCVT]> {
1829 // Some single precision VFP instructions may be executed on both NEON and
1830 // VFP pipelines on A8.
1831 let D = VFPNeonA8Domain;
1834 def VULTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 1,
1835 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1836 IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits", []>,
1837 Sched<[WriteFPCVT]> {
1838 // Some single precision VFP instructions may be executed on both NEON and
1839 // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
1843 def VSHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 0,
1844 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
1845 IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits", []>,
1846 Sched<[WriteFPCVT]>;
1848 def VUHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 0,
1849 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
1850 IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits", []>,
1851 Sched<[WriteFPCVT]>;
1853 def VSLTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 1,
1854 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
1855 IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits", []>,
1856 Sched<[WriteFPCVT]>;
1858 def VULTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 1,
1859 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
1860 IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits", []>,
1861 Sched<[WriteFPCVT]>;
1863 } // End of 'let Constraints = "$a = $dst" in'
1865 //===----------------------------------------------------------------------===//
1866 // FP Multiply-Accumulate Operations.
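// VMLA/VMLS/VNMLA/VNMLS below are the non-fused forms: the multiply result is
// rounded before the accumulate, unlike the fused VFMA family further down,
// which rounds only once. They are only selected for code generation when the
// UseFPVMLx predicate is set (see the Requires<> lists).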
1869 def VMLAD : ADbI<0b11100, 0b00, 0, 0,
1870 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
1871 IIC_fpMAC64, "vmla", ".f64\t$Dd, $Dn, $Dm",
1872 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
1873 (f64 DPR:$Ddin)))]>,
1874 RegConstraint<"$Ddin = $Dd">,
1875 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
1876 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
1878 def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
1879 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
1880 IIC_fpMAC32, "vmla", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
                                           SPR:$Sdin))]>,
1883 RegConstraint<"$Sdin = $Sd">,
1884 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
1885 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
1886 // Some single precision VFP instructions may be executed on both NEON and
1887 // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
1891 def VMLAH : AHbI<0b11100, 0b00, 0, 0,
1892 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
1893 IIC_fpMAC16, "vmla", ".f16\t$Sd, $Sn, $Sm",
                  [(set HPR:$Sd, (fadd_mlx (fmul_su HPR:$Sn, HPR:$Sm),
                                           HPR:$Sdin))]>,
1896 RegConstraint<"$Sdin = $Sd">,
1897 Requires<[HasFullFP16,UseFPVMLx]>;
1899 def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
1900 (VMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
1901 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
1902 def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
1903 (VMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
1904 Requires<[HasVFP2,DontUseNEONForFP, UseFPVMLx]>;
1905 def : Pat<(fadd_mlx HPR:$dstin, (fmul_su HPR:$a, HPR:$b)),
1906 (VMLAH HPR:$dstin, HPR:$a, HPR:$b)>,
1907 Requires<[HasFullFP16,DontUseNEONForFP, UseFPVMLx]>;
1910 def VMLSD : ADbI<0b11100, 0b00, 1, 0,
1911 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
1912 IIC_fpMAC64, "vmls", ".f64\t$Dd, $Dn, $Dm",
1913 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
1914 (f64 DPR:$Ddin)))]>,
1915 RegConstraint<"$Ddin = $Dd">,
1916 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
1917 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
1919 def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
1920 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
1921 IIC_fpMAC32, "vmls", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
1924 RegConstraint<"$Sdin = $Sd">,
1925 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
1926 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
1927 // Some single precision VFP instructions may be executed on both NEON and
1928 // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
1932 def VMLSH : AHbI<0b11100, 0b00, 1, 0,
1933 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
1934 IIC_fpMAC16, "vmls", ".f16\t$Sd, $Sn, $Sm",
                  [(set HPR:$Sd, (fadd_mlx (fneg (fmul_su HPR:$Sn, HPR:$Sm)),
                                           HPR:$Sdin))]>,
1937 RegConstraint<"$Sdin = $Sd">,
1938 Requires<[HasFullFP16,UseFPVMLx]>;
1940 def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
1941 (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
1942 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
1943 def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
1944 (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
1945 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
1946 def : Pat<(fsub_mlx HPR:$dstin, (fmul_su HPR:$a, HPR:$b)),
1947 (VMLSH HPR:$dstin, HPR:$a, HPR:$b)>,
1948 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
1950 def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
1951 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
1952 IIC_fpMAC64, "vnmla", ".f64\t$Dd, $Dn, $Dm",
1953 [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
1954 (f64 DPR:$Ddin)))]>,
1955 RegConstraint<"$Ddin = $Dd">,
1956 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
1957 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
1959 def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
1960 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
1961 IIC_fpMAC32, "vnmla", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
1964 RegConstraint<"$Sdin = $Sd">,
1965 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
1966 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
1967 // Some single precision VFP instructions may be executed on both NEON and
1968 // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
1972 def VNMLAH : AHbI<0b11100, 0b01, 1, 0,
1973 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
1974 IIC_fpMAC16, "vnmla", ".f16\t$Sd, $Sn, $Sm",
                  [(set HPR:$Sd, (fsub_mlx (fneg (fmul_su HPR:$Sn, HPR:$Sm)),
                                           HPR:$Sdin))]>,
1977 RegConstraint<"$Sdin = $Sd">,
1978 Requires<[HasFullFP16,UseFPVMLx]>;
1980 // (-(a * b) - dst) -> -(dst + (a * b))
1981 def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
1982 (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
1983 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
1984 def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
1985 (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
1986 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
1987 def : Pat<(fsub_mlx (fneg (fmul_su HPR:$a, HPR:$b)), HPR:$dstin),
1988 (VNMLAH HPR:$dstin, HPR:$a, HPR:$b)>,
1989 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
1991 // (-dst - (a * b)) -> -(dst + (a * b))
1992 def : Pat<(fsub_mlx (fneg DPR:$dstin), (fmul_su DPR:$a, (f64 DPR:$b))),
1993 (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
1994 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
1995 def : Pat<(fsub_mlx (fneg SPR:$dstin), (fmul_su SPR:$a, SPR:$b)),
1996 (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
1997 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
1998 def : Pat<(fsub_mlx (fneg HPR:$dstin), (fmul_su HPR:$a, HPR:$b)),
1999 (VNMLAH HPR:$dstin, HPR:$a, HPR:$b)>,
2000 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
2002 def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
2003 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2004 IIC_fpMAC64, "vnmls", ".f64\t$Dd, $Dn, $Dm",
2005 [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
2006 (f64 DPR:$Ddin)))]>,
2007 RegConstraint<"$Ddin = $Dd">,
2008 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
2009 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2011 def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
2012 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2013 IIC_fpMAC32, "vnmls", ".f32\t$Sd, $Sn, $Sm",
2014 [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
2015 RegConstraint<"$Sdin = $Sd">,
2016 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
2017 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
2018 // Some single precision VFP instructions may be executed on both NEON and
2019 // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
2023 def VNMLSH : AHbI<0b11100, 0b01, 0, 0,
2024 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2025 IIC_fpMAC16, "vnmls", ".f16\t$Sd, $Sn, $Sm",
2026 [(set HPR:$Sd, (fsub_mlx (fmul_su HPR:$Sn, HPR:$Sm), HPR:$Sdin))]>,
2027 RegConstraint<"$Sdin = $Sd">,
2028 Requires<[HasFullFP16,UseFPVMLx]>;
2030 def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
2031 (VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
2032 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
2033 def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
2034 (VNMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
2035 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
2036 def : Pat<(fsub_mlx (fmul_su HPR:$a, HPR:$b), HPR:$dstin),
2037 (VNMLSH HPR:$dstin, HPR:$a, HPR:$b)>,
2038 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
2040 //===----------------------------------------------------------------------===//
2041 // Fused FP Multiply-Accumulate Operations.
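// VFMA/VFMS/VFNMA/VFNMS round only once, after the combined multiply-
// accumulate. They require VFPv4 (HasVFP4; the .f16 forms need HasFullFP16)
// and, besides the mlx-style patterns guarded by UseFusedMAC, they also match
// the @llvm.fma.* intrinsic patterns below, which need only HasVFP4.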
2043 def VFMAD : ADbI<0b11101, 0b10, 0, 0,
2044 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2045 IIC_fpFMAC64, "vfma", ".f64\t$Dd, $Dn, $Dm",
2046 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
2047 (f64 DPR:$Ddin)))]>,
2048 RegConstraint<"$Ddin = $Dd">,
2049 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
2050 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2052 def VFMAS : ASbIn<0b11101, 0b10, 0, 0,
2053 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2054 IIC_fpFMAC32, "vfma", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
                                           SPR:$Sdin))]>,
2057 RegConstraint<"$Sdin = $Sd">,
2058 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
2059 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
}
2064 def VFMAH : AHbI<0b11101, 0b10, 0, 0,
2065 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2066 IIC_fpFMAC16, "vfma", ".f16\t$Sd, $Sn, $Sm",
                  [(set HPR:$Sd, (fadd_mlx (fmul_su HPR:$Sn, HPR:$Sm),
                                           HPR:$Sdin))]>,
2069 RegConstraint<"$Sdin = $Sd">,
2070 Requires<[HasFullFP16,UseFusedMAC]>,
2071 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2073 def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
2074 (VFMAD DPR:$dstin, DPR:$a, DPR:$b)>,
2075 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
2076 def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
2077 (VFMAS SPR:$dstin, SPR:$a, SPR:$b)>,
2078 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
2079 def : Pat<(fadd_mlx HPR:$dstin, (fmul_su HPR:$a, HPR:$b)),
2080 (VFMAH HPR:$dstin, HPR:$a, HPR:$b)>,
2081 Requires<[HasFullFP16,DontUseNEONForFP,UseFusedMAC]>;
2083 // Match @llvm.fma.* intrinsics
// (fma x, y, z) -> (vfma z, x, y)
2085 def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, DPR:$Ddin)),
2086 (VFMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2087 Requires<[HasVFP4,HasDPVFP]>;
2088 def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, SPR:$Sdin)),
2089 (VFMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2090 Requires<[HasVFP4]>;
2091 def : Pat<(f16 (fma HPR:$Sn, HPR:$Sm, HPR:$Sdin)),
2092 (VFMAH HPR:$Sdin, HPR:$Sn, HPR:$Sm)>,
2093 Requires<[HasFullFP16]>;
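// For example, a call to @llvm.fma.f32 with arguments (x, y, z) selects via
// the patterns above to "vfma.f32 Sz, Sx, Sy", with the accumulator z tied to
// the destination register.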
2095 def VFMSD : ADbI<0b11101, 0b10, 1, 0,
2096 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2097 IIC_fpFMAC64, "vfms", ".f64\t$Dd, $Dn, $Dm",
2098 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
2099 (f64 DPR:$Ddin)))]>,
2100 RegConstraint<"$Ddin = $Dd">,
2101 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
2102 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2104 def VFMSS : ASbIn<0b11101, 0b10, 1, 0,
2105 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2106 IIC_fpFMAC32, "vfms", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
2109 RegConstraint<"$Sdin = $Sd">,
2110 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
2111 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
}
2116 def VFMSH : AHbI<0b11101, 0b10, 1, 0,
2117 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2118 IIC_fpFMAC16, "vfms", ".f16\t$Sd, $Sn, $Sm",
                  [(set HPR:$Sd, (fadd_mlx (fneg (fmul_su HPR:$Sn, HPR:$Sm)),
                                           HPR:$Sdin))]>,
2121 RegConstraint<"$Sdin = $Sd">,
2122 Requires<[HasFullFP16,UseFusedMAC]>,
2123 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2125 def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
2126 (VFMSD DPR:$dstin, DPR:$a, DPR:$b)>,
2127 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
2128 def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
2129 (VFMSS SPR:$dstin, SPR:$a, SPR:$b)>,
2130 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
2131 def : Pat<(fsub_mlx HPR:$dstin, (fmul_su HPR:$a, HPR:$b)),
2132 (VFMSH HPR:$dstin, HPR:$a, HPR:$b)>,
2133 Requires<[HasFullFP16,DontUseNEONForFP,UseFusedMAC]>;
2135 // Match @llvm.fma.* intrinsics
2136 // (fma (fneg x), y, z) -> (vfms z, x, y)
2137 def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin)),
2138 (VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2139 Requires<[HasVFP4,HasDPVFP]>;
2140 def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin)),
2141 (VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2142 Requires<[HasVFP4]>;
2143 // (fma x, (fneg y), z) -> (vfms z, x, y)
2144 def : Pat<(f64 (fma DPR:$Dn, (fneg DPR:$Dm), DPR:$Ddin)),
2145 (VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2146 Requires<[HasVFP4,HasDPVFP]>;
2147 def : Pat<(f32 (fma SPR:$Sn, (fneg SPR:$Sm), SPR:$Sdin)),
2148 (VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2149 Requires<[HasVFP4]>;
2151 def VFNMAD : ADbI<0b11101, 0b01, 1, 0,
2152 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2153 IIC_fpFMAC64, "vfnma", ".f64\t$Dd, $Dn, $Dm",
2154 [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
2155 (f64 DPR:$Ddin)))]>,
2156 RegConstraint<"$Ddin = $Dd">,
2157 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
2158 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2160 def VFNMAS : ASbI<0b11101, 0b01, 1, 0,
2161 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2162 IIC_fpFMAC32, "vfnma", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
2165 RegConstraint<"$Sdin = $Sd">,
2166 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
2167 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
}
2172 def VFNMAH : AHbI<0b11101, 0b01, 1, 0,
2173 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2174 IIC_fpFMAC16, "vfnma", ".f16\t$Sd, $Sn, $Sm",
                  [(set HPR:$Sd, (fsub_mlx (fneg (fmul_su HPR:$Sn, HPR:$Sm)),
                                           HPR:$Sdin))]>,
2177 RegConstraint<"$Sdin = $Sd">,
2178 Requires<[HasFullFP16,UseFusedMAC]>,
2179 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2181 def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
2182 (VFNMAD DPR:$dstin, DPR:$a, DPR:$b)>,
2183 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
2184 def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
2185 (VFNMAS SPR:$dstin, SPR:$a, SPR:$b)>,
2186 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
2188 // Match @llvm.fma.* intrinsics
2189 // (fneg (fma x, y, z)) -> (vfnma z, x, y)
2190 def : Pat<(fneg (fma (f64 DPR:$Dn), (f64 DPR:$Dm), (f64 DPR:$Ddin))),
2191 (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2192 Requires<[HasVFP4,HasDPVFP]>;
2193 def : Pat<(fneg (fma (f32 SPR:$Sn), (f32 SPR:$Sm), (f32 SPR:$Sdin))),
2194 (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2195 Requires<[HasVFP4]>;
2196 // (fma (fneg x), y, (fneg z)) -> (vfnma z, x, y)
2197 def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, (fneg DPR:$Ddin))),
2198 (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2199 Requires<[HasVFP4,HasDPVFP]>;
2200 def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, (fneg SPR:$Sdin))),
2201 (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2202 Requires<[HasVFP4]>;
2204 def VFNMSD : ADbI<0b11101, 0b01, 0, 0,
2205 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2206 IIC_fpFMAC64, "vfnms", ".f64\t$Dd, $Dn, $Dm",
2207 [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
2208 (f64 DPR:$Ddin)))]>,
2209 RegConstraint<"$Ddin = $Dd">,
2210 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
2211 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2213 def VFNMSS : ASbI<0b11101, 0b01, 0, 0,
2214 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2215 IIC_fpFMAC32, "vfnms", ".f32\t$Sd, $Sn, $Sm",
2216 [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
2217 RegConstraint<"$Sdin = $Sd">,
2218 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
2219 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
}
2224 def VFNMSH : AHbI<0b11101, 0b01, 0, 0,
2225 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2226 IIC_fpFMAC16, "vfnms", ".f16\t$Sd, $Sn, $Sm",
2227 [(set HPR:$Sd, (fsub_mlx (fmul_su HPR:$Sn, HPR:$Sm), HPR:$Sdin))]>,
2228 RegConstraint<"$Sdin = $Sd">,
2229 Requires<[HasFullFP16,UseFusedMAC]>,
2230 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2232 def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
2233 (VFNMSD DPR:$dstin, DPR:$a, DPR:$b)>,
2234 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
2235 def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
2236 (VFNMSS SPR:$dstin, SPR:$a, SPR:$b)>,
2237 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
2239 // Match @llvm.fma.* intrinsics
// (fma x, y, (fneg z)) -> (vfnms z, x, y)
2242 def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, (fneg DPR:$Ddin))),
2243 (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2244 Requires<[HasVFP4,HasDPVFP]>;
2245 def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, (fneg SPR:$Sdin))),
2246 (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2247 Requires<[HasVFP4]>;
2248 // (fneg (fma (fneg x), y, z)) -> (vfnms z, x, y)
2249 def : Pat<(fneg (f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin))),
2250 (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2251 Requires<[HasVFP4,HasDPVFP]>;
2252 def : Pat<(fneg (f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin))),
2253 (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2254 Requires<[HasVFP4]>;
// (fneg (fma x, (fneg y), z)) -> (vfnms z, x, y)
2256 def : Pat<(fneg (f64 (fma DPR:$Dn, (fneg DPR:$Dm), DPR:$Ddin))),
2257 (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2258 Requires<[HasVFP4,HasDPVFP]>;
2259 def : Pat<(fneg (f32 (fma SPR:$Sn, (fneg SPR:$Sm), SPR:$Sdin))),
2260 (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2261 Requires<[HasVFP4]>;
2263 //===----------------------------------------------------------------------===//
2264 // FP Conditional moves.
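// VMOVDcc/VMOVScc are pseudo-instructions selected for ARMcmov of FP values;
// they carry no side effects and are expanded after selection into predicated
// moves.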
2267 let hasSideEffects = 0 in {
def VMOVDcc : PseudoInst<(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm, cmovpred:$p),
                    IIC_fpUNA64,
2270 [(set (f64 DPR:$Dd),
2271 (ARMcmov DPR:$Dn, DPR:$Dm, cmovpred:$p))]>,
2272 RegConstraint<"$Dn = $Dd">, Requires<[HasFPRegs64]>;
def VMOVScc : PseudoInst<(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm, cmovpred:$p),
                    IIC_fpUNA32,
2276 [(set (f32 SPR:$Sd),
2277 (ARMcmov SPR:$Sn, SPR:$Sm, cmovpred:$p))]>,
               RegConstraint<"$Sn = $Sd">, Requires<[HasFPRegs]>;
} // End of 'let hasSideEffects = 0 in'
2281 //===----------------------------------------------------------------------===//
2282 // Move from VFP System Register to ARM core register.
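// These produce the VMRS forms: opc19_16 selects the system register, and
// Rt = 0b1111 means the FPSCR flags go to APSR_nzcv rather than to a core
// register, e.g.
//   vmrs  r0, fpscr           @ read FPSCR into r0
//   vmrs  APSR_nzcv, fpscr    @ FMSTAT: copy FPSCR N, Z, C, V into APSR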
class MovFromVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
                 list<dag> pattern>:
2287 VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {
// Instruction operand.
bits<4> Rt;
2292 let Inst{27-20} = 0b11101111;
2293 let Inst{19-16} = opc19_16;
2294 let Inst{15-12} = Rt;
2295 let Inst{11-8} = 0b1010;
2297 let Inst{6-5} = 0b00;
2299 let Inst{3-0} = 0b0000;
2300 let Unpredictable{7-5} = 0b111;
let Unpredictable{3-0} = 0b1111;
}
2304 let DecoderMethod = "DecodeForVMRSandVMSR" in {
// APSR is the application level alias of CPSR. This instruction copies the
// FPSCR N, Z, C, V flags to APSR.
2307 let Defs = [CPSR], Uses = [FPSCR_NZCV], Predicates = [HasFPRegs],
2308 Rt = 0b1111 /* apsr_nzcv */ in
2309 def FMSTAT : MovFromVFP<0b0001 /* fpscr */, (outs), (ins),
2310 "vmrs", "\tAPSR_nzcv, fpscr", [(arm_fmstat)]>;
2312 // Application level FPSCR -> GPR
2313 let hasSideEffects = 1, Uses = [FPSCR], Predicates = [HasFPRegs] in
2314 def VMRS : MovFromVFP<0b0001 /* fpscr */, (outs GPRnopc:$Rt), (ins),
2315 "vmrs", "\t$Rt, fpscr",
2316 [(set GPRnopc:$Rt, (int_arm_get_fpscr))]>;
2318 // System level FPEXC, FPSID -> GPR
2319 let Uses = [FPSCR] in {
2320 def VMRS_FPEXC : MovFromVFP<0b1000 /* fpexc */, (outs GPRnopc:$Rt), (ins),
2321 "vmrs", "\t$Rt, fpexc", []>;
2322 def VMRS_FPSID : MovFromVFP<0b0000 /* fpsid */, (outs GPRnopc:$Rt), (ins),
2323 "vmrs", "\t$Rt, fpsid", []>;
2324 def VMRS_MVFR0 : MovFromVFP<0b0111 /* mvfr0 */, (outs GPRnopc:$Rt), (ins),
2325 "vmrs", "\t$Rt, mvfr0", []>;
2326 def VMRS_MVFR1 : MovFromVFP<0b0110 /* mvfr1 */, (outs GPRnopc:$Rt), (ins),
2327 "vmrs", "\t$Rt, mvfr1", []>;
2328 let Predicates = [HasFPARMv8] in {
2329 def VMRS_MVFR2 : MovFromVFP<0b0101 /* mvfr2 */, (outs GPRnopc:$Rt), (ins),
                            "vmrs", "\t$Rt, mvfr2", []>;
}
2332 def VMRS_FPINST : MovFromVFP<0b1001 /* fpinst */, (outs GPRnopc:$Rt), (ins),
2333 "vmrs", "\t$Rt, fpinst", []>;
2334 def VMRS_FPINST2 : MovFromVFP<0b1010 /* fpinst2 */, (outs GPRnopc:$Rt),
2335 (ins), "vmrs", "\t$Rt, fpinst2", []>;
2336 let Predicates = [HasV8_1MMainline, HasFPRegs] in {
2337 // System level FPSCR_NZCVQC -> GPR
2338 def VMRS_FPSCR_NZCVQC
2339 : MovFromVFP<0b0010 /* fpscr_nzcvqc */,
2340 (outs GPR:$Rt), (ins cl_FPSCR_NZCV:$fpscr_in),
2341 "vmrs", "\t$Rt, fpscr_nzcvqc", []>;
2344 let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
2345 // System level FPSCR -> GPR, with context saving for security extensions
2346 def VMRS_FPCXTNS : MovFromVFP<0b1110 /* fpcxtns */, (outs GPR:$Rt), (ins),
2347 "vmrs", "\t$Rt, fpcxtns", []>;
2349 let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
2350 // System level FPSCR -> GPR, with context saving for security extensions
2351 def VMRS_FPCXTS : MovFromVFP<0b1111 /* fpcxts */, (outs GPR:$Rt), (ins),
2352 "vmrs", "\t$Rt, fpcxts", []>;
2355 let Predicates = [HasV8_1MMainline, HasMVEInt] in {
2356 // System level VPR/P0 -> GPR
2358 def VMRS_VPR : MovFromVFP<0b1100 /* vpr */, (outs GPR:$Rt), (ins),
2359 "vmrs", "\t$Rt, vpr", []>;
2361 def VMRS_P0 : MovFromVFP<0b1101 /* p0 */, (outs GPR:$Rt), (ins VCCR:$cond),
2362 "vmrs", "\t$Rt, p0", []>;
2366 //===----------------------------------------------------------------------===//
2367 // Move from ARM core register to VFP System Register.
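// These produce the VMSR forms, the opposite direction of MovFromVFP above,
// e.g. "vmsr fpscr, r0" writes r0 into FPSCR.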
class MovToVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
               list<dag> pattern>:
2372 VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {
// Instruction operand.
bits<4> Rt;
2377 let Inst{27-20} = 0b11101110;
2378 let Inst{19-16} = opc19_16;
2379 let Inst{15-12} = Rt;
2380 let Inst{11-8} = 0b1010;
2382 let Inst{6-5} = 0b00;
2384 let Inst{3-0} = 0b0000;
2385 let Predicates = [HasVFP2];
2386 let Unpredictable{7-5} = 0b111;
let Unpredictable{3-0} = 0b1111;
}
2390 let DecoderMethod = "DecodeForVMRSandVMSR" in {
2391 let Defs = [FPSCR] in {
2392 let Predicates = [HasFPRegs] in
2393 // Application level GPR -> FPSCR
2394 def VMSR : MovToVFP<0b0001 /* fpscr */, (outs), (ins GPRnopc:$Rt),
2395 "vmsr", "\tfpscr, $Rt",
2396 [(int_arm_set_fpscr GPRnopc:$Rt)]>;
2397 // System level GPR -> FPEXC
2398 def VMSR_FPEXC : MovToVFP<0b1000 /* fpexc */, (outs), (ins GPRnopc:$Rt),
2399 "vmsr", "\tfpexc, $Rt", []>;
2400 // System level GPR -> FPSID
2401 def VMSR_FPSID : MovToVFP<0b0000 /* fpsid */, (outs), (ins GPRnopc:$Rt),
2402 "vmsr", "\tfpsid, $Rt", []>;
2403 def VMSR_FPINST : MovToVFP<0b1001 /* fpinst */, (outs), (ins GPRnopc:$Rt),
2404 "vmsr", "\tfpinst, $Rt", []>;
2405 def VMSR_FPINST2 : MovToVFP<0b1010 /* fpinst2 */, (outs), (ins GPRnopc:$Rt),
2406 "vmsr", "\tfpinst2, $Rt", []>;
2408 let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
2409 // System level GPR -> FPSCR with context saving for security extensions
2410 def VMSR_FPCXTNS : MovToVFP<0b1110 /* fpcxtns */, (outs), (ins GPR:$Rt),
2411 "vmsr", "\tfpcxtns, $Rt", []>;
2413 let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
2414 // System level GPR -> FPSCR with context saving for security extensions
2415 def VMSR_FPCXTS : MovToVFP<0b1111 /* fpcxts */, (outs), (ins GPR:$Rt),
2416 "vmsr", "\tfpcxts, $Rt", []>;
2418 let Predicates = [HasV8_1MMainline, HasFPRegs] in {
2419 // System level GPR -> FPSCR_NZCVQC
2420 def VMSR_FPSCR_NZCVQC
2421 : MovToVFP<0b0010 /* fpscr_nzcvqc */,
2422 (outs cl_FPSCR_NZCV:$fpscr_out), (ins GPR:$Rt),
2423 "vmsr", "\tfpscr_nzcvqc, $Rt", []>;
2426 let Predicates = [HasV8_1MMainline, HasMVEInt] in {
2427 // System level GPR -> VPR/P0
2429 def VMSR_VPR : MovToVFP<0b1100 /* vpr */, (outs), (ins GPR:$Rt),
2430 "vmsr", "\tvpr, $Rt", []>;
2432 def VMSR_P0 : MovToVFP<0b1101 /* p0 */, (outs VCCR:$cond), (ins GPR:$Rt),
2433 "vmsr", "\tp0, $Rt", []>;
2437 //===----------------------------------------------------------------------===//
2441 // Materialize FP immediates. VFP3 only.
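// The 8-bit modified immediate can encode values of the form
// +/- (16..31)/16 * 2^n with n in [-3, 4]; for example 1.0 is encoding 0x70
// (see the fconsts/fconstd aliases below). Constants outside this set have to
// be materialized some other way, e.g. through a constant-pool load.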
2442 let isReMaterializable = 1 in {
2443 def FCONSTD : VFPAI<(outs DPR:$Dd), (ins vfp_f64imm:$imm),
2444 VFPMiscFrm, IIC_fpUNA64,
2445 "vmov", ".f64\t$Dd, $imm",
2446 [(set DPR:$Dd, vfp_f64imm:$imm)]>,
                    Requires<[HasVFP3,HasDPVFP]> {
  bits<5> Dd;
  bits<8> imm;
2451 let Inst{27-23} = 0b11101;
2452 let Inst{22} = Dd{4};
2453 let Inst{21-20} = 0b11;
2454 let Inst{19-16} = imm{7-4};
2455 let Inst{15-12} = Dd{3-0};
2456 let Inst{11-9} = 0b101;
2457 let Inst{8} = 1; // Double precision.
2458 let Inst{7-4} = 0b0000;
  let Inst{3-0} = imm{3-0};
}
2462 def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
2463 VFPMiscFrm, IIC_fpUNA32,
2464 "vmov", ".f32\t$Sd, $imm",
                    [(set SPR:$Sd, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
  bits<5> Sd;
  bits<8> imm;
2469 let Inst{27-23} = 0b11101;
2470 let Inst{22} = Sd{0};
2471 let Inst{21-20} = 0b11;
2472 let Inst{19-16} = imm{7-4};
2473 let Inst{15-12} = Sd{4-1};
2474 let Inst{11-9} = 0b101;
2475 let Inst{8} = 0; // Single precision.
2476 let Inst{7-4} = 0b0000;
  let Inst{3-0} = imm{3-0};
}
2480 def FCONSTH : VFPAI<(outs HPR:$Sd), (ins vfp_f16imm:$imm),
2481 VFPMiscFrm, IIC_fpUNA16,
2482 "vmov", ".f16\t$Sd, $imm",
2483 [(set HPR:$Sd, vfp_f16imm:$imm)]>,
                    Requires<[HasFullFP16]> {
  bits<5> Sd;
  bits<8> imm;
2488 let Inst{27-23} = 0b11101;
2489 let Inst{22} = Sd{0};
2490 let Inst{21-20} = 0b11;
2491 let Inst{19-16} = imm{7-4};
2492 let Inst{15-12} = Sd{4-1};
2493 let Inst{11-8} = 0b1001; // Half precision
2494 let Inst{7-4} = 0b0000;
2495 let Inst{3-0} = imm{3-0};
  let isUnpredicable = 1;
}
} // End of 'let isReMaterializable = 1 in'
2501 //===----------------------------------------------------------------------===//
2502 // Assembler aliases.
// A few mnemonic aliases for pre-unified (pre-UAL) assembly syntax. We don't
// guarantee to support them all, but supporting at least some of the basics
// is good to be friendly.
2507 def : VFP2MnemonicAlias<"flds", "vldr">;
2508 def : VFP2MnemonicAlias<"fldd", "vldr">;
2509 def : VFP2MnemonicAlias<"fmrs", "vmov">;
2510 def : VFP2MnemonicAlias<"fmsr", "vmov">;
2511 def : VFP2MnemonicAlias<"fsqrts", "vsqrt">;
2512 def : VFP2MnemonicAlias<"fsqrtd", "vsqrt">;
2513 def : VFP2MnemonicAlias<"fadds", "vadd.f32">;
2514 def : VFP2MnemonicAlias<"faddd", "vadd.f64">;
2515 def : VFP2MnemonicAlias<"fmrdd", "vmov">;
2516 def : VFP2MnemonicAlias<"fmrds", "vmov">;
2517 def : VFP2MnemonicAlias<"fmrrd", "vmov">;
2518 def : VFP2MnemonicAlias<"fmdrr", "vmov">;
2519 def : VFP2MnemonicAlias<"fmuls", "vmul.f32">;
2520 def : VFP2MnemonicAlias<"fmuld", "vmul.f64">;
2521 def : VFP2MnemonicAlias<"fnegs", "vneg.f32">;
2522 def : VFP2MnemonicAlias<"fnegd", "vneg.f64">;
2523 def : VFP2MnemonicAlias<"ftosizd", "vcvt.s32.f64">;
2524 def : VFP2MnemonicAlias<"ftosid", "vcvtr.s32.f64">;
2525 def : VFP2MnemonicAlias<"ftosizs", "vcvt.s32.f32">;
2526 def : VFP2MnemonicAlias<"ftosis", "vcvtr.s32.f32">;
2527 def : VFP2MnemonicAlias<"ftouizd", "vcvt.u32.f64">;
2528 def : VFP2MnemonicAlias<"ftouid", "vcvtr.u32.f64">;
2529 def : VFP2MnemonicAlias<"ftouizs", "vcvt.u32.f32">;
2530 def : VFP2MnemonicAlias<"ftouis", "vcvtr.u32.f32">;
2531 def : VFP2MnemonicAlias<"fsitod", "vcvt.f64.s32">;
2532 def : VFP2MnemonicAlias<"fsitos", "vcvt.f32.s32">;
2533 def : VFP2MnemonicAlias<"fuitod", "vcvt.f64.u32">;
2534 def : VFP2MnemonicAlias<"fuitos", "vcvt.f32.u32">;
2535 def : VFP2MnemonicAlias<"fsts", "vstr">;
2536 def : VFP2MnemonicAlias<"fstd", "vstr">;
2537 def : VFP2MnemonicAlias<"fmacd", "vmla.f64">;
2538 def : VFP2MnemonicAlias<"fmacs", "vmla.f32">;
2539 def : VFP2MnemonicAlias<"fcpys", "vmov.f32">;
2540 def : VFP2MnemonicAlias<"fcpyd", "vmov.f64">;
2541 def : VFP2MnemonicAlias<"fcmps", "vcmp.f32">;
2542 def : VFP2MnemonicAlias<"fcmpd", "vcmp.f64">;
2543 def : VFP2MnemonicAlias<"fdivs", "vdiv.f32">;
2544 def : VFP2MnemonicAlias<"fdivd", "vdiv.f64">;
2545 def : VFP2MnemonicAlias<"fmrx", "vmrs">;
2546 def : VFP2MnemonicAlias<"fmxr", "vmsr">;
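// With these aliases, pre-UAL assembly such as "fadds s0, s1, s2" is accepted
// and encoded the same as the unified "vadd.f32 s0, s1, s2".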
2548 // Be friendly and accept the old form of zero-compare
2549 def : VFP2DPInstAlias<"fcmpzd${p} $val", (VCMPZD DPR:$val, pred:$p)>;
2550 def : VFP2InstAlias<"fcmpzs${p} $val", (VCMPZS SPR:$val, pred:$p)>;
2553 def : InstAlias<"fmstat${p}", (FMSTAT pred:$p), 0>, Requires<[HasFPRegs]>;
2554 def : VFP2InstAlias<"fadds${p} $Sd, $Sn, $Sm",
2555 (VADDS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
2556 def : VFP2DPInstAlias<"faddd${p} $Dd, $Dn, $Dm",
2557 (VADDD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
2558 def : VFP2InstAlias<"fsubs${p} $Sd, $Sn, $Sm",
2559 (VSUBS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
2560 def : VFP2DPInstAlias<"fsubd${p} $Dd, $Dn, $Dm",
2561 (VSUBD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
2563 // No need for the size suffix on VSQRT. It's implied by the register classes.
2564 def : VFP2InstAlias<"vsqrt${p} $Sd, $Sm", (VSQRTS SPR:$Sd, SPR:$Sm, pred:$p)>;
2565 def : VFP2DPInstAlias<"vsqrt${p} $Dd, $Dm", (VSQRTD DPR:$Dd, DPR:$Dm, pred:$p)>;
2567 // VLDR/VSTR accept an optional type suffix.
2568 def : VFP2InstAlias<"vldr${p}.32 $Sd, $addr",
2569 (VLDRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
2570 def : VFP2InstAlias<"vstr${p}.32 $Sd, $addr",
2571 (VSTRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
2572 def : VFP2InstAlias<"vldr${p}.64 $Dd, $addr",
2573 (VLDRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
2574 def : VFP2InstAlias<"vstr${p}.64 $Dd, $addr",
2575 (VSTRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
// VMOV can accept an optional 32-bit or smaller data type suffix.
2578 def : VFP2InstAlias<"vmov${p}.8 $Rt, $Sn",
2579 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
2580 def : VFP2InstAlias<"vmov${p}.16 $Rt, $Sn",
2581 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
2582 def : VFP2InstAlias<"vmov${p}.32 $Rt, $Sn",
2583 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
2584 def : VFP2InstAlias<"vmov${p}.8 $Sn, $Rt",
2585 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
2586 def : VFP2InstAlias<"vmov${p}.16 $Sn, $Rt",
2587 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
2588 def : VFP2InstAlias<"vmov${p}.32 $Sn, $Rt",
2589 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
2591 def : VFP2InstAlias<"vmov${p}.f64 $Rt, $Rt2, $Dn",
2592 (VMOVRRD GPR:$Rt, GPR:$Rt2, DPR:$Dn, pred:$p)>;
2593 def : VFP2InstAlias<"vmov${p}.f64 $Dn, $Rt, $Rt2",
2594 (VMOVDRR DPR:$Dn, GPR:$Rt, GPR:$Rt2, pred:$p)>;
// VMOVS doesn't need the .f32 to disambiguate from the NEON encoding the way
// VMOVD does.
2598 def : VFP2InstAlias<"vmov${p} $Sd, $Sm",
2599 (VMOVS SPR:$Sd, SPR:$Sm, pred:$p)>;
2601 // FCONSTD/FCONSTS alias for vmov.f64/vmov.f32
2602 // These aliases provide added functionality over vmov.f instructions by
2603 // allowing users to write assembly containing encoded floating point constants
// (e.g. #0x70 vs #1.0). Without these aliases there is no way for the
// assembler to accept encoded fp constants (but the equivalent fp-literal is
// accepted directly by vmov.f).
2607 def : VFP3InstAlias<"fconstd${p} $Dd, $val",
2608 (FCONSTD DPR:$Dd, vfp_f64imm:$val, pred:$p)>;
2609 def : VFP3InstAlias<"fconsts${p} $Sd, $val",
2610 (FCONSTS SPR:$Sd, vfp_f32imm:$val, pred:$p)>;
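// For example, "fconsts s0, #0x70" is the encoded-immediate spelling of
// "vmov.f32 s0, #1.0".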
2612 def VSCCLRMD : VFPXI<(outs), (ins pred:$p, fp_dreglist_with_vpr:$regs, variable_ops),
2613 AddrModeNone, 4, IndexModeNone, VFPMiscFrm, NoItinerary,
                      "vscclrm{$p}\t$regs", "", []>, Sched<[]> {
  bits<13> regs;
2616 let Inst{31-23} = 0b111011001;
2617 let Inst{22} = regs{12};
2618 let Inst{21-16} = 0b011111;
2619 let Inst{15-12} = regs{11-8};
2620 let Inst{11-8} = 0b1011;
2621 let Inst{7-0} = regs{7-0};
2623 let DecoderMethod = "DecodeVSCCLRM";
  list<Predicate> Predicates = [HasV8_1MMainline, Has8MSecExt];
}
2628 def VSCCLRMS : VFPXI<(outs), (ins pred:$p, fp_sreglist_with_vpr:$regs, variable_ops),
2629 AddrModeNone, 4, IndexModeNone, VFPMiscFrm, NoItinerary,
                      "vscclrm{$p}\t$regs", "", []>, Sched<[]> {
  bits<13> regs;
2632 let Inst{31-23} = 0b111011001;
2633 let Inst{22} = regs{8};
2634 let Inst{21-16} = 0b011111;
2635 let Inst{15-12} = regs{12-9};
2636 let Inst{11-8} = 0b1010;
2637 let Inst{7-0} = regs{7-0};
2639 let DecoderMethod = "DecodeVSCCLRM";
  list<Predicate> Predicates = [HasV8_1MMainline, Has8MSecExt];
}
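// VSCCLRMD/VSCCLRMS above implement VSCCLRM (floating-point secure context
// clear multiple), used to scrub FP register state on Armv8.1-M secure state
// transitions.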
2644 //===----------------------------------------------------------------------===//
2645 // Store VFP System Register to memory.
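// The multiclass below generates an offset, a pre-indexed (writeback) and a
// post-indexed VSTR/VLDR form for each system register, as well as the
// matching load forms, e.g.
//   vstr  fpcxtns, [sp, #-4]!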
2648 class vfp_vstrldr<bit opc, bit P, bit W, bits<4> SysReg, string sysreg,
2649 dag oops, dag iops, IndexMode im, string Dest, string cstr>
2650 : VFPI<oops, iops, AddrModeT2_i7s4, 4, im, VFPLdStFrm, IIC_fpSTAT,
2651 !if(opc,"vldr","vstr"), !strconcat("\t", sysreg, ", ", Dest), cstr, []>,
2654 let Inst{27-25} = 0b110;
2656 let Inst{23} = addr{7};
2657 let Inst{22} = SysReg{3};
2660 let Inst{19-16} = addr{11-8};
2661 let Inst{15-13} = SysReg{2-0};
2662 let Inst{12-7} = 0b011111;
2663 let Inst{6-0} = addr{6-0};
2664 list<Predicate> Predicates = [HasFPRegs, HasV8_1MMainline];
2666 let mayStore = !if(opc, 0b0, 0b1);
  let hasSideEffects = 1;
}
2670 multiclass vfp_vstrldr_sysreg<bit opc, bits<4> SysReg, string sysreg,
                              dag oops=(outs), dag iops=(ins)> {
  def _off :
2673 vfp_vstrldr<opc, 1, 0, SysReg, sysreg,
2674 oops, !con(iops, (ins t2addrmode_imm7s4:$addr)),
2675 IndexModePost, "$addr", "" > {
    let DecoderMethod = "DecodeVSTRVLDR_SYSREG<false>";
  }

  def _pre :
2680 vfp_vstrldr<opc, 1, 1, SysReg, sysreg,
2681 !con(oops, (outs GPRnopc:$wb)),
2682 !con(iops, (ins t2addrmode_imm7s4_pre:$addr)),
2683 IndexModePre, "$addr!", "$addr.base = $wb"> {
    let DecoderMethod = "DecodeVSTRVLDR_SYSREG<true>";
  }

  def _post :
2688 vfp_vstrldr<opc, 0, 1, SysReg, sysreg,
2689 !con(oops, (outs GPRnopc:$wb)),
2690 !con(iops, (ins t2_addr_offset_none:$Rn,
2691 t2am_imm7s4_offset:$addr)),
2692 IndexModePost, "$Rn$addr", "$Rn.base = $wb"> {
2694 let Inst{19-16} = Rn{3-0};
    let DecoderMethod = "DecodeVSTRVLDR_SYSREG<true>";
  }
}
2699 let Defs = [FPSCR] in {
2700 defm VSTR_FPSCR : vfp_vstrldr_sysreg<0b0,0b0001, "fpscr">;
2701 defm VSTR_FPSCR_NZCVQC : vfp_vstrldr_sysreg<0b0,0b0010, "fpscr_nzcvqc">;
2703 let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
2704 defm VSTR_FPCXTNS : vfp_vstrldr_sysreg<0b0,0b1110, "fpcxtns">;
2705 defm VSTR_FPCXTS : vfp_vstrldr_sysreg<0b0,0b1111, "fpcxts">;
2709 let Predicates = [HasV8_1MMainline, HasMVEInt] in {
2710 let Uses = [VPR] in {
2711 defm VSTR_VPR : vfp_vstrldr_sysreg<0b0,0b1100, "vpr">;
2713 defm VSTR_P0 : vfp_vstrldr_sysreg<0b0,0b1101, "p0",
2714 (outs), (ins VCCR:$P0)>;
2717 let Uses = [FPSCR] in {
2718 defm VLDR_FPSCR : vfp_vstrldr_sysreg<0b1,0b0001, "fpscr">;
2719 defm VLDR_FPSCR_NZCVQC : vfp_vstrldr_sysreg<0b1,0b0010, "fpscr_nzcvqc">;
2721 let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
2722 defm VLDR_FPCXTNS : vfp_vstrldr_sysreg<0b1,0b1110, "fpcxtns">;
2723 defm VLDR_FPCXTS : vfp_vstrldr_sysreg<0b1,0b1111, "fpcxts">;
2727 let Predicates = [HasV8_1MMainline, HasMVEInt] in {
2728 let Defs = [VPR] in {
2729 defm VLDR_VPR : vfp_vstrldr_sysreg<0b1,0b1100, "vpr">;
2731 defm VLDR_P0 : vfp_vstrldr_sysreg<0b1,0b1101, "p0",
2732 (outs VCCR:$P0), (ins)>;