1 //===-- ARMInstrVFP.td - VFP support for ARM ---------------*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file describes the ARM VFP instruction set.
11 //===----------------------------------------------------------------------===//
13 def SDT_CMPFP0 : SDTypeProfile<0, 1, [SDTCisFP<0>]>;
14 def SDT_VMOVDRR : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
15                                        SDTCisVT<2, i32>]>;
16 def SDT_VMOVRRD : SDTypeProfile<2, 1, [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>,
17                                        SDTCisVT<2, f64>]>;
19 def SDT_VMOVSR : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisVT<1, i32>]>;
21 def arm_fmstat : SDNode<"ARMISD::FMSTAT", SDTNone, [SDNPInGlue, SDNPOutGlue]>;
22 def arm_cmpfp : SDNode<"ARMISD::CMPFP", SDT_ARMCmp, [SDNPOutGlue]>;
23 def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0", SDT_CMPFP0, [SDNPOutGlue]>;
24 def arm_cmpfpe : SDNode<"ARMISD::CMPFPE", SDT_ARMCmp, [SDNPOutGlue]>;
25 def arm_cmpfpe0: SDNode<"ARMISD::CMPFPEw0",SDT_CMPFP0, [SDNPOutGlue]>;
26 def arm_fmdrr : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
27 def arm_fmrrd : SDNode<"ARMISD::VMOVRRD", SDT_VMOVRRD>;
28 def arm_vmovsr : SDNode<"ARMISD::VMOVSR", SDT_VMOVSR>;
30 def SDT_VMOVhr : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, i32>] >;
31 def SDT_VMOVrh : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisFP<1>] >;
32 def arm_vmovhr : SDNode<"ARMISD::VMOVhr", SDT_VMOVhr>;
33 def arm_vmovrh : SDNode<"ARMISD::VMOVrh", SDT_VMOVrh>;
35 //===----------------------------------------------------------------------===//
36 // Operand Definitions.
37 //===----------------------------------------------------------------------===//
39 // 8-bit floating-point immediate encodings.
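// Only values representable in this 8-bit format (checked below via the
// ARM_AM::getFP16Imm/getFP32Imm/getFP64Imm helpers) are accepted: roughly
// +/-(16..31)/16 * 2^n with n in [-3,4], so constants like 1.0, 0.5 or 31.0
// encode directly, while e.g. 0.1 does not and must be materialized another
// way.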
40 def FPImmOperand : AsmOperandClass {
41   let Name = "FPImm";
42   let ParserMethod = "parseFPImm";
43 }
45 def vfp_f16imm : Operand<f16>,
46 PatLeaf<(f16 fpimm), [{
47 return ARM_AM::getFP16Imm(N->getValueAPF()) != -1;
48 }], SDNodeXForm<fpimm, [{
49 APFloat InVal = N->getValueAPF();
50 uint32_t enc = ARM_AM::getFP16Imm(InVal);
51       return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
52     }]>> {
53   let PrintMethod = "printFPImmOperand";
54   let ParserMatchClass = FPImmOperand;
55 }
57 def vfp_f32f16imm_xform : SDNodeXForm<fpimm, [{
58 APFloat InVal = N->getValueAPF();
59 uint32_t enc = ARM_AM::getFP32FP16Imm(InVal);
60     return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
61   }]>;
63 def vfp_f32f16imm : PatLeaf<(f32 fpimm), [{
64 return ARM_AM::getFP32FP16Imm(N->getValueAPF()) != -1;
65 }], vfp_f32f16imm_xform>;
67 def vfp_f32imm_xform : SDNodeXForm<fpimm, [{
68 APFloat InVal = N->getValueAPF();
69 uint32_t enc = ARM_AM::getFP32Imm(InVal);
70     return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
71   }]>;
73 def gi_vfp_f32imm : GICustomOperandRenderer<"renderVFPF32Imm">,
74 GISDNodeXFormEquiv<vfp_f32imm_xform>;
76 def vfp_f32imm : Operand<f32>,
77 PatLeaf<(f32 fpimm), [{
78 return ARM_AM::getFP32Imm(N->getValueAPF()) != -1;
79 }], vfp_f32imm_xform> {
80 let PrintMethod = "printFPImmOperand";
81 let ParserMatchClass = FPImmOperand;
82 let GISelPredicateCode = [{
83     const auto &MO = MI.getOperand(1);
84     if (!MO.isFPImm())
85       return false;
86     return ARM_AM::getFP32Imm(MO.getFPImm()->getValueAPF()) != -1;
87   }];
88 }
90 def vfp_f64imm_xform : SDNodeXForm<fpimm, [{
91 APFloat InVal = N->getValueAPF();
92 uint32_t enc = ARM_AM::getFP64Imm(InVal);
93     return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
94   }]>;
96 def gi_vfp_f64imm : GICustomOperandRenderer<"renderVFPF64Imm">,
97 GISDNodeXFormEquiv<vfp_f64imm_xform>;
99 def vfp_f64imm : Operand<f64>,
100 PatLeaf<(f64 fpimm), [{
101 return ARM_AM::getFP64Imm(N->getValueAPF()) != -1;
102 }], vfp_f64imm_xform> {
103 let PrintMethod = "printFPImmOperand";
104 let ParserMatchClass = FPImmOperand;
105 let GISelPredicateCode = [{
106     const auto &MO = MI.getOperand(1);
107     if (!MO.isFPImm())
108       return false;
109     return ARM_AM::getFP64Imm(MO.getFPImm()->getValueAPF()) != -1;
110   }];
111 }
113 def alignedload16 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
114   return cast<LoadSDNode>(N)->getAlignment() >= 2;
115 }]>;
117 def alignedload32 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
118   return cast<LoadSDNode>(N)->getAlignment() >= 4;
119 }]>;
121 def alignedstore16 : PatFrag<(ops node:$val, node:$ptr),
122 (store node:$val, node:$ptr), [{
123   return cast<StoreSDNode>(N)->getAlignment() >= 2;
124 }]>;
126 def alignedstore32 : PatFrag<(ops node:$val, node:$ptr),
127 (store node:$val, node:$ptr), [{
128   return cast<StoreSDNode>(N)->getAlignment() >= 4;
129 }]>;
131 // The VCVT to/from fixed-point instructions encode the 'fbits' operand
132 // (the number of fixed bits) differently than it appears in the assembly
133 // source. It's encoded as "Size - fbits" where Size is the size of the
134 // fixed-point representation (32 or 16) and fbits is the value appearing
135 // in the assembly source, an integer in [0,16] or (0,32], depending on size.
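// For example, "vcvt.f32.s32 s0, s0, #16" has fbits = 16 and Size = 32, so
// the immediate field in the encoding holds 32 - 16 = 16; printFBits32 and
// the FBits32 parser below convert between the two forms.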
136 def fbits32_asm_operand : AsmOperandClass { let Name = "FBits32"; }
137 def fbits32 : Operand<i32> {
138 let PrintMethod = "printFBits32";
139   let ParserMatchClass = fbits32_asm_operand;
140 }
142 def fbits16_asm_operand : AsmOperandClass { let Name = "FBits16"; }
143 def fbits16 : Operand<i32> {
144 let PrintMethod = "printFBits16";
145   let ParserMatchClass = fbits16_asm_operand;
146 }
148 //===----------------------------------------------------------------------===//
149 // Load / store Instructions.
150 //===----------------------------------------------------------------------===//
152 let canFoldAsLoad = 1, isReMaterializable = 1 in {
154 def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
155 IIC_fpLoad64, "vldr", "\t$Dd, $addr",
156 [(set DPR:$Dd, (f64 (alignedload32 addrmode5:$addr)))]>,
157 Requires<[HasFPRegs]>;
159 def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
160 IIC_fpLoad32, "vldr", "\t$Sd, $addr",
161 [(set SPR:$Sd, (alignedload32 addrmode5:$addr))]>,
162 Requires<[HasFPRegs]> {
163   // Some single precision VFP instructions may be executed on both NEON and VFP
164   // pipelines.
165   let D = VFPNeonDomain;
166 }
168 let isUnpredicable = 1 in
169 def VLDRH : AHI5<0b1101, 0b01, (outs HPR:$Sd), (ins addrmode5fp16:$addr),
170 IIC_fpLoad16, "vldr", ".16\t$Sd, $addr",
171 [(set HPR:$Sd, (f16 (alignedload16 addrmode5fp16:$addr)))]>,
172 Requires<[HasFPRegs16]>;
174 } // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'
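// Note: addrmode5 offsets are 8-bit immediates scaled by 4 (roughly
// +/-1020 bytes from the base register) and addrmode5fp16 offsets are
// scaled by 2; VLDR/VSTR have no unaligned forms, hence the alignedload /
// alignedstore fragments used in the patterns above and below.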
176 def : Pat<(bf16 (alignedload16 addrmode5fp16:$addr)),
177 (VLDRH addrmode5fp16:$addr)> {
178   let Predicates = [HasFPRegs16];
179 }
180 def : Pat<(bf16 (alignedload16 addrmode3:$addr)),
181 (COPY_TO_REGCLASS (LDRH addrmode3:$addr), HPR)> {
182   let Predicates = [HasNoFPRegs16, IsARM];
183 }
184 def : Pat<(bf16 (alignedload16 t2addrmode_imm12:$addr)),
185 (COPY_TO_REGCLASS (t2LDRHi12 t2addrmode_imm12:$addr), HPR)> {
186   let Predicates = [HasNoFPRegs16, IsThumb];
187 }
189 def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
190 IIC_fpStore64, "vstr", "\t$Dd, $addr",
191 [(alignedstore32 (f64 DPR:$Dd), addrmode5:$addr)]>,
192 Requires<[HasFPRegs]>;
194 def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
195 IIC_fpStore32, "vstr", "\t$Sd, $addr",
196 [(alignedstore32 SPR:$Sd, addrmode5:$addr)]>,
197 Requires<[HasFPRegs]> {
198   // Some single precision VFP instructions may be executed on both NEON and VFP
199   // pipelines.
200   let D = VFPNeonDomain;
201 }
203 let isUnpredicable = 1 in
204 def VSTRH : AHI5<0b1101, 0b00, (outs), (ins HPR:$Sd, addrmode5fp16:$addr),
205 IIC_fpStore16, "vstr", ".16\t$Sd, $addr",
206 [(alignedstore16 (f16 HPR:$Sd), addrmode5fp16:$addr)]>,
207 Requires<[HasFPRegs16]>;
209 def : Pat<(alignedstore16 (bf16 HPR:$Sd), addrmode5fp16:$addr),
210 (VSTRH (bf16 HPR:$Sd), addrmode5fp16:$addr)> {
211   let Predicates = [HasFPRegs16];
212 }
213 def : Pat<(alignedstore16 (bf16 HPR:$Sd), addrmode3:$addr),
214 (STRH (COPY_TO_REGCLASS $Sd, GPR), addrmode3:$addr)> {
215   let Predicates = [HasNoFPRegs16, IsARM];
216 }
217 def : Pat<(alignedstore16 (bf16 HPR:$Sd), t2addrmode_imm12:$addr),
218 (t2STRHi12 (COPY_TO_REGCLASS $Sd, GPR), t2addrmode_imm12:$addr)> {
219   let Predicates = [HasNoFPRegs16, IsThumb];
220 }
222 //===----------------------------------------------------------------------===//
223 // Load / store multiple Instructions.
224 //===----------------------------------------------------------------------===//
226 multiclass vfp_ldst_mult<string asm, bit L_bit,
227                          InstrItinClass itin, InstrItinClass itin_upd> {
228   let Predicates = [HasFPRegs] in {
229   // Double Precision
230   def DIA :
231     AXDI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
232           IndexModeNone, itin,
233           !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
234     let Inst{24-23} = 0b01; // Increment After
235     let Inst{21} = 0;       // No writeback
236     let Inst{20} = L_bit;
237   }
238   def DIA_UPD :
239     AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
240                                 variable_ops),
241           IndexModeUpd, itin_upd,
242           !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
243     let Inst{24-23} = 0b01; // Increment After
244     let Inst{21} = 1;       // Writeback
245     let Inst{20} = L_bit;
246   }
247   def DDB_UPD :
248     AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
249                                 variable_ops),
250           IndexModeUpd, itin_upd,
251           !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
252     let Inst{24-23} = 0b10; // Decrement Before
253     let Inst{21} = 1;       // Writeback
254     let Inst{20} = L_bit;
255   }
257   // Single Precision
258   def SIA :
259     AXSI4<(outs), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
260           IndexModeNone, itin,
261           !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
262     let Inst{24-23} = 0b01; // Increment After
263     let Inst{21} = 0;       // No writeback
264     let Inst{20} = L_bit;
266     // Some single precision VFP instructions may be executed on both NEON and
267     // VFP pipelines.
268     let D = VFPNeonDomain;
269   }
270   def SIA_UPD :
271     AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
272                                 variable_ops),
273           IndexModeUpd, itin_upd,
274           !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
275     let Inst{24-23} = 0b01; // Increment After
276     let Inst{21} = 1;       // Writeback
277     let Inst{20} = L_bit;
279     // Some single precision VFP instructions may be executed on both NEON and
280     // VFP pipelines.
281     let D = VFPNeonDomain;
282   }
283   def SDB_UPD :
284     AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
285                                 variable_ops),
286           IndexModeUpd, itin_upd,
287           !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
288     let Inst{24-23} = 0b10; // Decrement Before
289     let Inst{21} = 1;       // Writeback
290     let Inst{20} = L_bit;
292     // Some single precision VFP instructions may be executed on both NEON and
293     // VFP pipelines.
294     let D = VFPNeonDomain;
295   }
296   } // Predicates = [HasFPRegs]
297 }
299 let hasSideEffects = 0 in {
301 let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
302 defm VLDM : vfp_ldst_mult<"vldm", 1, IIC_fpLoad_m, IIC_fpLoad_mu>;
304 let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
305 defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpStore_m, IIC_fpStore_mu>;
309 def : MnemonicAlias<"vldm", "vldmia">;
310 def : MnemonicAlias<"vstm", "vstmia">;
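// e.g. "vldm r0, {d0-d3}" is accepted and encoded as "vldmia r0, {d0-d3}".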
313 //===----------------------------------------------------------------------===//
314 // Lazy load / store multiple Instructions
316 def VLLDM : AXSI4<(outs), (ins GPRnopc:$Rn, pred:$p), IndexModeNone,
317 NoItinerary, "vlldm${p}\t$Rn", "", []>,
318 Requires<[HasV8MMainline, Has8MSecExt]> {
319 let Inst{24-23} = 0b00;
326   let Defs = [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, VPR, FPSCR, FPSCR_NZCV];
327 }
329 def VLSTM : AXSI4<(outs), (ins GPRnopc:$Rn, pred:$p), IndexModeNone,
330 NoItinerary, "vlstm${p}\t$Rn", "", []>,
331 Requires<[HasV8MMainline, Has8MSecExt]> {
332   let Inst{24-23} = 0b00;
339 }
341 def : InstAlias<"vpush${p} $r", (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r), 0>,
342 Requires<[HasFPRegs]>;
343 def : InstAlias<"vpush${p} $r", (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r), 0>,
344 Requires<[HasFPRegs]>;
345 def : InstAlias<"vpop${p} $r", (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r), 0>,
346 Requires<[HasFPRegs]>;
347 def : InstAlias<"vpop${p} $r", (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r), 0>,
348 Requires<[HasFPRegs]>;
349 defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
350 (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>;
351 defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
352 (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>;
353 defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
354 (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>;
355 defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
356 (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>;
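// e.g. "vpush {d8-d15}" assembles to "vstmdb sp!, {d8-d15}" and
// "vpop {s0-s3}" to "vldmia sp!, {s0-s3}", via the aliases above.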
358 // FLDMX, FSTMX - Load and store multiple unknown precision registers for
359 // pre-armv6 cores.
360 // These instructions are deprecated so we don't want them to get selected.
361 // However, there is no UAL syntax for them, so we keep them around for
362 // (dis)assembly only.
363 multiclass vfp_ldstx_mult<string asm, bit L_bit> {
364   let Predicates = [HasFPRegs], hasNoSchedulingInfo = 1 in {
365   // Unknown precision
366   def XIA :
367 AXXI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
368 IndexModeNone, !strconcat(asm, "iax${p}\t$Rn, $regs"), "", []> {
369 let Inst{24-23} = 0b01; // Increment After
370 let Inst{21} = 0; // No writeback
371     let Inst{20} = L_bit;
372   }
373   def XIA_UPD :
374 AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
375 IndexModeUpd, !strconcat(asm, "iax${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
376 let Inst{24-23} = 0b01; // Increment After
377 let Inst{21} = 1; // Writeback
378     let Inst{20} = L_bit;
379   }
380   def XDB_UPD :
381 AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
382 IndexModeUpd, !strconcat(asm, "dbx${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
383 let Inst{24-23} = 0b10; // Decrement Before
384 let Inst{21} = 1; // Writeback
385     let Inst{20} = L_bit;
386   }
387   } // Predicates = [HasFPRegs], hasNoSchedulingInfo = 1
388 }
390 defm FLDM : vfp_ldstx_mult<"fldm", 1>;
391 defm FSTM : vfp_ldstx_mult<"fstm", 0>;
393 def : VFP2MnemonicAlias<"fldmeax", "fldmdbx">;
394 def : VFP2MnemonicAlias<"fldmfdx", "fldmiax">;
396 def : VFP2MnemonicAlias<"fstmeax", "fstmiax">;
397 def : VFP2MnemonicAlias<"fstmfdx", "fstmdbx">;
399 //===----------------------------------------------------------------------===//
400 // FP Binary Operations.
401 //===----------------------------------------------------------------------===//
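// Naming convention: the trailing D/S/H selects the f64/f32/f16 form of each
// operation. TwoOperandAliasConstraint lets the assembler accept the
// two-operand syntax, e.g. "vadd.f32 s0, s1" for "vadd.f32 s0, s0, s1".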
403 let TwoOperandAliasConstraint = "$Dn = $Dd" in
404 def VADDD : ADbI<0b11100, 0b11, 0, 0,
405 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
406 IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
407 [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>,
408 Sched<[WriteFPALU64]>;
410 let TwoOperandAliasConstraint = "$Sn = $Sd" in
411 def VADDS : ASbIn<0b11100, 0b11, 0, 0,
412 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
413 IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
414 [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]>,
415 Sched<[WriteFPALU32]> {
416 // Some single precision VFP instructions may be executed on both NEON and
417 // VFP pipelines on A8.
418   let D = VFPNeonA8Domain;
419 }
421 let TwoOperandAliasConstraint = "$Sn = $Sd" in
422 def VADDH : AHbI<0b11100, 0b11, 0, 0,
423 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
424 IIC_fpALU16, "vadd", ".f16\t$Sd, $Sn, $Sm",
425 [(set (f16 HPR:$Sd), (fadd (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
426 Sched<[WriteFPALU32]>;
428 let TwoOperandAliasConstraint = "$Dn = $Dd" in
429 def VSUBD : ADbI<0b11100, 0b11, 1, 0,
430 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
431 IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
432 [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>,
433 Sched<[WriteFPALU64]>;
435 let TwoOperandAliasConstraint = "$Sn = $Sd" in
436 def VSUBS : ASbIn<0b11100, 0b11, 1, 0,
437 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
438 IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
439 [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]>,
440 Sched<[WriteFPALU32]>{
441 // Some single precision VFP instructions may be executed on both NEON and
442 // VFP pipelines on A8.
443   let D = VFPNeonA8Domain;
444 }
446 let TwoOperandAliasConstraint = "$Sn = $Sd" in
447 def VSUBH : AHbI<0b11100, 0b11, 1, 0,
448 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
449 IIC_fpALU16, "vsub", ".f16\t$Sd, $Sn, $Sm",
450 [(set (f16 HPR:$Sd), (fsub (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
451 Sched<[WriteFPALU32]>;
453 let TwoOperandAliasConstraint = "$Dn = $Dd" in
454 def VDIVD : ADbI<0b11101, 0b00, 0, 0,
455 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
456 IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
457 [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>,
458 Sched<[WriteFPDIV64]>;
460 let TwoOperandAliasConstraint = "$Sn = $Sd" in
461 def VDIVS : ASbI<0b11101, 0b00, 0, 0,
462 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
463 IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
464 [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>,
465 Sched<[WriteFPDIV32]>;
467 let TwoOperandAliasConstraint = "$Sn = $Sd" in
468 def VDIVH : AHbI<0b11101, 0b00, 0, 0,
469 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
470 IIC_fpDIV16, "vdiv", ".f16\t$Sd, $Sn, $Sm",
471 [(set (f16 HPR:$Sd), (fdiv (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
472 Sched<[WriteFPDIV32]>;
474 let TwoOperandAliasConstraint = "$Dn = $Dd" in
475 def VMULD : ADbI<0b11100, 0b10, 0, 0,
476 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
477 IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
478 [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>,
479 Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;
481 let TwoOperandAliasConstraint = "$Sn = $Sd" in
482 def VMULS : ASbIn<0b11100, 0b10, 0, 0,
483 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
484 IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
485 [(set SPR:$Sd, (fmul SPR:$Sn, SPR:$Sm))]>,
486 Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]> {
487 // Some single precision VFP instructions may be executed on both NEON and
488 // VFP pipelines on A8.
489   let D = VFPNeonA8Domain;
490 }
492 let TwoOperandAliasConstraint = "$Sn = $Sd" in
493 def VMULH : AHbI<0b11100, 0b10, 0, 0,
494 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
495 IIC_fpMUL16, "vmul", ".f16\t$Sd, $Sn, $Sm",
496 [(set (f16 HPR:$Sd), (fmul (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
497 Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]>;
499 def VNMULD : ADbI<0b11100, 0b10, 1, 0,
500 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
501 IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
502 [(set DPR:$Dd, (fneg (fmul DPR:$Dn, (f64 DPR:$Dm))))]>,
503 Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;
505 def VNMULS : ASbI<0b11100, 0b10, 1, 0,
506 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
507 IIC_fpMUL32, "vnmul", ".f32\t$Sd, $Sn, $Sm",
508 [(set SPR:$Sd, (fneg (fmul SPR:$Sn, SPR:$Sm)))]>,
509 Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]> {
510 // Some single precision VFP instructions may be executed on both NEON and
511 // VFP pipelines on A8.
512   let D = VFPNeonA8Domain;
513 }
515 def VNMULH : AHbI<0b11100, 0b10, 1, 0,
516 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
517 IIC_fpMUL16, "vnmul", ".f16\t$Sd, $Sn, $Sm",
518 [(set (f16 HPR:$Sd), (fneg (fmul (f16 HPR:$Sn), (f16 HPR:$Sm))))]>,
519 Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]>;
521 multiclass vsel_inst<string op, bits<2> opc, int CC> {
522 let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
523 Uses = [CPSR], AddedComplexity = 4, isUnpredicable = 1 in {
524 def H : AHbInp<0b11100, opc, 0,
525 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
526 NoItinerary, !strconcat("vsel", op, ".f16\t$Sd, $Sn, $Sm"),
527 [(set (f16 HPR:$Sd), (ARMcmov (f16 HPR:$Sm), (f16 HPR:$Sn), CC))]>,
528 Requires<[HasFullFP16]>;
530 def S : ASbInp<0b11100, opc, 0,
531 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
532 NoItinerary, !strconcat("vsel", op, ".f32\t$Sd, $Sn, $Sm"),
533 [(set SPR:$Sd, (ARMcmov SPR:$Sm, SPR:$Sn, CC))]>,
534 Requires<[HasFPARMv8]>;
536 def D : ADbInp<0b11100, opc, 0,
537 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
538 NoItinerary, !strconcat("vsel", op, ".f64\t$Dd, $Dn, $Dm"),
539 [(set DPR:$Dd, (ARMcmov (f64 DPR:$Dm), (f64 DPR:$Dn), CC))]>,
540                Requires<[HasFPARMv8, HasDPVFP]>;
541   }
542 }
544 // The CC constants here match ARMCC::CondCodes.
545 defm VSELGT : vsel_inst<"gt", 0b11, 12>;
546 defm VSELGE : vsel_inst<"ge", 0b10, 10>;
547 defm VSELEQ : vsel_inst<"eq", 0b00, 0>;
548 defm VSELVS : vsel_inst<"vs", 0b01, 6>;
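// e.g. "vselge.f32 s0, s1, s2" writes s1 to s0 if the GE condition holds for
// the current APSR flags, and s2 otherwise.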
550 multiclass vmaxmin_inst<string op, bit opc, SDNode SD> {
551 let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
552 isUnpredicable = 1 in {
553 def H : AHbInp<0b11101, 0b00, opc,
554 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
555 NoItinerary, !strconcat(op, ".f16\t$Sd, $Sn, $Sm"),
556 [(set (f16 HPR:$Sd), (SD (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
557 Requires<[HasFullFP16]>;
559 def S : ASbInp<0b11101, 0b00, opc,
560 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
561 NoItinerary, !strconcat(op, ".f32\t$Sd, $Sn, $Sm"),
562 [(set SPR:$Sd, (SD SPR:$Sn, SPR:$Sm))]>,
563 Requires<[HasFPARMv8]>;
565 def D : ADbInp<0b11101, 0b00, opc,
566 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
567 NoItinerary, !strconcat(op, ".f64\t$Dd, $Dn, $Dm"),
568 [(set DPR:$Dd, (f64 (SD (f64 DPR:$Dn), (f64 DPR:$Dm))))]>,
569                Requires<[HasFPARMv8, HasDPVFP]>;
570   }
571 }
573 defm VFP_VMAXNM : vmaxmin_inst<"vmaxnm", 0, fmaxnum>;
574 defm VFP_VMINNM : vmaxmin_inst<"vminnm", 1, fminnum>;
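// VMAXNM/VMINNM follow the IEEE 754-2008 maxNum/minNum semantics expected by
// fmaxnum/fminnum: if exactly one operand is a quiet NaN, the other (numeric)
// operand is returned.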
576 // Match reassociated forms only if not sign dependent rounding.
577 def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
578 (VNMULD DPR:$a, DPR:$b)>,
579 Requires<[NoHonorSignDependentRounding,HasDPVFP]>;
580 def : Pat<(fmul (fneg SPR:$a), SPR:$b),
581 (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
583 // These are encoded as unary instructions.
584 let Defs = [FPSCR_NZCV] in {
585 def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
586 (outs), (ins DPR:$Dd, DPR:$Dm),
587 IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm",
588 [(arm_cmpfpe DPR:$Dd, (f64 DPR:$Dm))]>;
590 def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0,
591 (outs), (ins SPR:$Sd, SPR:$Sm),
592 IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm",
593 [(arm_cmpfpe SPR:$Sd, SPR:$Sm)]> {
594 // Some single precision VFP instructions may be executed on both NEON and
595 // VFP pipelines on A8.
596   let D = VFPNeonA8Domain;
597 }
599 def VCMPEH : AHuI<0b11101, 0b11, 0b0100, 0b11, 0,
600 (outs), (ins HPR:$Sd, HPR:$Sm),
601 IIC_fpCMP16, "vcmpe", ".f16\t$Sd, $Sm",
602 [(arm_cmpfpe (f16 HPR:$Sd), (f16 HPR:$Sm))]>;
604 def VCMPD : ADuI<0b11101, 0b11, 0b0100, 0b01, 0,
605 (outs), (ins DPR:$Dd, DPR:$Dm),
606 IIC_fpCMP64, "vcmp", ".f64\t$Dd, $Dm",
607 [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm))]>;
609 def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
610 (outs), (ins SPR:$Sd, SPR:$Sm),
611 IIC_fpCMP32, "vcmp", ".f32\t$Sd, $Sm",
612 [(arm_cmpfp SPR:$Sd, SPR:$Sm)]> {
613 // Some single precision VFP instructions may be executed on both NEON and
614 // VFP pipelines on A8.
615   let D = VFPNeonA8Domain;
616 }
618 def VCMPH : AHuI<0b11101, 0b11, 0b0100, 0b01, 0,
619 (outs), (ins HPR:$Sd, HPR:$Sm),
620 IIC_fpCMP16, "vcmp", ".f16\t$Sd, $Sm",
621 [(arm_cmpfp (f16 HPR:$Sd), (f16 HPR:$Sm))]>;
622 } // Defs = [FPSCR_NZCV]
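// vcmpe signals Invalid Operation if either operand is any NaN, while vcmp
// only does so for signalling NaNs. Either way the resulting FPSCR flags must
// be transferred to APSR with vmrs (FMSTAT) before a conditional branch can
// use them.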
624 //===----------------------------------------------------------------------===//
625 // FP Unary Operations.
626 //===----------------------------------------------------------------------===//
628 def VABSD : ADuI<0b11101, 0b11, 0b0000, 0b11, 0,
629 (outs DPR:$Dd), (ins DPR:$Dm),
630 IIC_fpUNA64, "vabs", ".f64\t$Dd, $Dm",
631 [(set DPR:$Dd, (fabs (f64 DPR:$Dm)))]>;
633 def VABSS : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,
634 (outs SPR:$Sd), (ins SPR:$Sm),
635 IIC_fpUNA32, "vabs", ".f32\t$Sd, $Sm",
636 [(set SPR:$Sd, (fabs SPR:$Sm))]> {
637 // Some single precision VFP instructions may be executed on both NEON and
638 // VFP pipelines on A8.
639   let D = VFPNeonA8Domain;
640 }
642 def VABSH : AHuI<0b11101, 0b11, 0b0000, 0b11, 0,
643 (outs HPR:$Sd), (ins HPR:$Sm),
644 IIC_fpUNA16, "vabs", ".f16\t$Sd, $Sm",
645 [(set (f16 HPR:$Sd), (fabs (f16 HPR:$Sm)))]>;
647 let Defs = [FPSCR_NZCV] in {
648 def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
649 (outs), (ins DPR:$Dd),
650 IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0",
651 [(arm_cmpfpe0 (f64 DPR:$Dd))]> {
652   let Inst{3-0} = 0b0000;
653   let Inst{5}   = 0;
654 }
656 def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0,
657 (outs), (ins SPR:$Sd),
658 IIC_fpCMP32, "vcmpe", ".f32\t$Sd, #0",
659 [(arm_cmpfpe0 SPR:$Sd)]> {
660   let Inst{3-0} = 0b0000;
661   let Inst{5}   = 0;
663   // Some single precision VFP instructions may be executed on both NEON and
664   // VFP pipelines on A8.
665   let D = VFPNeonA8Domain;
666 }
668 def VCMPEZH : AHuI<0b11101, 0b11, 0b0101, 0b11, 0,
669 (outs), (ins HPR:$Sd),
670 IIC_fpCMP16, "vcmpe", ".f16\t$Sd, #0",
671 [(arm_cmpfpe0 (f16 HPR:$Sd))]> {
672   let Inst{3-0} = 0b0000;
673   let Inst{5}   = 0;
674 }
676 def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
677 (outs), (ins DPR:$Dd),
678 IIC_fpCMP64, "vcmp", ".f64\t$Dd, #0",
679 [(arm_cmpfp0 (f64 DPR:$Dd))]> {
680   let Inst{3-0} = 0b0000;
681   let Inst{5}   = 0;
682 }
684 def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
685 (outs), (ins SPR:$Sd),
686 IIC_fpCMP32, "vcmp", ".f32\t$Sd, #0",
687 [(arm_cmpfp0 SPR:$Sd)]> {
688   let Inst{3-0} = 0b0000;
689   let Inst{5}   = 0;
691   // Some single precision VFP instructions may be executed on both NEON and
692   // VFP pipelines on A8.
693   let D = VFPNeonA8Domain;
694 }
696 def VCMPZH : AHuI<0b11101, 0b11, 0b0101, 0b01, 0,
697 (outs), (ins HPR:$Sd),
698 IIC_fpCMP16, "vcmp", ".f16\t$Sd, #0",
699 [(arm_cmpfp0 (f16 HPR:$Sd))]> {
700   let Inst{3-0} = 0b0000;
701   let Inst{5}   = 0;
702 }
703 } // Defs = [FPSCR_NZCV]
705 def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
706 (outs DPR:$Dd), (ins SPR:$Sm),
707 IIC_fpCVTDS, "vcvt", ".f64.f32\t$Dd, $Sm",
708 [(set DPR:$Dd, (fpextend SPR:$Sm))]>,
709 Sched<[WriteFPCVT]> {
710   // Instruction operands.
711   bits<5> Dd;
712   bits<5> Sm;
714   // Encode instruction operands.
715   let Inst{3-0} = Sm{4-1};
716   let Inst{5} = Sm{0};
717   let Inst{15-12} = Dd{3-0};
718   let Inst{22} = Dd{4};
720   let Predicates = [HasVFP2, HasDPVFP];
721   let hasSideEffects = 0;
722 }
724 // Special case encoding: bits 11-8 is 0b1011.
725 def VCVTSD : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
726 IIC_fpCVTSD, "vcvt", ".f32.f64\t$Sd, $Dm",
727 [(set SPR:$Sd, (fpround DPR:$Dm))]>,
728 Sched<[WriteFPCVT]> {
729   // Instruction operands.
730   bits<5> Sd;
731   bits<5> Dm;
733   // Encode instruction operands.
734   let Inst{3-0} = Dm{3-0};
735   let Inst{5} = Dm{4};
736   let Inst{15-12} = Sd{4-1};
737   let Inst{22} = Sd{0};
739   let Inst{27-23} = 0b11101;
740   let Inst{21-16} = 0b110111;
741   let Inst{11-8} = 0b1011;
742   let Inst{7-6} = 0b11;
743   let Inst{4} = 0;
745   let Predicates = [HasVFP2, HasDPVFP];
746   let hasSideEffects = 0;
747 }
749 // Between half, single and double-precision.
750 let hasSideEffects = 0 in
751 def VCVTBHS: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
752 /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$Sd, $Sm",
753                  [/* Intentionally left blank, see patterns below */]>,
754                  Requires<[HasFP16]>,
755                  Sched<[WriteFPCVT]>;
757 def : FP16Pat<(f32 (fpextend (f16 HPR:$Sm))),
758 (VCVTBHS (COPY_TO_REGCLASS (f16 HPR:$Sm), SPR))>;
759 def : FP16Pat<(f16_to_fp GPR:$a),
760 (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;
762 let hasSideEffects = 0 in
763 def VCVTBSH: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
764 /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$Sd, $Sm",
765                  [/* Intentionally left blank, see patterns below */]>,
766                  Requires<[HasFP16]>,
767                  Sched<[WriteFPCVT]>;
769 def : FP16Pat<(f16 (fpround SPR:$Sm)),
770 (COPY_TO_REGCLASS (VCVTBSH SPR:$Sm), HPR)>;
771 def : FP16Pat<(fp_to_f16 SPR:$a),
772 (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;
773 def : FP16Pat<(insertelt (v8f16 MQPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_even:$lane),
774 (v8f16 (INSERT_SUBREG (v8f16 MQPR:$src1), (VCVTBSH SPR:$src2),
775 (SSubReg_f16_reg imm:$lane)))>;
776 def : FP16Pat<(insertelt (v4f16 DPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_even:$lane),
777 (v4f16 (INSERT_SUBREG (v4f16 DPR:$src1), (VCVTBSH SPR:$src2),
778 (SSubReg_f16_reg imm:$lane)))>;
780 let hasSideEffects = 0 in
781 def VCVTTHS: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
782 /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$Sd, $Sm",
783                  [/* Intentionally left blank, see patterns below */]>,
784                  Requires<[HasFP16]>,
785                  Sched<[WriteFPCVT]>;
787 def : FP16Pat<(f32 (fpextend (extractelt (v8f16 MQPR:$src), imm_odd:$lane))),
788 (VCVTTHS (EXTRACT_SUBREG MQPR:$src, (SSubReg_f16_reg imm_odd:$lane)))>;
789 def : FP16Pat<(f32 (fpextend (extractelt (v4f16 DPR:$src), imm_odd:$lane))),
790 (VCVTTHS (EXTRACT_SUBREG
791 (v2f32 (COPY_TO_REGCLASS (v4f16 DPR:$src), DPR_VFP2)),
792 (SSubReg_f16_reg imm_odd:$lane)))>;
794 let hasSideEffects = 0 in
795 def VCVTTSH: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
796 /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$Sd, $Sm",
797                  [/* Intentionally left blank, see patterns below */]>,
798                  Requires<[HasFP16]>,
799                  Sched<[WriteFPCVT]>;
801 def : FP16Pat<(insertelt (v8f16 MQPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_odd:$lane),
802 (v8f16 (INSERT_SUBREG (v8f16 MQPR:$src1), (VCVTTSH SPR:$src2),
803 (SSubReg_f16_reg imm:$lane)))>;
804 def : FP16Pat<(insertelt (v4f16 DPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_odd:$lane),
805 (v4f16 (INSERT_SUBREG (v4f16 DPR:$src1), (VCVTTSH SPR:$src2),
806 (SSubReg_f16_reg imm:$lane)))>;
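// vcvtb operates on the bottom half (bits [15:0]) of the single-precision
// register and vcvtt on the top half (bits [31:16]); the even/odd lane
// patterns above use this to convert individual f16 vector lanes in place.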
808 def VCVTBHD : ADuI<0b11101, 0b11, 0b0010, 0b01, 0,
809 (outs DPR:$Dd), (ins SPR:$Sm),
810 NoItinerary, "vcvtb", ".f64.f16\t$Dd, $Sm",
811 [/* Intentionally left blank, see patterns below */]>,
812 Requires<[HasFPARMv8, HasDPVFP]>,
813 Sched<[WriteFPCVT]> {
814   // Instruction operands.
815   bits<5> Sm;
817   // Encode instruction operands.
818   let Inst{3-0} = Sm{4-1};
819   let Inst{5}   = Sm{0};
821   let hasSideEffects = 0;
822 }
824 def : FullFP16Pat<(f64 (fpextend (f16 HPR:$Sm))),
825 (VCVTBHD (COPY_TO_REGCLASS (f16 HPR:$Sm), SPR))>,
826 Requires<[HasFPARMv8, HasDPVFP]>;
827 def : FP16Pat<(f64 (f16_to_fp GPR:$a)),
828 (VCVTBHD (COPY_TO_REGCLASS GPR:$a, SPR))>,
829 Requires<[HasFPARMv8, HasDPVFP]>;
831 def VCVTBDH : ADuI<0b11101, 0b11, 0b0011, 0b01, 0,
832 (outs SPR:$Sd), (ins DPR:$Dm),
833 NoItinerary, "vcvtb", ".f16.f64\t$Sd, $Dm",
834 [/* Intentionally left blank, see patterns below */]>,
835 Requires<[HasFPARMv8, HasDPVFP]> {
836   // Instruction operands.
837   bits<5> Sd;
838   bits<5> Dm;
840   // Encode instruction operands.
841   let Inst{3-0} = Dm{3-0};
842   let Inst{5}   = Dm{4};
843   let Inst{15-12} = Sd{4-1};
844   let Inst{22} = Sd{0};
846   let hasSideEffects = 0;
847 }
849 def : FullFP16Pat<(f16 (fpround DPR:$Dm)),
850 (COPY_TO_REGCLASS (VCVTBDH DPR:$Dm), HPR)>,
851 Requires<[HasFPARMv8, HasDPVFP]>;
852 def : FP16Pat<(fp_to_f16 (f64 DPR:$a)),
853 (i32 (COPY_TO_REGCLASS (VCVTBDH DPR:$a), GPR))>,
854 Requires<[HasFPARMv8, HasDPVFP]>;
856 def VCVTTHD : ADuI<0b11101, 0b11, 0b0010, 0b11, 0,
857 (outs DPR:$Dd), (ins SPR:$Sm),
858 NoItinerary, "vcvtt", ".f64.f16\t$Dd, $Sm",
859 []>, Requires<[HasFPARMv8, HasDPVFP]> {
860   // Instruction operands.
861   bits<5> Sm;
863   // Encode instruction operands.
864   let Inst{3-0} = Sm{4-1};
865   let Inst{5}   = Sm{0};
867   let hasSideEffects = 0;
868 }
870 def VCVTTDH : ADuI<0b11101, 0b11, 0b0011, 0b11, 0,
871 (outs SPR:$Sd), (ins DPR:$Dm),
872 NoItinerary, "vcvtt", ".f16.f64\t$Sd, $Dm",
873 []>, Requires<[HasFPARMv8, HasDPVFP]> {
874   // Instruction operands.
875   bits<5> Sd;
876   bits<5> Dm;
878   // Encode instruction operands.
879   let Inst{15-12} = Sd{4-1};
880   let Inst{22} = Sd{0};
881   let Inst{3-0} = Dm{3-0};
882   let Inst{5}   = Dm{4};
884   let hasSideEffects = 0;
885 }
887 multiclass vcvt_inst<string opc, bits<2> rm,
888 SDPatternOperator node = null_frag> {
889 let PostEncoderMethod = "", DecoderNamespace = "VFPV8", hasSideEffects = 0 in {
890 def SH : AHuInp<0b11101, 0b11, 0b1100, 0b11, 0,
891 (outs SPR:$Sd), (ins HPR:$Sm),
892 NoItinerary, !strconcat("vcvt", opc, ".s32.f16\t$Sd, $Sm"),
894 Requires<[HasFullFP16]> {
895 let Inst{17-16} = rm;
898 def UH : AHuInp<0b11101, 0b11, 0b1100, 0b01, 0,
899 (outs SPR:$Sd), (ins HPR:$Sm),
900 NoItinerary, !strconcat("vcvt", opc, ".u32.f16\t$Sd, $Sm"),
902 Requires<[HasFullFP16]> {
903 let Inst{17-16} = rm;
906 def SS : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
907 (outs SPR:$Sd), (ins SPR:$Sm),
908 NoItinerary, !strconcat("vcvt", opc, ".s32.f32\t$Sd, $Sm"),
910 Requires<[HasFPARMv8]> {
911 let Inst{17-16} = rm;
914 def US : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
915 (outs SPR:$Sd), (ins SPR:$Sm),
916 NoItinerary, !strconcat("vcvt", opc, ".u32.f32\t$Sd, $Sm"),
918 Requires<[HasFPARMv8]> {
919 let Inst{17-16} = rm;
922 def SD : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
923 (outs SPR:$Sd), (ins DPR:$Dm),
924 NoItinerary, !strconcat("vcvt", opc, ".s32.f64\t$Sd, $Dm"),
926 Requires<[HasFPARMv8, HasDPVFP]> {
929 let Inst{17-16} = rm;
931 // Encode instruction operands.
932 let Inst{3-0} = Dm{3-0};
937 def UD : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
938 (outs SPR:$Sd), (ins DPR:$Dm),
939 NoItinerary, !strconcat("vcvt", opc, ".u32.f64\t$Sd, $Dm"),
941 Requires<[HasFPARMv8, HasDPVFP]> {
944 let Inst{17-16} = rm;
946 // Encode instruction operands
947 let Inst{3-0} = Dm{3-0};
953 let Predicates = [HasFPARMv8] in {
954 let Predicates = [HasFullFP16] in {
955 def : Pat<(i32 (fp_to_sint (node (f16 HPR:$a)))),
957 (!cast<Instruction>(NAME#"SH") (f16 HPR:$a)),
960 def : Pat<(i32 (fp_to_uint (node (f16 HPR:$a)))),
962 (!cast<Instruction>(NAME#"UH") (f16 HPR:$a)),
965 def : Pat<(i32 (fp_to_sint (node SPR:$a))),
967 (!cast<Instruction>(NAME#"SS") SPR:$a),
969 def : Pat<(i32 (fp_to_uint (node SPR:$a))),
971 (!cast<Instruction>(NAME#"US") SPR:$a),
974 let Predicates = [HasFPARMv8, HasDPVFP] in {
975 def : Pat<(i32 (fp_to_sint (node (f64 DPR:$a)))),
977 (!cast<Instruction>(NAME#"SD") DPR:$a),
979 def : Pat<(i32 (fp_to_uint (node (f64 DPR:$a)))),
981 (!cast<Instruction>(NAME#"UD") DPR:$a),
986 defm VCVTA : vcvt_inst<"a", 0b00, fround>;
987 defm VCVTN : vcvt_inst<"n", 0b01>;
988 defm VCVTP : vcvt_inst<"p", 0b10, fceil>;
989 defm VCVTM : vcvt_inst<"m", 0b11, ffloor>;
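// These use a rounding mode encoded in the instruction (a = nearest, ties
// away from zero; n = nearest, ties to even; p = towards +inf; m = towards
// -inf) and ignore the FPSCR rounding mode, which is why fp_to_sint/uint of
// fround/fceil/ffloor can be selected to VCVTA/VCVTP/VCVTM above.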
991 def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
992 (outs DPR:$Dd), (ins DPR:$Dm),
993 IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm",
994 [(set DPR:$Dd, (fneg (f64 DPR:$Dm)))]>;
996 def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,
997 (outs SPR:$Sd), (ins SPR:$Sm),
998 IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
999 [(set SPR:$Sd, (fneg SPR:$Sm))]> {
1000 // Some single precision VFP instructions may be executed on both NEON and
1001 // VFP pipelines on A8.
1002 let D = VFPNeonA8Domain;
1005 def VNEGH : AHuI<0b11101, 0b11, 0b0001, 0b01, 0,
1006 (outs HPR:$Sd), (ins HPR:$Sm),
1007 IIC_fpUNA16, "vneg", ".f16\t$Sd, $Sm",
1008 [(set (f16 HPR:$Sd), (fneg (f16 HPR:$Sm)))]>;
1010 multiclass vrint_inst_zrx<string opc, bit op, bit op2, SDPatternOperator node> {
1011 def H : AHuI<0b11101, 0b11, 0b0110, 0b11, 0,
1012 (outs HPR:$Sd), (ins HPR:$Sm),
1013 NoItinerary, !strconcat("vrint", opc), ".f16\t$Sd, $Sm",
1014 [(set (f16 HPR:$Sd), (node (f16 HPR:$Sm)))]>,
1015            Requires<[HasFullFP16]> {
1016     let Inst{7} = op2;
1017     let Inst{16} = op;
1018   }
1020 def S : ASuI<0b11101, 0b11, 0b0110, 0b11, 0,
1021 (outs SPR:$Sd), (ins SPR:$Sm),
1022 NoItinerary, !strconcat("vrint", opc), ".f32\t$Sd, $Sm",
1023 [(set (f32 SPR:$Sd), (node (f32 SPR:$Sm)))]>,
1024            Requires<[HasFPARMv8]> {
1025     let Inst{7} = op2;
1026     let Inst{16} = op;
1027   }
1028 def D : ADuI<0b11101, 0b11, 0b0110, 0b11, 0,
1029 (outs DPR:$Dd), (ins DPR:$Dm),
1030 NoItinerary, !strconcat("vrint", opc), ".f64\t$Dd, $Dm",
1031 [(set (f64 DPR:$Dd), (node (f64 DPR:$Dm)))]>,
1032            Requires<[HasFPARMv8, HasDPVFP]> {
1033     let Inst{7} = op2;
1034     let Inst{16} = op;
1035   }
1037 def : InstAlias<!strconcat("vrint", opc, "$p.f16.f16\t$Sd, $Sm"),
1038 (!cast<Instruction>(NAME#"H") SPR:$Sd, SPR:$Sm, pred:$p), 0>,
1039 Requires<[HasFullFP16]>;
1040 def : InstAlias<!strconcat("vrint", opc, "$p.f32.f32\t$Sd, $Sm"),
1041 (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm, pred:$p), 0>,
1042 Requires<[HasFPARMv8]>;
1043 def : InstAlias<!strconcat("vrint", opc, "$p.f64.f64\t$Dd, $Dm"),
1044 (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm, pred:$p), 0>,
1045 Requires<[HasFPARMv8,HasDPVFP]>;
1048 defm VRINTZ : vrint_inst_zrx<"z", 0, 1, ftrunc>;
1049 defm VRINTR : vrint_inst_zrx<"r", 0, 0, fnearbyint>;
1050 defm VRINTX : vrint_inst_zrx<"x", 1, 0, frint>;
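// vrintz truncates (ftrunc); vrintr rounds using the FPSCR rounding mode
// without raising Inexact (fnearbyint); vrintx also uses the FPSCR mode but
// does raise Inexact (frint).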
1052 multiclass vrint_inst_anpm<string opc, bits<2> rm,
1053 SDPatternOperator node = null_frag> {
1054 let PostEncoderMethod = "", DecoderNamespace = "VFPV8",
1055 isUnpredicable = 1 in {
1056 def H : AHuInp<0b11101, 0b11, 0b1000, 0b01, 0,
1057 (outs HPR:$Sd), (ins HPR:$Sm),
1058 NoItinerary, !strconcat("vrint", opc, ".f16\t$Sd, $Sm"),
1059 [(set (f16 HPR:$Sd), (node (f16 HPR:$Sm)))]>,
1060 Requires<[HasFullFP16]> {
1061 let Inst{17-16} = rm;
1063 def S : ASuInp<0b11101, 0b11, 0b1000, 0b01, 0,
1064 (outs SPR:$Sd), (ins SPR:$Sm),
1065 NoItinerary, !strconcat("vrint", opc, ".f32\t$Sd, $Sm"),
1066 [(set (f32 SPR:$Sd), (node (f32 SPR:$Sm)))]>,
1067 Requires<[HasFPARMv8]> {
1068 let Inst{17-16} = rm;
1070 def D : ADuInp<0b11101, 0b11, 0b1000, 0b01, 0,
1071 (outs DPR:$Dd), (ins DPR:$Dm),
1072 NoItinerary, !strconcat("vrint", opc, ".f64\t$Dd, $Dm"),
1073 [(set (f64 DPR:$Dd), (node (f64 DPR:$Dm)))]>,
1074 Requires<[HasFPARMv8, HasDPVFP]> {
1075 let Inst{17-16} = rm;
1079 def : InstAlias<!strconcat("vrint", opc, ".f32.f32\t$Sd, $Sm"),
1080 (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm), 0>,
1081 Requires<[HasFPARMv8]>;
1082 def : InstAlias<!strconcat("vrint", opc, ".f64.f64\t$Dd, $Dm"),
1083 (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm), 0>,
1084 Requires<[HasFPARMv8,HasDPVFP]>;
1087 defm VRINTA : vrint_inst_anpm<"a", 0b00, fround>;
1088 defm VRINTN : vrint_inst_anpm<"n", 0b01, int_arm_neon_vrintn>;
1089 defm VRINTP : vrint_inst_anpm<"p", 0b10, fceil>;
1090 defm VRINTM : vrint_inst_anpm<"m", 0b11, ffloor>;
1092 def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
1093 (outs DPR:$Dd), (ins DPR:$Dm),
1094 IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm",
1095 [(set DPR:$Dd, (fsqrt (f64 DPR:$Dm)))]>,
1096 Sched<[WriteFPSQRT64]>;
1098 def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
1099 (outs SPR:$Sd), (ins SPR:$Sm),
1100 IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm",
1101 [(set SPR:$Sd, (fsqrt SPR:$Sm))]>,
1102 Sched<[WriteFPSQRT32]>;
1104 def VSQRTH : AHuI<0b11101, 0b11, 0b0001, 0b11, 0,
1105 (outs HPR:$Sd), (ins HPR:$Sm),
1106 IIC_fpSQRT16, "vsqrt", ".f16\t$Sd, $Sm",
1107 [(set (f16 HPR:$Sd), (fsqrt (f16 HPR:$Sm)))]>;
1109 let hasSideEffects = 0 in {
1110 let isMoveReg = 1 in {
1111 def VMOVD : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
1112 (outs DPR:$Dd), (ins DPR:$Dm),
1113 IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm", []>,
1114 Requires<[HasFPRegs64]>;
1116 def VMOVS : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
1117 (outs SPR:$Sd), (ins SPR:$Sm),
1118 IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm", []>,
1119 Requires<[HasFPRegs]>;
1122 let PostEncoderMethod = "", DecoderNamespace = "VFPV8", isUnpredicable = 1 in {
1123 def VMOVH : ASuInp<0b11101, 0b11, 0b0000, 0b01, 0,
1124 (outs SPR:$Sd), (ins SPR:$Sm),
1125 IIC_fpUNA16, "vmovx.f16\t$Sd, $Sm", []>,
1126 Requires<[HasFullFP16]>;
1128 def VINSH : ASuInp<0b11101, 0b11, 0b0000, 0b11, 0,
1129 (outs SPR:$Sd), (ins SPR:$Sda, SPR:$Sm),
1130 IIC_fpUNA16, "vins.f16\t$Sd, $Sm", []>,
1131 Requires<[HasFullFP16]> {
1132 let Constraints = "$Sd = $Sda";
1135 } // PostEncoderMethod
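// vmovx.f16 copies the top half (bits [31:16]) of Sm into the bottom half of
// Sd and zeroes the top half of Sd; vins.f16 inserts the bottom half of Sm
// into the top half of Sd, leaving the bottom half of Sd unchanged.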
1138 //===----------------------------------------------------------------------===//
1139 // FP <-> GPR Copies. Int <-> FP Conversions.
1140 //===----------------------------------------------------------------------===//
1142 let isMoveReg = 1 in {
1143 def VMOVRS : AVConv2I<0b11100001, 0b1010,
1144 (outs GPR:$Rt), (ins SPR:$Sn),
1145 IIC_fpMOVSI, "vmov", "\t$Rt, $Sn",
1146 [(set GPR:$Rt, (bitconvert SPR:$Sn))]>,
1147 Requires<[HasFPRegs]>,
1148 Sched<[WriteFPMOV]> {
1149 // Instruction operands.
1153 // Encode instruction operands.
1154 let Inst{19-16} = Sn{4-1};
1155 let Inst{7} = Sn{0};
1156 let Inst{15-12} = Rt;
1158 let Inst{6-5} = 0b00;
1159 let Inst{3-0} = 0b0000;
1161 // Some single precision VFP instructions may be executed on both NEON and VFP
1163 let D = VFPNeonDomain;
1166 // Bitcast i32 -> f32. NEON prefers to use VMOVDRR.
1167 def VMOVSR : AVConv4I<0b11100000, 0b1010,
1168 (outs SPR:$Sn), (ins GPR:$Rt),
1169 IIC_fpMOVIS, "vmov", "\t$Sn, $Rt",
1170 [(set SPR:$Sn, (bitconvert GPR:$Rt))]>,
1171 Requires<[HasFPRegs, UseVMOVSR]>,
1172 Sched<[WriteFPMOV]> {
1173 // Instruction operands.
1177 // Encode instruction operands.
1178 let Inst{19-16} = Sn{4-1};
1179 let Inst{7} = Sn{0};
1180 let Inst{15-12} = Rt;
1182 let Inst{6-5} = 0b00;
1183 let Inst{3-0} = 0b0000;
1185 // Some single precision VFP instructions may be executed on both NEON and VFP
1187 let D = VFPNeonDomain;
1190 def : Pat<(arm_vmovsr GPR:$Rt), (VMOVSR GPR:$Rt)>, Requires<[HasVFP2, UseVMOVSR]>;
1192 let hasSideEffects = 0 in {
1193 def VMOVRRD : AVConv3I<0b11000101, 0b1011,
1194 (outs GPR:$Rt, GPR:$Rt2), (ins DPR:$Dm),
1195 IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $Dm",
1196 [(set GPR:$Rt, GPR:$Rt2, (arm_fmrrd DPR:$Dm))]>,
1197 Requires<[HasFPRegs]>,
1198 Sched<[WriteFPMOV]> {
1199 // Instruction operands.
1204 // Encode instruction operands.
1205 let Inst{3-0} = Dm{3-0};
1206 let Inst{5} = Dm{4};
1207 let Inst{15-12} = Rt;
1208 let Inst{19-16} = Rt2;
1210 let Inst{7-6} = 0b00;
1212 // Some single precision VFP instructions may be executed on both NEON and VFP
1214 let D = VFPNeonDomain;
1216 // This instruction is equivalent to
1217 // $Rt = EXTRACT_SUBREG $Dm, ssub_0
1218 // $Rt2 = EXTRACT_SUBREG $Dm, ssub_1
1219 let isExtractSubreg = 1;
1222 def VMOVRRS : AVConv3I<0b11000101, 0b1010,
1223 (outs GPR:$Rt, GPR:$Rt2), (ins SPR:$src1, SPR:$src2),
1224 IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $src1, $src2",
1225 [/* For disassembly only; pattern left blank */]>,
1226 Requires<[HasFPRegs]>,
1227 Sched<[WriteFPMOV]> {
1232 // Encode instruction operands.
1233 let Inst{3-0} = src1{4-1};
1234 let Inst{5} = src1{0};
1235 let Inst{15-12} = Rt;
1236 let Inst{19-16} = Rt2;
1238 let Inst{7-6} = 0b00;
1240 // Some single precision VFP instructions may be executed on both NEON and VFP
1242 let D = VFPNeonDomain;
1243 let DecoderMethod = "DecodeVMOVRRS";
1247 // FMDHR: GPR -> SPR
1248 // FMDLR: GPR -> SPR
1250 def VMOVDRR : AVConv5I<0b11000100, 0b1011,
1251 (outs DPR:$Dm), (ins GPR:$Rt, GPR:$Rt2),
1252 IIC_fpMOVID, "vmov", "\t$Dm, $Rt, $Rt2",
1253 [(set DPR:$Dm, (arm_fmdrr GPR:$Rt, GPR:$Rt2))]>,
1254 Requires<[HasFPRegs]>,
1255 Sched<[WriteFPMOV]> {
1256 // Instruction operands.
1261 // Encode instruction operands.
1262 let Inst{3-0} = Dm{3-0};
1263 let Inst{5} = Dm{4};
1264 let Inst{15-12} = Rt;
1265 let Inst{19-16} = Rt2;
1267 let Inst{7-6} = 0b00;
1269 // Some single precision VFP instructions may be executed on both NEON and VFP
1271 let D = VFPNeonDomain;
1273 // This instruction is equivalent to
1274 // $Dm = REG_SEQUENCE $Rt, ssub_0, $Rt2, ssub_1
1275 let isRegSequence = 1;
1278 // Hoist an fabs or a fneg of a value coming from integer registers
1279 // and do the fabs/fneg on the integer value. This is never a lose
1280 // and could enable the conversion to float to be removed completely.
1281 def : Pat<(fabs (arm_fmdrr GPR:$Rl, GPR:$Rh)),
1282 (VMOVDRR GPR:$Rl, (BFC GPR:$Rh, (i32 0x7FFFFFFF)))>,
1283 Requires<[IsARM, HasV6T2]>;
1284 def : Pat<(fabs (arm_fmdrr GPR:$Rl, GPR:$Rh)),
1285 (VMOVDRR GPR:$Rl, (t2BFC GPR:$Rh, (i32 0x7FFFFFFF)))>,
1286 Requires<[IsThumb2, HasV6T2]>;
1287 def : Pat<(fneg (arm_fmdrr GPR:$Rl, GPR:$Rh)),
1288           (VMOVDRR GPR:$Rl, (EORri GPR:$Rh, (i32 0x80000000)))>,
1289       Requires<[IsARM]>;
1290 def : Pat<(fneg (arm_fmdrr GPR:$Rl, GPR:$Rh)),
1291 (VMOVDRR GPR:$Rl, (t2EORri GPR:$Rh, (i32 0x80000000)))>,
1292 Requires<[IsThumb2]>;
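// These work because fabs/fneg of an f64 only touch the sign bit, which is
// bit 31 of the high word: BFC clears it and EOR with 0x80000000 flips it
// before the two halves are recombined with VMOVDRR.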
1294 let hasSideEffects = 0 in
1295 def VMOVSRR : AVConv5I<0b11000100, 0b1010,
1296 (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
1297 IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
1298 [/* For disassembly only; pattern left blank */]>,
1299 Requires<[HasFPRegs]>,
1300 Sched<[WriteFPMOV]> {
1301 // Instruction operands.
1306 // Encode instruction operands.
1307 let Inst{3-0} = dst1{4-1};
1308 let Inst{5} = dst1{0};
1309 let Inst{15-12} = src1;
1310 let Inst{19-16} = src2;
1312 let Inst{7-6} = 0b00;
1314 // Some single precision VFP instructions may be executed on both NEON and VFP
1316 let D = VFPNeonDomain;
1318 let DecoderMethod = "DecodeVMOVSRR";
1321 // Move H->R, clearing top 16 bits
1322 def VMOVRH : AVConv2I<0b11100001, 0b1001,
1323 (outs rGPR:$Rt), (ins HPR:$Sn),
1324 IIC_fpMOVSI, "vmov", ".f16\t$Rt, $Sn",
1326 Requires<[HasFPRegs16]>,
1327 Sched<[WriteFPMOV]> {
1328 // Instruction operands.
1332 // Encode instruction operands.
1333 let Inst{19-16} = Sn{4-1};
1334 let Inst{7} = Sn{0};
1335 let Inst{15-12} = Rt;
1337 let Inst{6-5} = 0b00;
1338 let Inst{3-0} = 0b0000;
1340 let isUnpredicable = 1;
1343 // Move R->H, clearing top 16 bits
1344 def VMOVHR : AVConv4I<0b11100000, 0b1001,
1345 (outs HPR:$Sn), (ins rGPR:$Rt),
1346 IIC_fpMOVIS, "vmov", ".f16\t$Sn, $Rt",
1348 Requires<[HasFPRegs16]>,
1349 Sched<[WriteFPMOV]> {
1350 // Instruction operands.
1354 // Encode instruction operands.
1355 let Inst{19-16} = Sn{4-1};
1356 let Inst{7} = Sn{0};
1357 let Inst{15-12} = Rt;
1359 let Inst{6-5} = 0b00;
1360 let Inst{3-0} = 0b0000;
1362 let isUnpredicable = 1;
1365 def : FPRegs16Pat<(arm_vmovrh (f16 HPR:$Sn)), (VMOVRH (f16 HPR:$Sn))>;
1366 def : FPRegs16Pat<(arm_vmovrh (bf16 HPR:$Sn)), (VMOVRH (bf16 HPR:$Sn))>;
1367 def : FPRegs16Pat<(f16 (arm_vmovhr rGPR:$Rt)), (VMOVHR rGPR:$Rt)>;
1368 def : FPRegs16Pat<(bf16 (arm_vmovhr rGPR:$Rt)), (VMOVHR rGPR:$Rt)>;
1370 // FMRDH: SPR -> GPR
1371 // FMRDL: SPR -> GPR
1372 // FMRRS: SPR -> GPR
1373 // FMRX: SPR system reg -> GPR
1374 // FMSRR: GPR -> SPR
1375 // FMXR: GPR -> VFP system reg
1380 class AVConv1IDs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1381 bits<4> opcod4, dag oops, dag iops,
1382 InstrItinClass itin, string opc, string asm,
1384 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1386 // Instruction operands.
1390 // Encode instruction operands.
1391 let Inst{3-0} = Sm{4-1};
1392 let Inst{5} = Sm{0};
1393 let Inst{15-12} = Dd{3-0};
1394 let Inst{22} = Dd{4};
1396 let Predicates = [HasVFP2, HasDPVFP];
1397 let hasSideEffects = 0;
1400 class AVConv1InSs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1401 bits<4> opcod4, dag oops, dag iops,InstrItinClass itin,
1402 string opc, string asm, list<dag> pattern>
1403 : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1405 // Instruction operands.
1409 // Encode instruction operands.
1410 let Inst{3-0} = Sm{4-1};
1411 let Inst{5} = Sm{0};
1412 let Inst{15-12} = Sd{4-1};
1413 let Inst{22} = Sd{0};
1415 let hasSideEffects = 0;
1418 class AVConv1IHs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1419 bits<4> opcod4, dag oops, dag iops,
1420 InstrItinClass itin, string opc, string asm,
1422 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1424 // Instruction operands.
1428 // Encode instruction operands.
1429 let Inst{3-0} = Sm{4-1};
1430 let Inst{5} = Sm{0};
1431 let Inst{15-12} = Sd{4-1};
1432 let Inst{22} = Sd{0};
1434 let Predicates = [HasFullFP16];
1435 let hasSideEffects = 0;
1438 def VSITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
1439 (outs DPR:$Dd), (ins SPR:$Sm),
1440 IIC_fpCVTID, "vcvt", ".f64.s32\t$Dd, $Sm",
1442 Sched<[WriteFPCVT]> {
1443 let Inst{7} = 1; // s32
1446 let Predicates=[HasVFP2, HasDPVFP] in {
1447 def : VFPPat<(f64 (sint_to_fp GPR:$a)),
1448 (VSITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;
1450 def : VFPPat<(f64 (sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
1451 (VSITOD (VLDRS addrmode5:$a))>;
1454 def VSITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
1455 (outs SPR:$Sd),(ins SPR:$Sm),
1456 IIC_fpCVTIS, "vcvt", ".f32.s32\t$Sd, $Sm",
1458 Sched<[WriteFPCVT]> {
1459 let Inst{7} = 1; // s32
1461 // Some single precision VFP instructions may be executed on both NEON and
1462 // VFP pipelines on A8.
1463 let D = VFPNeonA8Domain;
1466 def : VFPNoNEONPat<(f32 (sint_to_fp GPR:$a)),
1467 (VSITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;
1469 def : VFPNoNEONPat<(f32 (sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
1470 (VSITOS (VLDRS addrmode5:$a))>;
1472 def VSITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
1473 (outs HPR:$Sd), (ins SPR:$Sm),
1474 IIC_fpCVTIH, "vcvt", ".f16.s32\t$Sd, $Sm",
1476 Sched<[WriteFPCVT]> {
1477 let Inst{7} = 1; // s32
1478 let isUnpredicable = 1;
1481 def : VFPNoNEONPat<(f16 (sint_to_fp GPR:$a)),
1482 (VSITOH (COPY_TO_REGCLASS GPR:$a, SPR))>;
1484 def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
1485 (outs DPR:$Dd), (ins SPR:$Sm),
1486 IIC_fpCVTID, "vcvt", ".f64.u32\t$Dd, $Sm",
1488 Sched<[WriteFPCVT]> {
1489 let Inst{7} = 0; // u32
1492 let Predicates=[HasVFP2, HasDPVFP] in {
1493 def : VFPPat<(f64 (uint_to_fp GPR:$a)),
1494 (VUITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;
1496 def : VFPPat<(f64 (uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
1497 (VUITOD (VLDRS addrmode5:$a))>;
1500 def VUITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
1501 (outs SPR:$Sd), (ins SPR:$Sm),
1502 IIC_fpCVTIS, "vcvt", ".f32.u32\t$Sd, $Sm",
1504 Sched<[WriteFPCVT]> {
1505 let Inst{7} = 0; // u32
1507 // Some single precision VFP instructions may be executed on both NEON and
1508 // VFP pipelines on A8.
1509 let D = VFPNeonA8Domain;
1512 def : VFPNoNEONPat<(f32 (uint_to_fp GPR:$a)),
1513 (VUITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;
1515 def : VFPNoNEONPat<(f32 (uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
1516 (VUITOS (VLDRS addrmode5:$a))>;
1518 def VUITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
1519 (outs HPR:$Sd), (ins SPR:$Sm),
1520 IIC_fpCVTIH, "vcvt", ".f16.u32\t$Sd, $Sm",
1522 Sched<[WriteFPCVT]> {
1523 let Inst{7} = 0; // u32
1524 let isUnpredicable = 1;
1527 def : VFPNoNEONPat<(f16 (uint_to_fp GPR:$a)),
1528 (VUITOH (COPY_TO_REGCLASS GPR:$a, SPR))>;
1532 class AVConv1IsD_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1533 bits<4> opcod4, dag oops, dag iops,
1534 InstrItinClass itin, string opc, string asm,
1536 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1538 // Instruction operands.
1542 // Encode instruction operands.
1543 let Inst{3-0} = Dm{3-0};
1544 let Inst{5} = Dm{4};
1545 let Inst{15-12} = Sd{4-1};
1546 let Inst{22} = Sd{0};
1548 let Predicates = [HasVFP2, HasDPVFP];
1549 let hasSideEffects = 0;
1552 class AVConv1InsS_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1553 bits<4> opcod4, dag oops, dag iops,
1554 InstrItinClass itin, string opc, string asm,
1556 : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1558 // Instruction operands.
1562 // Encode instruction operands.
1563 let Inst{3-0} = Sm{4-1};
1564 let Inst{5} = Sm{0};
1565 let Inst{15-12} = Sd{4-1};
1566 let Inst{22} = Sd{0};
1568 let hasSideEffects = 0;
1571 class AVConv1IsH_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1572 bits<4> opcod4, dag oops, dag iops,
1573 InstrItinClass itin, string opc, string asm,
1575 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1577 // Instruction operands.
1581 // Encode instruction operands.
1582 let Inst{3-0} = Sm{4-1};
1583 let Inst{5} = Sm{0};
1584 let Inst{15-12} = Sd{4-1};
1585 let Inst{22} = Sd{0};
1587 let Predicates = [HasFullFP16];
1588 let hasSideEffects = 0;
1591 // Always set Z bit in the instruction, i.e. "round towards zero" variants.
1592 def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
1593 (outs SPR:$Sd), (ins DPR:$Dm),
1594 IIC_fpCVTDI, "vcvt", ".s32.f64\t$Sd, $Dm",
1596 Sched<[WriteFPCVT]> {
1597 let Inst{7} = 1; // Z bit
1600 let Predicates=[HasVFP2, HasDPVFP] in {
1601 def : VFPPat<(i32 (fp_to_sint (f64 DPR:$a))),
1602 (COPY_TO_REGCLASS (VTOSIZD DPR:$a), GPR)>;
1604 def : VFPPat<(alignedstore32 (i32 (fp_to_sint (f64 DPR:$a))), addrmode5:$ptr),
1605 (VSTRS (VTOSIZD DPR:$a), addrmode5:$ptr)>;
1608 def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
1609 (outs SPR:$Sd), (ins SPR:$Sm),
1610 IIC_fpCVTSI, "vcvt", ".s32.f32\t$Sd, $Sm",
1612 Sched<[WriteFPCVT]> {
1613 let Inst{7} = 1; // Z bit
1615 // Some single precision VFP instructions may be executed on both NEON and
1616 // VFP pipelines on A8.
1617 let D = VFPNeonA8Domain;
1620 def : VFPNoNEONPat<(i32 (fp_to_sint SPR:$a)),
1621 (COPY_TO_REGCLASS (VTOSIZS SPR:$a), GPR)>;
1623 def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_sint (f32 SPR:$a))),
1624                    addrmode5:$ptr),
1625 (VSTRS (VTOSIZS SPR:$a), addrmode5:$ptr)>;
1627 def VTOSIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001,
1628 (outs SPR:$Sd), (ins HPR:$Sm),
1629 IIC_fpCVTHI, "vcvt", ".s32.f16\t$Sd, $Sm",
1631 Sched<[WriteFPCVT]> {
1632 let Inst{7} = 1; // Z bit
1633 let isUnpredicable = 1;
1636 def : VFPNoNEONPat<(i32 (fp_to_sint (f16 HPR:$a))),
1637 (COPY_TO_REGCLASS (VTOSIZH (f16 HPR:$a)), GPR)>;
1639 def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
1640 (outs SPR:$Sd), (ins DPR:$Dm),
1641 IIC_fpCVTDI, "vcvt", ".u32.f64\t$Sd, $Dm",
1643 Sched<[WriteFPCVT]> {
1644 let Inst{7} = 1; // Z bit
1647 let Predicates=[HasVFP2, HasDPVFP] in {
1648 def : VFPPat<(i32 (fp_to_uint (f64 DPR:$a))),
1649 (COPY_TO_REGCLASS (VTOUIZD DPR:$a), GPR)>;
1651 def : VFPPat<(alignedstore32 (i32 (fp_to_uint (f64 DPR:$a))), addrmode5:$ptr),
1652 (VSTRS (VTOUIZD DPR:$a), addrmode5:$ptr)>;
1655 def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
1656 (outs SPR:$Sd), (ins SPR:$Sm),
1657 IIC_fpCVTSI, "vcvt", ".u32.f32\t$Sd, $Sm",
1659 Sched<[WriteFPCVT]> {
1660 let Inst{7} = 1; // Z bit
1662 // Some single precision VFP instructions may be executed on both NEON and
1663 // VFP pipelines on A8.
1664 let D = VFPNeonA8Domain;
1667 def : VFPNoNEONPat<(i32 (fp_to_uint SPR:$a)),
1668 (COPY_TO_REGCLASS (VTOUIZS SPR:$a), GPR)>;
1670 def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_uint (f32 SPR:$a))),
1671                    addrmode5:$ptr),
1672 (VSTRS (VTOUIZS SPR:$a), addrmode5:$ptr)>;
1674 def VTOUIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001,
1675 (outs SPR:$Sd), (ins HPR:$Sm),
1676 IIC_fpCVTHI, "vcvt", ".u32.f16\t$Sd, $Sm",
1678 Sched<[WriteFPCVT]> {
1679 let Inst{7} = 1; // Z bit
1680 let isUnpredicable = 1;
1683 def : VFPNoNEONPat<(i32 (fp_to_uint (f16 HPR:$a))),
1684 (COPY_TO_REGCLASS (VTOUIZH (f16 HPR:$a)), GPR)>;
1686 // And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
1687 let Uses = [FPSCR] in {
1688 def VTOSIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
1689 (outs SPR:$Sd), (ins DPR:$Dm),
1690 IIC_fpCVTDI, "vcvtr", ".s32.f64\t$Sd, $Dm",
1691 [(set SPR:$Sd, (int_arm_vcvtr (f64 DPR:$Dm)))]>,
1692 Sched<[WriteFPCVT]> {
1693 let Inst{7} = 0; // Z bit
1696 def VTOSIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
1697 (outs SPR:$Sd), (ins SPR:$Sm),
1698 IIC_fpCVTSI, "vcvtr", ".s32.f32\t$Sd, $Sm",
1699 [(set SPR:$Sd, (int_arm_vcvtr SPR:$Sm))]>,
1700 Sched<[WriteFPCVT]> {
1701 let Inst{7} = 0; // Z bit
1704 def VTOSIRH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001,
1705 (outs SPR:$Sd), (ins SPR:$Sm),
1706 IIC_fpCVTHI, "vcvtr", ".s32.f16\t$Sd, $Sm",
1708 Sched<[WriteFPCVT]> {
1709 let Inst{7} = 0; // Z bit
1710 let isUnpredicable = 1;
1713 def VTOUIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
1714 (outs SPR:$Sd), (ins DPR:$Dm),
1715 IIC_fpCVTDI, "vcvtr", ".u32.f64\t$Sd, $Dm",
1716 [(set SPR:$Sd, (int_arm_vcvtru(f64 DPR:$Dm)))]>,
1717 Sched<[WriteFPCVT]> {
1718 let Inst{7} = 0; // Z bit
1721 def VTOUIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
1722 (outs SPR:$Sd), (ins SPR:$Sm),
1723 IIC_fpCVTSI, "vcvtr", ".u32.f32\t$Sd, $Sm",
1724 [(set SPR:$Sd, (int_arm_vcvtru SPR:$Sm))]>,
1725 Sched<[WriteFPCVT]> {
1726 let Inst{7} = 0; // Z bit
1729 def VTOUIRH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001,
1730 (outs SPR:$Sd), (ins SPR:$Sm),
1731 IIC_fpCVTHI, "vcvtr", ".u32.f16\t$Sd, $Sm",
1733 Sched<[WriteFPCVT]> {
1734 let Inst{7} = 0; // Z bit
1735 let isUnpredicable = 1;
1739 // v8.3-a Javascript Convert to Signed fixed-point
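// vjcvt performs the JavaScript-style conversion: it rounds towards zero and,
// on overflow, produces the low 32 bits of the integer (modulo 2^32) rather
// than a saturated value.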
1740 def VJCVT : AVConv1IsD_Encode<0b11101, 0b11, 0b1001, 0b1011,
1741 (outs SPR:$Sd), (ins DPR:$Dm),
1742 IIC_fpCVTDI, "vjcvt", ".s32.f64\t$Sd, $Dm",
1744 Requires<[HasFPARMv8, HasV8_3a]> {
1745   let Inst{7} = 1; // Z bit
1746 }
1748 // Convert between floating-point and fixed-point
1749 // Data type for fixed-point naming convention:
1750 // S16 (U=0, sx=0) -> SH
1751 // U16 (U=1, sx=0) -> UH
1752 // S32 (U=0, sx=1) -> SL
1753 // U32 (U=1, sx=1) -> UL
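// A fixed-point operand represents integer / 2^fbits, so converting the f32
// value 1.5 with "vcvt.s32.f32 s0, s0, #8" produces 1.5 * 2^8 = 384.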
1755 let Constraints = "$a = $dst" in {
1757 // FP to Fixed-Point:
1759 // Single Precision register
1760 class AVConv1XInsS_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
1761 bit op5, dag oops, dag iops, InstrItinClass itin,
1762 string opc, string asm, list<dag> pattern>
1763 : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
1765 // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
1766 let Inst{22} = dst{0};
1767 let Inst{15-12} = dst{4-1};
1769 let hasSideEffects = 0;
1772 // Double Precision register
1773 class AVConv1XInsD_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
1774 bit op5, dag oops, dag iops, InstrItinClass itin,
1775 string opc, string asm, list<dag> pattern>
1776 : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
1778 // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
1779 let Inst{22} = dst{4};
1780 let Inst{15-12} = dst{3-0};
1782 let hasSideEffects = 0;
1783 let Predicates = [HasVFP2, HasDPVFP];
1786 let isUnpredicable = 1 in {
1788 def VTOSHH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1001, 0,
1789 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1790 IIC_fpCVTHI, "vcvt", ".s16.f16\t$dst, $a, $fbits", []>,
1791 Requires<[HasFullFP16]>,
1792 Sched<[WriteFPCVT]>;
1794 def VTOUHH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1001, 0,
1795 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1796 IIC_fpCVTHI, "vcvt", ".u16.f16\t$dst, $a, $fbits", []>,
1797 Requires<[HasFullFP16]>,
1798 Sched<[WriteFPCVT]>;
1800 def VTOSLH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1001, 1,
1801 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1802 IIC_fpCVTHI, "vcvt", ".s32.f16\t$dst, $a, $fbits", []>,
1803 Requires<[HasFullFP16]>,
1804 Sched<[WriteFPCVT]>;
1806 def VTOULH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1001, 1,
1807 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1808 IIC_fpCVTHI, "vcvt", ".u32.f16\t$dst, $a, $fbits", []>,
1809 Requires<[HasFullFP16]>,
1810 Sched<[WriteFPCVT]>;
1812 } // End of 'let isUnpredicable = 1 in'
1814 def VTOSHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 0,
1815 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1816 IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits", []>,
1817 Sched<[WriteFPCVT]> {
1818 // Some single precision VFP instructions may be executed on both NEON and
1819 // VFP pipelines on A8.
1820 let D = VFPNeonA8Domain;
1823 def VTOUHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 0,
1824 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1825 IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits", []>,
1826 Sched<[WriteFPCVT]> {
1827 // Some single precision VFP instructions may be executed on both NEON and
1828 // VFP pipelines on A8.
1829 let D = VFPNeonA8Domain;
1832 def VTOSLS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 1,
1833 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1834 IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits", []>,
1835 Sched<[WriteFPCVT]> {
1836 // Some single precision VFP instructions may be executed on both NEON and
1837 // VFP pipelines on A8.
1838 let D = VFPNeonA8Domain;
1841 def VTOULS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 1,
1842 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1843 IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits", []>,
1844 Sched<[WriteFPCVT]> {
1845 // Some single precision VFP instructions may be executed on both NEON and
1846 // VFP pipelines on A8.
1847 let D = VFPNeonA8Domain;
1850 def VTOSHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 0,
1851 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
1852 IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits", []>,
1853 Sched<[WriteFPCVT]>;
1855 def VTOUHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 0,
1856 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
1857 IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits", []>,
1858 Sched<[WriteFPCVT]>;
1860 def VTOSLD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 1,
1861 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
1862 IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits", []>,
1863 Sched<[WriteFPCVT]>;
1865 def VTOULD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 1,
1866 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
1867 IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits", []>,
1868 Sched<[WriteFPCVT]>;
1870 // Fixed-Point to FP:
1872 let isUnpredicable = 1 in {
1874 def VSHTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1001, 0,
1875 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1876 IIC_fpCVTIH, "vcvt", ".f16.s16\t$dst, $a, $fbits", []>,
1877 Requires<[HasFullFP16]>,
1878 Sched<[WriteFPCVT]>;
1880 def VUHTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1001, 0,
1881 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1882 IIC_fpCVTIH, "vcvt", ".f16.u16\t$dst, $a, $fbits", []>,
1883 Requires<[HasFullFP16]>,
1884 Sched<[WriteFPCVT]>;
1886 def VSLTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1001, 1,
1887 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1888 IIC_fpCVTIH, "vcvt", ".f16.s32\t$dst, $a, $fbits", []>,
1889 Requires<[HasFullFP16]>,
1890 Sched<[WriteFPCVT]>;
1892 def VULTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1001, 1,
1893 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1894 IIC_fpCVTIH, "vcvt", ".f16.u32\t$dst, $a, $fbits", []>,
1895 Requires<[HasFullFP16]>,
1896 Sched<[WriteFPCVT]>;
1898 } // End of 'let isUnpredicable = 1 in'
1900 def VSHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 0,
1901 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1902 IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits", []>,
1903 Sched<[WriteFPCVT]> {
1904 // Some single precision VFP instructions may be executed on both NEON and
1905 // VFP pipelines on A8.
1906 let D = VFPNeonA8Domain;
1909 def VUHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 0,
1910 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1911 IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits", []>,
1912 Sched<[WriteFPCVT]> {
1913 // Some single precision VFP instructions may be executed on both NEON and
1914 // VFP pipelines on A8.
1915 let D = VFPNeonA8Domain;
1918 def VSLTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 1,
1919 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1920 IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits", []>,
1921 Sched<[WriteFPCVT]> {
1922 // Some single precision VFP instructions may be executed on both NEON and
1923 // VFP pipelines on A8.
1924 let D = VFPNeonA8Domain;
1927 def VULTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 1,
1928 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1929 IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits", []>,
1930 Sched<[WriteFPCVT]> {
1931 // Some single precision VFP instructions may be executed on both NEON and
1932 // VFP pipelines on A8.
1933 let D = VFPNeonA8Domain;
1936 def VSHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 0,
1937 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
1938 IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits", []>,
1939 Sched<[WriteFPCVT]>;
1941 def VUHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 0,
1942 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
1943 IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits", []>,
1944 Sched<[WriteFPCVT]>;
1946 def VSLTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 1,
1947 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
1948 IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits", []>,
1949 Sched<[WriteFPCVT]>;
1951 def VULTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 1,
1952 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
1953 IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits", []>,
1954 Sched<[WriteFPCVT]>;
1956 } // End of 'let Constraints = "$a = $dst" in'
1958 // BFloat16 - Single precision, unary, predicated
1959 class BF16_VCVT<string opc, bits<2> op7_6>
1960 : VFPAI<(outs SPR:$Sd), (ins SPR:$dst, SPR:$Sm),
1961 VFPUnaryFrm, NoItinerary,
1962 opc, ".bf16.f32\t$Sd, $Sm", []>,
1963 RegConstraint<"$dst = $Sd">,
1964 Requires<[HasBF16]>,
1969 // Encode instruction operands.
1970 let Inst{3-0} = Sm{4-1};
1971 let Inst{5} = Sm{0};
1972 let Inst{15-12} = Sd{4-1};
1973 let Inst{22} = Sd{0};
1975 let Inst{27-23} = 0b11101; // opcode1
1976 let Inst{21-20} = 0b11; // opcode2
1977 let Inst{19-16} = 0b0011; // opcode3
1978 let Inst{11-8} = 0b1001;
1979 let Inst{7-6} = op7_6;
1982 let DecoderNamespace = "VFPV8";
1983 let hasSideEffects = 0;
1986 def BF16_VCVTB : BF16_VCVT<"vcvtb", 0b01>;
1987 def BF16_VCVTT : BF16_VCVT<"vcvtt", 0b11>;
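// VCVTB writes the converted bfloat16 into the bottom half of $Sd and VCVTT
// into the top half, leaving the other half unchanged (hence the $dst = $Sd
// constraint above). A rough C sketch of the scalar f32 -> bf16 rounding
// (round-to-nearest-even on the upper 16 bits of the f32 encoding; NaN
// special-casing and exception flags are omitted, helper name is illustrative):
//
//   #include <stdint.h>
//   #include <string.h>
//
//   static uint16_t f32_to_bf16(float f) {
//     uint32_t bits;
//     memcpy(&bits, &f, sizeof bits);                  // f32 bit pattern
//     uint32_t round = 0x7FFFu + ((bits >> 16) & 1u);  // ties-to-even increment
//     return (uint16_t)((bits + round) >> 16);         // keep the top 16 bits
//   }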
1989 //===----------------------------------------------------------------------===//
1990 // FP Multiply-Accumulate Operations.
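// Note: the fmul_su and fadd_mlx/fsub_mlx fragments used below are defined with
// the other ARM pattern fragments; roughly, fmul_su only matches a multiply
// whose result has a single use, so folding it into a multiply-accumulate never
// duplicates the multiply, and the _mlx add/sub fragments (together with the
// UseFPVMLx / UseFusedMAC predicates) keep these patterns from firing when the
// separate multiply and add are preferable.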
1993 def VMLAD : ADbI<0b11100, 0b00, 0, 0,
1994 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
1995 IIC_fpMAC64, "vmla", ".f64\t$Dd, $Dn, $Dm",
1996 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
1997 (f64 DPR:$Ddin)))]>,
1998 RegConstraint<"$Ddin = $Dd">,
1999 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
2000 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2002 def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
2003 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2004 IIC_fpMAC32, "vmla", ".f32\t$Sd, $Sn, $Sm",
2005 [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
                                           SPR:$Sdin))]>,
2007 RegConstraint<"$Sdin = $Sd">,
2008 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
2009 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
2010 // Some single precision VFP instructions may be executed on both NEON and
2011 // VFP pipelines on A8.
2012 let D = VFPNeonA8Domain;
2015 def VMLAH : AHbI<0b11100, 0b00, 0, 0,
2016 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2017 IIC_fpMAC16, "vmla", ".f16\t$Sd, $Sn, $Sm",
2018 [(set (f16 HPR:$Sd), (fadd_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)),
2019 (f16 HPR:$Sdin)))]>,
2020 RegConstraint<"$Sdin = $Sd">,
2021 Requires<[HasFullFP16,UseFPVMLx]>;
2023 def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
2024 (VMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
2025 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
2026 def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
2027 (VMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
2028 Requires<[HasVFP2,DontUseNEONForFP, UseFPVMLx]>;
2029 def : Pat<(fadd_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)),
2030 (VMLAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
2031 Requires<[HasFullFP16,DontUseNEONForFP, UseFPVMLx]>;
2034 def VMLSD : ADbI<0b11100, 0b00, 1, 0,
2035 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2036 IIC_fpMAC64, "vmls", ".f64\t$Dd, $Dn, $Dm",
2037 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
2038 (f64 DPR:$Ddin)))]>,
2039 RegConstraint<"$Ddin = $Dd">,
2040 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
2041 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2043 def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
2044 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2045 IIC_fpMAC32, "vmls", ".f32\t$Sd, $Sn, $Sm",
2046 [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
2048 RegConstraint<"$Sdin = $Sd">,
2049 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
2050 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
2051 // Some single precision VFP instructions may be executed on both NEON and
2052 // VFP pipelines on A8.
2053 let D = VFPNeonA8Domain;
2056 def VMLSH : AHbI<0b11100, 0b00, 1, 0,
2057 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2058 IIC_fpMAC16, "vmls", ".f16\t$Sd, $Sn, $Sm",
2059 [(set (f16 HPR:$Sd), (fadd_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))),
2060 (f16 HPR:$Sdin)))]>,
2061 RegConstraint<"$Sdin = $Sd">,
2062 Requires<[HasFullFP16,UseFPVMLx]>;
2064 def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
2065 (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
2066 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
2067 def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
2068 (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
2069 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
2070 def : Pat<(fsub_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)),
2071 (VMLSH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
2072 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
2074 def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
2075 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2076 IIC_fpMAC64, "vnmla", ".f64\t$Dd, $Dn, $Dm",
2077 [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
2078 (f64 DPR:$Ddin)))]>,
2079 RegConstraint<"$Ddin = $Dd">,
2080 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
2081 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2083 def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
2084 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2085 IIC_fpMAC32, "vnmla", ".f32\t$Sd, $Sn, $Sm",
2086 [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
2088 RegConstraint<"$Sdin = $Sd">,
2089 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
2090 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
2091 // Some single precision VFP instructions may be executed on both NEON and
2092 // VFP pipelines on A8.
2093 let D = VFPNeonA8Domain;
2096 def VNMLAH : AHbI<0b11100, 0b01, 1, 0,
2097 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2098 IIC_fpMAC16, "vnmla", ".f16\t$Sd, $Sn, $Sm",
2099 [(set (f16 HPR:$Sd), (fsub_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))),
2100 (f16 HPR:$Sdin)))]>,
2101 RegConstraint<"$Sdin = $Sd">,
2102 Requires<[HasFullFP16,UseFPVMLx]>;
2104 // (-(a * b) - dst) -> -(dst + (a * b))
2105 def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
2106 (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
2107 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
2108 def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
2109 (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
2110 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
2111 def : Pat<(fsub_mlx (fneg (fmul_su (f16 HPR:$a), HPR:$b)), HPR:$dstin),
2112 (VNMLAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
2113 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
2115 // (-dst - (a * b)) -> -(dst + (a * b))
2116 def : Pat<(fsub_mlx (fneg DPR:$dstin), (fmul_su DPR:$a, (f64 DPR:$b))),
2117 (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
2118 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
2119 def : Pat<(fsub_mlx (fneg SPR:$dstin), (fmul_su SPR:$a, SPR:$b)),
2120 (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
2121 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
2122 def : Pat<(fsub_mlx (fneg HPR:$dstin), (fmul_su (f16 HPR:$a), HPR:$b)),
2123 (VNMLAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
2124 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
2126 def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
2127 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2128 IIC_fpMAC64, "vnmls", ".f64\t$Dd, $Dn, $Dm",
2129 [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
2130 (f64 DPR:$Ddin)))]>,
2131 RegConstraint<"$Ddin = $Dd">,
2132 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
2133 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2135 def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
2136 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2137 IIC_fpMAC32, "vnmls", ".f32\t$Sd, $Sn, $Sm",
2138 [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
2139 RegConstraint<"$Sdin = $Sd">,
2140 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
2141 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
2142 // Some single precision VFP instructions may be executed on both NEON and
2143 // VFP pipelines on A8.
2144 let D = VFPNeonA8Domain;
2147 def VNMLSH : AHbI<0b11100, 0b01, 0, 0,
2148 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2149 IIC_fpMAC16, "vnmls", ".f16\t$Sd, $Sn, $Sm",
2150 [(set (f16 HPR:$Sd), (fsub_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)), (f16 HPR:$Sdin)))]>,
2151 RegConstraint<"$Sdin = $Sd">,
2152 Requires<[HasFullFP16,UseFPVMLx]>;
2154 def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
2155 (VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
2156 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
2157 def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
2158 (VNMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
2159 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
2160 def : Pat<(fsub_mlx (fmul_su (f16 HPR:$a), HPR:$b), HPR:$dstin),
2161 (VNMLSH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
2162 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
2164 //===----------------------------------------------------------------------===//
2165 // Fused FP Multiply-Accumulate Operations.
2167 def VFMAD : ADbI<0b11101, 0b10, 0, 0,
2168 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2169 IIC_fpFMAC64, "vfma", ".f64\t$Dd, $Dn, $Dm",
2170 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
2171 (f64 DPR:$Ddin)))]>,
2172 RegConstraint<"$Ddin = $Dd">,
2173 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
2174 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2176 def VFMAS : ASbIn<0b11101, 0b10, 0, 0,
2177 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2178 IIC_fpFMAC32, "vfma", ".f32\t$Sd, $Sn, $Sm",
2179 [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
                                           SPR:$Sdin))]>,
2181 RegConstraint<"$Sdin = $Sd">,
2182 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
2183 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
2184 // Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines.
}
2188 def VFMAH : AHbI<0b11101, 0b10, 0, 0,
2189 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2190 IIC_fpFMAC16, "vfma", ".f16\t$Sd, $Sn, $Sm",
2191 [(set (f16 HPR:$Sd), (fadd_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)),
2192 (f16 HPR:$Sdin)))]>,
2193 RegConstraint<"$Sdin = $Sd">,
2194 Requires<[HasFullFP16,UseFusedMAC]>,
2195 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2197 def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
2198 (VFMAD DPR:$dstin, DPR:$a, DPR:$b)>,
2199 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
2200 def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
2201 (VFMAS SPR:$dstin, SPR:$a, SPR:$b)>,
2202 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
2203 def : Pat<(fadd_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)),
2204 (VFMAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
2205 Requires<[HasFullFP16,DontUseNEONForFP,UseFusedMAC]>;
2207 // Match @llvm.fma.* intrinsics
2208 // (fma x, y, z) -> (vfma z, x, y)
2209 def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, DPR:$Ddin)),
2210 (VFMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2211 Requires<[HasVFP4,HasDPVFP]>;
2212 def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, SPR:$Sdin)),
2213 (VFMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2214 Requires<[HasVFP4]>;
2215 def : Pat<(f16 (fma HPR:$Sn, HPR:$Sm, (f16 HPR:$Sdin))),
2216 (VFMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
2217 Requires<[HasFullFP16]>;
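// Unlike the VMLA/VMLS family above, these fused forms perform the multiply and
// the accumulate with a single rounding, which is why only they can implement
// @llvm.fma.*. A small C illustration of the difference (illustrative only):
//
//   #include <math.h>
//
//   float mla(float acc, float a, float b)  { return acc + a * b; }     // two roundings
//   float fma1(float acc, float a, float b) { return fmaf(a, b, acc); } // one rounding
//
// For some inputs the two functions return different values, so the
// fadd/fmul-based patterns above are additionally gated on UseFusedMAC while
// the fma patterns are not.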
2219 def VFMSD : ADbI<0b11101, 0b10, 1, 0,
2220 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2221 IIC_fpFMAC64, "vfms", ".f64\t$Dd, $Dn, $Dm",
2222 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
2223 (f64 DPR:$Ddin)))]>,
2224 RegConstraint<"$Ddin = $Dd">,
2225 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
2226 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2228 def VFMSS : ASbIn<0b11101, 0b10, 1, 0,
2229 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2230 IIC_fpFMAC32, "vfms", ".f32\t$Sd, $Sn, $Sm",
2231 [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
2233 RegConstraint<"$Sdin = $Sd">,
2234 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
2235 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
2236 // Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines.
}
2240 def VFMSH : AHbI<0b11101, 0b10, 1, 0,
2241 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2242 IIC_fpFMAC16, "vfms", ".f16\t$Sd, $Sn, $Sm",
2243 [(set (f16 HPR:$Sd), (fadd_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))),
2244 (f16 HPR:$Sdin)))]>,
2245 RegConstraint<"$Sdin = $Sd">,
2246 Requires<[HasFullFP16,UseFusedMAC]>,
2247 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2249 def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
2250 (VFMSD DPR:$dstin, DPR:$a, DPR:$b)>,
2251 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
2252 def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
2253 (VFMSS SPR:$dstin, SPR:$a, SPR:$b)>,
2254 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
2255 def : Pat<(fsub_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)),
2256 (VFMSH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
2257 Requires<[HasFullFP16,DontUseNEONForFP,UseFusedMAC]>;
2259 // Match @llvm.fma.* intrinsics
2260 // (fma (fneg x), y, z) -> (vfms z, x, y)
2261 def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin)),
2262 (VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2263 Requires<[HasVFP4,HasDPVFP]>;
2264 def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin)),
2265 (VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2266 Requires<[HasVFP4]>;
2267 def : Pat<(f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (f16 HPR:$Sdin))),
2268 (VFMSH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
2269 Requires<[HasFullFP16]>;
2271 def VFNMAD : ADbI<0b11101, 0b01, 1, 0,
2272 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2273 IIC_fpFMAC64, "vfnma", ".f64\t$Dd, $Dn, $Dm",
2274 [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
2275 (f64 DPR:$Ddin)))]>,
2276 RegConstraint<"$Ddin = $Dd">,
2277 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
2278 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2280 def VFNMAS : ASbI<0b11101, 0b01, 1, 0,
2281 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2282 IIC_fpFMAC32, "vfnma", ".f32\t$Sd, $Sn, $Sm",
2283 [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
2285 RegConstraint<"$Sdin = $Sd">,
2286 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
2287 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
2288 // Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines.
}
2292 def VFNMAH : AHbI<0b11101, 0b01, 1, 0,
2293 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2294 IIC_fpFMAC16, "vfnma", ".f16\t$Sd, $Sn, $Sm",
2295 [(set (f16 HPR:$Sd), (fsub_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))),
2296 (f16 HPR:$Sdin)))]>,
2297 RegConstraint<"$Sdin = $Sd">,
2298 Requires<[HasFullFP16,UseFusedMAC]>,
2299 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2301 def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
2302 (VFNMAD DPR:$dstin, DPR:$a, DPR:$b)>,
2303 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
2304 def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
2305 (VFNMAS SPR:$dstin, SPR:$a, SPR:$b)>,
2306 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
2308 // Match @llvm.fma.* intrinsics
2309 // (fneg (fma x, y, z)) -> (vfnma z, x, y)
2310 def : Pat<(fneg (fma (f64 DPR:$Dn), (f64 DPR:$Dm), (f64 DPR:$Ddin))),
2311 (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2312 Requires<[HasVFP4,HasDPVFP]>;
2313 def : Pat<(fneg (fma (f32 SPR:$Sn), (f32 SPR:$Sm), (f32 SPR:$Sdin))),
2314 (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2315 Requires<[HasVFP4]>;
2316 def : Pat<(fneg (fma (f16 HPR:$Sn), (f16 HPR:$Sm), (f16 HPR:$Sdin))),
2317 (VFNMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
2318 Requires<[HasFullFP16]>;
2319 // (fma (fneg x), y, (fneg z)) -> (vfnma z, x, y)
2320 def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, (fneg DPR:$Ddin))),
2321 (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2322 Requires<[HasVFP4,HasDPVFP]>;
2323 def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, (fneg SPR:$Sdin))),
2324 (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2325 Requires<[HasVFP4]>;
2326 def : Pat<(f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (fneg (f16 HPR:$Sdin)))),
2327 (VFNMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
2328 Requires<[HasFullFP16]>;
2330 def VFNMSD : ADbI<0b11101, 0b01, 0, 0,
2331 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2332 IIC_fpFMAC64, "vfnms", ".f64\t$Dd, $Dn, $Dm",
2333 [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
2334 (f64 DPR:$Ddin)))]>,
2335 RegConstraint<"$Ddin = $Dd">,
2336 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
2337 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2339 def VFNMSS : ASbI<0b11101, 0b01, 0, 0,
2340 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2341 IIC_fpFMAC32, "vfnms", ".f32\t$Sd, $Sn, $Sm",
2342 [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
2343 RegConstraint<"$Sdin = $Sd">,
2344 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
2345 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
2346 // Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines.
}
2350 def VFNMSH : AHbI<0b11101, 0b01, 0, 0,
2351 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2352 IIC_fpFMAC16, "vfnms", ".f16\t$Sd, $Sn, $Sm",
2353 [(set (f16 HPR:$Sd), (fsub_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)), (f16 HPR:$Sdin)))]>,
2354 RegConstraint<"$Sdin = $Sd">,
2355 Requires<[HasFullFP16,UseFusedMAC]>,
2356 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2358 def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
2359 (VFNMSD DPR:$dstin, DPR:$a, DPR:$b)>,
2360 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
2361 def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
2362 (VFNMSS SPR:$dstin, SPR:$a, SPR:$b)>,
2363 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
2365 // Match @llvm.fma.* intrinsics
2367 // (fma x, y, (fneg z)) -> (vfnms z, x, y)
2368 def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, (fneg DPR:$Ddin))),
2369 (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2370 Requires<[HasVFP4,HasDPVFP]>;
2371 def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, (fneg SPR:$Sdin))),
2372 (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2373 Requires<[HasVFP4]>;
2374 def : Pat<(f16 (fma (f16 HPR:$Sn), (f16 HPR:$Sm), (fneg (f16 HPR:$Sdin)))),
2375 (VFNMSH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
2376 Requires<[HasFullFP16]>;
2377 // (fneg (fma (fneg x), y, z)) -> (vfnms z, x, y)
2378 def : Pat<(fneg (f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin))),
2379 (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2380 Requires<[HasVFP4,HasDPVFP]>;
2381 def : Pat<(fneg (f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin))),
2382 (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2383 Requires<[HasVFP4]>;
2384 def : Pat<(fneg (f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (f16 HPR:$Sdin)))),
2385 (VFNMSH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
2386 Requires<[HasFullFP16]>;
2388 //===----------------------------------------------------------------------===//
2389 // FP Conditional moves.
2392 let hasSideEffects = 0 in {
2393 def VMOVDcc : PseudoInst<(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm, cmovpred:$p),
2395 [(set (f64 DPR:$Dd),
2396 (ARMcmov DPR:$Dn, DPR:$Dm, cmovpred:$p))]>,
2397 RegConstraint<"$Dn = $Dd">, Requires<[HasFPRegs64]>;
2399 def VMOVScc : PseudoInst<(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm, cmovpred:$p),
2401 [(set (f32 SPR:$Sd),
2402 (ARMcmov SPR:$Sn, SPR:$Sm, cmovpred:$p))]>,
2403 RegConstraint<"$Sn = $Sd">, Requires<[HasFPRegs]>;
2405 def VMOVHcc : PseudoInst<(outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm, cmovpred:$p),
2407 [(set (f16 HPR:$Sd),
2408 (ARMcmov (f16 HPR:$Sn), (f16 HPR:$Sm), cmovpred:$p))]>,
2409 RegConstraint<"$Sd = $Sn">, Requires<[HasFPRegs]>;
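// These pseudos carry an FP select on an already-computed condition, e.g. the
// C source below (illustrative only); they are turned into predicated VMOVs
// once the condition flags are known.
//
//   double fsel(int c, double a, double b) {
//     return c ? a : b;   // selected value becomes an ARMcmov of a and b
//   }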
2412 //===----------------------------------------------------------------------===//
2413 // Move from VFP System Register to ARM core register.
2416 class MovFromVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
2418 VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {
2420 // Instruction operand.
2423 let Inst{27-20} = 0b11101111;
2424 let Inst{19-16} = opc19_16;
2425 let Inst{15-12} = Rt;
2426 let Inst{11-8} = 0b1010;
2428 let Inst{6-5} = 0b00;
2430 let Inst{3-0} = 0b0000;
2431 let Unpredictable{7-5} = 0b111;
2432 let Unpredictable{3-0} = 0b1111;
2435 let DecoderMethod = "DecodeForVMRSandVMSR" in {
2436 // APSR is the application level alias of CPSR. This copies the FPSCR N, Z, C, V flags into the corresponding APSR condition flags.
2438 let Defs = [CPSR], Uses = [FPSCR_NZCV], Predicates = [HasFPRegs],
2439 Rt = 0b1111 /* apsr_nzcv */ in
2440 def FMSTAT : MovFromVFP<0b0001 /* fpscr */, (outs), (ins),
2441 "vmrs", "\tAPSR_nzcv, fpscr", [(arm_fmstat)]>;
2443 // Application level FPSCR -> GPR
2444 let hasSideEffects = 1, Uses = [FPSCR], Predicates = [HasFPRegs] in
2445 def VMRS : MovFromVFP<0b0001 /* fpscr */, (outs GPRnopc:$Rt), (ins),
2446 "vmrs", "\t$Rt, fpscr",
2447 [(set GPRnopc:$Rt, (int_arm_get_fpscr))]>;
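// int_arm_get_fpscr is what clang's FPSCR read builtin lowers to. A minimal
// usage sketch, assuming the __builtin_arm_get_fpscr ACLE-style builtin:
//
//   #include <stdint.h>
//
//   static inline uint32_t read_fpscr(void) {
//     return __builtin_arm_get_fpscr();   // selects "vmrs Rt, fpscr"
//   }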
2449 // System level FPEXC, FPSID -> GPR
2450 let Uses = [FPSCR] in {
2451 def VMRS_FPEXC : MovFromVFP<0b1000 /* fpexc */, (outs GPRnopc:$Rt), (ins),
2452 "vmrs", "\t$Rt, fpexc", []>;
2453 def VMRS_FPSID : MovFromVFP<0b0000 /* fpsid */, (outs GPRnopc:$Rt), (ins),
2454 "vmrs", "\t$Rt, fpsid", []>;
2455 def VMRS_MVFR0 : MovFromVFP<0b0111 /* mvfr0 */, (outs GPRnopc:$Rt), (ins),
2456 "vmrs", "\t$Rt, mvfr0", []>;
2457 def VMRS_MVFR1 : MovFromVFP<0b0110 /* mvfr1 */, (outs GPRnopc:$Rt), (ins),
2458 "vmrs", "\t$Rt, mvfr1", []>;
2459 let Predicates = [HasFPARMv8] in {
2460 def VMRS_MVFR2 : MovFromVFP<0b0101 /* mvfr2 */, (outs GPRnopc:$Rt), (ins),
2461 "vmrs", "\t$Rt, mvfr2", []>;
2463 def VMRS_FPINST : MovFromVFP<0b1001 /* fpinst */, (outs GPRnopc:$Rt), (ins),
2464 "vmrs", "\t$Rt, fpinst", []>;
2465 def VMRS_FPINST2 : MovFromVFP<0b1010 /* fpinst2 */, (outs GPRnopc:$Rt),
2466 (ins), "vmrs", "\t$Rt, fpinst2", []>;
2467 let Predicates = [HasV8_1MMainline, HasFPRegs] in {
2468 // System level FPSCR_NZCVQC -> GPR
2469 def VMRS_FPSCR_NZCVQC
2470 : MovFromVFP<0b0010 /* fpscr_nzcvqc */,
2471 (outs GPR:$Rt), (ins cl_FPSCR_NZCV:$fpscr_in),
2472 "vmrs", "\t$Rt, fpscr_nzcvqc", []>;
2475 let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
2476 // System level FPCXT_NS (Non-secure FP context) -> GPR, for the v8.1-M Security Extension
2477 def VMRS_FPCXTNS : MovFromVFP<0b1110 /* fpcxtns */, (outs GPR:$Rt), (ins),
2478 "vmrs", "\t$Rt, fpcxtns", []>;
2480 let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
2481 // System level FPCXT_S (Secure FP context) -> GPR, for the v8.1-M Security Extension
2482 def VMRS_FPCXTS : MovFromVFP<0b1111 /* fpcxts */, (outs GPR:$Rt), (ins),
2483 "vmrs", "\t$Rt, fpcxts", []>;
2486 let Predicates = [HasV8_1MMainline, HasMVEInt] in {
2487 // System level VPR/P0 -> GPR
2489 def VMRS_VPR : MovFromVFP<0b1100 /* vpr */, (outs GPR:$Rt), (ins),
2490 "vmrs", "\t$Rt, vpr", []>;
2492 def VMRS_P0 : MovFromVFP<0b1101 /* p0 */, (outs GPR:$Rt), (ins VCCR:$cond),
2493 "vmrs", "\t$Rt, p0", []>;
2497 //===----------------------------------------------------------------------===//
2498 // Move from ARM core register to VFP System Register.
2501 class MovToVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
2503 VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {
2505 // Instruction operand.
2508 let Inst{27-20} = 0b11101110;
2509 let Inst{19-16} = opc19_16;
2510 let Inst{15-12} = Rt;
2511 let Inst{11-8} = 0b1010;
2513 let Inst{6-5} = 0b00;
2515 let Inst{3-0} = 0b0000;
2516 let Predicates = [HasVFP2];
2517 let Unpredictable{7-5} = 0b111;
2518 let Unpredictable{3-0} = 0b1111;
2521 let DecoderMethod = "DecodeForVMRSandVMSR" in {
2522 let Defs = [FPSCR] in {
2523 let Predicates = [HasFPRegs] in
2524 // Application level GPR -> FPSCR
2525 def VMSR : MovToVFP<0b0001 /* fpscr */, (outs), (ins GPRnopc:$Rt),
2526 "vmsr", "\tfpscr, $Rt",
2527 [(int_arm_set_fpscr GPRnopc:$Rt)]>;
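// Likewise, int_arm_set_fpscr backs the corresponding write builtin. A hedged
// sketch that switches to round-toward-zero, assuming __builtin_arm_get_fpscr /
// __builtin_arm_set_fpscr and the FPSCR RMode field in bits [23:22]:
//
//   static inline void set_round_toward_zero(void) {
//     unsigned fpscr = __builtin_arm_get_fpscr();
//     fpscr = (fpscr & ~(3u << 22)) | (3u << 22);   // RMode = 0b11 (RZ)
//     __builtin_arm_set_fpscr(fpscr);               // selects "vmsr fpscr, Rt"
//   }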
2528 // System level GPR -> FPEXC
2529 def VMSR_FPEXC : MovToVFP<0b1000 /* fpexc */, (outs), (ins GPRnopc:$Rt),
2530 "vmsr", "\tfpexc, $Rt", []>;
2531 // System level GPR -> FPSID
2532 def VMSR_FPSID : MovToVFP<0b0000 /* fpsid */, (outs), (ins GPRnopc:$Rt),
2533 "vmsr", "\tfpsid, $Rt", []>;
2534 def VMSR_FPINST : MovToVFP<0b1001 /* fpinst */, (outs), (ins GPRnopc:$Rt),
2535 "vmsr", "\tfpinst, $Rt", []>;
2536 def VMSR_FPINST2 : MovToVFP<0b1010 /* fpinst2 */, (outs), (ins GPRnopc:$Rt),
2537 "vmsr", "\tfpinst2, $Rt", []>;
2539 let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
2540 // System level GPR -> FPCXT_NS (Non-secure FP context), v8.1-M Security Extension
2541 def VMSR_FPCXTNS : MovToVFP<0b1110 /* fpcxtns */, (outs), (ins GPR:$Rt),
2542 "vmsr", "\tfpcxtns, $Rt", []>;
2544 let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
2545 // System level GPR -> FPCXT_S (Secure FP context), v8.1-M Security Extension
2546 def VMSR_FPCXTS : MovToVFP<0b1111 /* fpcxts */, (outs), (ins GPR:$Rt),
2547 "vmsr", "\tfpcxts, $Rt", []>;
2549 let Predicates = [HasV8_1MMainline, HasFPRegs] in {
2550 // System level GPR -> FPSCR_NZCVQC
2551 def VMSR_FPSCR_NZCVQC
2552 : MovToVFP<0b0010 /* fpscr_nzcvqc */,
2553 (outs cl_FPSCR_NZCV:$fpscr_out), (ins GPR:$Rt),
2554 "vmsr", "\tfpscr_nzcvqc, $Rt", []>;
2557 let Predicates = [HasV8_1MMainline, HasMVEInt] in {
2558 // System level GPR -> VPR/P0
2560 def VMSR_VPR : MovToVFP<0b1100 /* vpr */, (outs), (ins GPR:$Rt),
2561 "vmsr", "\tvpr, $Rt", []>;
2563 def VMSR_P0 : MovToVFP<0b1101 /* p0 */, (outs VCCR:$cond), (ins GPR:$Rt),
2564 "vmsr", "\tp0, $Rt", []>;
2568 //===----------------------------------------------------------------------===//
2572 // Materialize FP immediates. VFP3 only.
2573 let isReMaterializable = 1 in {
2574 def FCONSTD : VFPAI<(outs DPR:$Dd), (ins vfp_f64imm:$imm),
2575 VFPMiscFrm, IIC_fpUNA64,
2576 "vmov", ".f64\t$Dd, $imm",
2577 [(set DPR:$Dd, vfp_f64imm:$imm)]>,
2578 Requires<[HasVFP3,HasDPVFP]> {
2582 let Inst{27-23} = 0b11101;
2583 let Inst{22} = Dd{4};
2584 let Inst{21-20} = 0b11;
2585 let Inst{19-16} = imm{7-4};
2586 let Inst{15-12} = Dd{3-0};
2587 let Inst{11-9} = 0b101;
2588 let Inst{8} = 1; // Double precision.
2589 let Inst{7-4} = 0b0000;
2590 let Inst{3-0} = imm{3-0};
2593 def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
2594 VFPMiscFrm, IIC_fpUNA32,
2595 "vmov", ".f32\t$Sd, $imm",
2596 [(set SPR:$Sd, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
2600 let Inst{27-23} = 0b11101;
2601 let Inst{22} = Sd{0};
2602 let Inst{21-20} = 0b11;
2603 let Inst{19-16} = imm{7-4};
2604 let Inst{15-12} = Sd{4-1};
2605 let Inst{11-9} = 0b101;
2606 let Inst{8} = 0; // Single precision.
2607 let Inst{7-4} = 0b0000;
2608 let Inst{3-0} = imm{3-0};
2611 def FCONSTH : VFPAI<(outs HPR:$Sd), (ins vfp_f16imm:$imm),
2612 VFPMiscFrm, IIC_fpUNA16,
2613 "vmov", ".f16\t$Sd, $imm",
2614 [(set (f16 HPR:$Sd), vfp_f16imm:$imm)]>,
2615 Requires<[HasFullFP16]> {
2619 let Inst{27-23} = 0b11101;
2620 let Inst{22} = Sd{0};
2621 let Inst{21-20} = 0b11;
2622 let Inst{19-16} = imm{7-4};
2623 let Inst{15-12} = Sd{4-1};
2624 let Inst{11-8} = 0b1001; // Half precision
2625 let Inst{7-4} = 0b0000;
2626 let Inst{3-0} = imm{3-0};
2628 let isUnpredicable = 1;
2632 def : Pat<(f32 (vfp_f32f16imm:$imm)),
2633 (f32 (COPY_TO_REGCLASS (f16 (FCONSTH (vfp_f32f16imm_xform (f32 $imm)))), SPR))> {
2634 let Predicates = [HasFullFP16];
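// Only a small set of constants fits the 8-bit VFP immediate: values of the
// form +/- (n/16) * 2^r with n in [16,31] and r in [-3,4], e.g. 0.5, 1.0, 2.0,
// 10.0 or 31.0. Zero and constants such as 1/3 or 100.0 are not encodable and
// have to be materialized another way (constant pool or integer moves).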
2637 //===----------------------------------------------------------------------===//
2638 // Assembler aliases.
2640 // A few mnemonic aliases for the pre-unified (pre-UAL) syntax. We don't
2641 // guarantee to support them all, but supporting at least some of the basics
2642 // is good for compatibility with older hand-written assembly.
2643 def : VFP2MnemonicAlias<"flds", "vldr">;
2644 def : VFP2MnemonicAlias<"fldd", "vldr">;
2645 def : VFP2MnemonicAlias<"fmrs", "vmov">;
2646 def : VFP2MnemonicAlias<"fmsr", "vmov">;
2647 def : VFP2MnemonicAlias<"fsqrts", "vsqrt">;
2648 def : VFP2MnemonicAlias<"fsqrtd", "vsqrt">;
2649 def : VFP2MnemonicAlias<"fadds", "vadd.f32">;
2650 def : VFP2MnemonicAlias<"faddd", "vadd.f64">;
2651 def : VFP2MnemonicAlias<"fmrdd", "vmov">;
2652 def : VFP2MnemonicAlias<"fmrds", "vmov">;
2653 def : VFP2MnemonicAlias<"fmrrd", "vmov">;
2654 def : VFP2MnemonicAlias<"fmdrr", "vmov">;
2655 def : VFP2MnemonicAlias<"fmuls", "vmul.f32">;
2656 def : VFP2MnemonicAlias<"fmuld", "vmul.f64">;
2657 def : VFP2MnemonicAlias<"fnegs", "vneg.f32">;
2658 def : VFP2MnemonicAlias<"fnegd", "vneg.f64">;
2659 def : VFP2MnemonicAlias<"ftosizd", "vcvt.s32.f64">;
2660 def : VFP2MnemonicAlias<"ftosid", "vcvtr.s32.f64">;
2661 def : VFP2MnemonicAlias<"ftosizs", "vcvt.s32.f32">;
2662 def : VFP2MnemonicAlias<"ftosis", "vcvtr.s32.f32">;
2663 def : VFP2MnemonicAlias<"ftouizd", "vcvt.u32.f64">;
2664 def : VFP2MnemonicAlias<"ftouid", "vcvtr.u32.f64">;
2665 def : VFP2MnemonicAlias<"ftouizs", "vcvt.u32.f32">;
2666 def : VFP2MnemonicAlias<"ftouis", "vcvtr.u32.f32">;
2667 def : VFP2MnemonicAlias<"fsitod", "vcvt.f64.s32">;
2668 def : VFP2MnemonicAlias<"fsitos", "vcvt.f32.s32">;
2669 def : VFP2MnemonicAlias<"fuitod", "vcvt.f64.u32">;
2670 def : VFP2MnemonicAlias<"fuitos", "vcvt.f32.u32">;
2671 def : VFP2MnemonicAlias<"fsts", "vstr">;
2672 def : VFP2MnemonicAlias<"fstd", "vstr">;
2673 def : VFP2MnemonicAlias<"fmacd", "vmla.f64">;
2674 def : VFP2MnemonicAlias<"fmacs", "vmla.f32">;
2675 def : VFP2MnemonicAlias<"fcpys", "vmov.f32">;
2676 def : VFP2MnemonicAlias<"fcpyd", "vmov.f64">;
2677 def : VFP2MnemonicAlias<"fcmps", "vcmp.f32">;
2678 def : VFP2MnemonicAlias<"fcmpd", "vcmp.f64">;
2679 def : VFP2MnemonicAlias<"fdivs", "vdiv.f32">;
2680 def : VFP2MnemonicAlias<"fdivd", "vdiv.f64">;
2681 def : VFP2MnemonicAlias<"fmrx", "vmrs">;
2682 def : VFP2MnemonicAlias<"fmxr", "vmsr">;
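// For example, pre-UAL "fadds s0, s1, s2" assembles exactly as
// "vadd.f32 s0, s1, s2", and "fmrx r0, fpscr" as "vmrs r0, fpscr".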
2684 // Be friendly and accept the old form of zero-compare
2685 def : VFP2DPInstAlias<"fcmpzd${p} $val", (VCMPZD DPR:$val, pred:$p)>;
2686 def : VFP2InstAlias<"fcmpzs${p} $val", (VCMPZS SPR:$val, pred:$p)>;
2689 def : InstAlias<"fmstat${p}", (FMSTAT pred:$p), 0>, Requires<[HasFPRegs]>;
2690 def : VFP2InstAlias<"fadds${p} $Sd, $Sn, $Sm",
2691 (VADDS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
2692 def : VFP2DPInstAlias<"faddd${p} $Dd, $Dn, $Dm",
2693 (VADDD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
2694 def : VFP2InstAlias<"fsubs${p} $Sd, $Sn, $Sm",
2695 (VSUBS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
2696 def : VFP2DPInstAlias<"fsubd${p} $Dd, $Dn, $Dm",
2697 (VSUBD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
2699 // No need for the size suffix on VSQRT. It's implied by the register classes.
2700 def : VFP2InstAlias<"vsqrt${p} $Sd, $Sm", (VSQRTS SPR:$Sd, SPR:$Sm, pred:$p)>;
2701 def : VFP2DPInstAlias<"vsqrt${p} $Dd, $Dm", (VSQRTD DPR:$Dd, DPR:$Dm, pred:$p)>;
2703 // VLDR/VSTR accept an optional type suffix.
2704 def : VFP2InstAlias<"vldr${p}.32 $Sd, $addr",
2705 (VLDRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
2706 def : VFP2InstAlias<"vstr${p}.32 $Sd, $addr",
2707 (VSTRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
2708 def : VFP2InstAlias<"vldr${p}.64 $Dd, $addr",
2709 (VLDRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
2710 def : VFP2InstAlias<"vstr${p}.64 $Dd, $addr",
2711 (VSTRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
2713 // VMOV can accept an optional data type suffix of 32 bits or less.
2714 def : VFP2InstAlias<"vmov${p}.8 $Rt, $Sn",
2715 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
2716 def : VFP2InstAlias<"vmov${p}.16 $Rt, $Sn",
2717 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
2718 def : VFP2InstAlias<"vmov${p}.32 $Rt, $Sn",
2719 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
2720 def : VFP2InstAlias<"vmov${p}.8 $Sn, $Rt",
2721 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
2722 def : VFP2InstAlias<"vmov${p}.16 $Sn, $Rt",
2723 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
2724 def : VFP2InstAlias<"vmov${p}.32 $Sn, $Rt",
2725 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
2727 def : VFP2InstAlias<"vmov${p}.f64 $Rt, $Rt2, $Dn",
2728 (VMOVRRD GPR:$Rt, GPR:$Rt2, DPR:$Dn, pred:$p)>;
2729 def : VFP2InstAlias<"vmov${p}.f64 $Dn, $Rt, $Rt2",
2730 (VMOVDRR DPR:$Dn, GPR:$Rt, GPR:$Rt2, pred:$p)>;
2732 // VMOVS doesn't need the .f32 to disambiguate from the NEON encoding the way VMOVD does.
2734 def : VFP2InstAlias<"vmov${p} $Sd, $Sm",
2735 (VMOVS SPR:$Sd, SPR:$Sm, pred:$p)>;
2737 // FCONSTD/FCONSTS alias for vmov.f64/vmov.f32
2738 // These aliases provide added functionality over the vmov.f instructions by
2739 // allowing users to write assembly containing encoded floating-point constants
2740 // (e.g. #0x70 vs #1.0). Without these aliases there is no way for the
2741 // assembler to accept encoded FP constants (the equivalent FP literal is
2742 // accepted directly by vmov.f32/vmov.f64).
2743 def : VFP3InstAlias<"fconstd${p} $Dd, $val",
2744 (FCONSTD DPR:$Dd, vfp_f64imm:$val, pred:$p)>;
2745 def : VFP3InstAlias<"fconsts${p} $Sd, $val",
2746 (FCONSTS SPR:$Sd, vfp_f32imm:$val, pred:$p)>;
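// VSCCLRM (v8.1-M Security Extension) clears the floating-point registers named
// in its register list (the list also includes VPR) so that secure FP state is
// not leaked when calling Non-secure code; it comes in the D-register and
// S-register list forms defined below.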
2748 def VSCCLRMD : VFPXI<(outs), (ins pred:$p, fp_dreglist_with_vpr:$regs, variable_ops),
2749 AddrModeNone, 4, IndexModeNone, VFPMiscFrm, NoItinerary,
2750 "vscclrm{$p}\t$regs", "", []>, Sched<[]> {
2752 let Inst{31-23} = 0b111011001;
2753 let Inst{22} = regs{12};
2754 let Inst{21-16} = 0b011111;
2755 let Inst{15-12} = regs{11-8};
2756 let Inst{11-8} = 0b1011;
2757 let Inst{7-1} = regs{7-1};
2760 let DecoderMethod = "DecodeVSCCLRM";
2762 list<Predicate> Predicates = [HasV8_1MMainline, Has8MSecExt];
2765 def VSCCLRMS : VFPXI<(outs), (ins pred:$p, fp_sreglist_with_vpr:$regs, variable_ops),
2766 AddrModeNone, 4, IndexModeNone, VFPMiscFrm, NoItinerary,
2767 "vscclrm{$p}\t$regs", "", []>, Sched<[]> {
2769 let Inst{31-23} = 0b111011001;
2770 let Inst{22} = regs{8};
2771 let Inst{21-16} = 0b011111;
2772 let Inst{15-12} = regs{12-9};
2773 let Inst{11-8} = 0b1010;
2774 let Inst{7-0} = regs{7-0};
2776 let DecoderMethod = "DecodeVSCCLRM";
2778 list<Predicate> Predicates = [HasV8_1MMainline, Has8MSecExt];
2781 //===----------------------------------------------------------------------===//
2782 // Store/load a VFP System Register to/from memory.
2785 class vfp_vstrldr<bit opc, bit P, bit W, bits<4> SysReg, string sysreg,
2786 dag oops, dag iops, IndexMode im, string Dest, string cstr>
2787 : VFPI<oops, iops, AddrModeT2_i7s4, 4, im, VFPLdStFrm, IIC_fpSTAT,
2788 !if(opc,"vldr","vstr"), !strconcat("\t", sysreg, ", ", Dest), cstr, []>,
2791 let Inst{27-25} = 0b110;
2793 let Inst{23} = addr{7};
2794 let Inst{22} = SysReg{3};
2797 let Inst{19-16} = addr{11-8};
2798 let Inst{15-13} = SysReg{2-0};
2799 let Inst{12-7} = 0b011111;
2800 let Inst{6-0} = addr{6-0};
2801 list<Predicate> Predicates = [HasFPRegs, HasV8_1MMainline];
2803 let mayStore = !if(opc, 0b0, 0b1);
2804 let hasSideEffects = 1;
2807 multiclass vfp_vstrldr_sysreg<bit opc, bits<4> SysReg, string sysreg,
2808 dag oops=(outs), dag iops=(ins)> {
2810 vfp_vstrldr<opc, 1, 0, SysReg, sysreg,
2811 oops, !con(iops, (ins t2addrmode_imm7s4:$addr)),
2812 IndexModePost, "$addr", "" > {
2813 let DecoderMethod = "DecodeVSTRVLDR_SYSREG<false>";
2817 vfp_vstrldr<opc, 1, 1, SysReg, sysreg,
2818 !con(oops, (outs GPRnopc:$wb)),
2819 !con(iops, (ins t2addrmode_imm7s4_pre:$addr)),
2820 IndexModePre, "$addr!", "$addr.base = $wb"> {
2821 let DecoderMethod = "DecodeVSTRVLDR_SYSREG<true>";
2825 vfp_vstrldr<opc, 0, 1, SysReg, sysreg,
2826 !con(oops, (outs GPRnopc:$wb)),
2827 !con(iops, (ins t2_addr_offset_none:$Rn,
2828 t2am_imm7s4_offset:$addr)),
2829 IndexModePost, "$Rn$addr", "$Rn.base = $wb"> {
2831 let Inst{19-16} = Rn{3-0};
2832 let DecoderMethod = "DecodeVSTRVLDR_SYSREG<true>";
2836 let Defs = [FPSCR] in {
2837 defm VSTR_FPSCR : vfp_vstrldr_sysreg<0b0,0b0001, "fpscr">;
2838 defm VSTR_FPSCR_NZCVQC : vfp_vstrldr_sysreg<0b0,0b0010, "fpscr_nzcvqc">;
2840 let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
2841 defm VSTR_FPCXTNS : vfp_vstrldr_sysreg<0b0,0b1110, "fpcxtns">;
2842 defm VSTR_FPCXTS : vfp_vstrldr_sysreg<0b0,0b1111, "fpcxts">;
2846 let Predicates = [HasV8_1MMainline, HasMVEInt] in {
2847 let Uses = [VPR] in {
2848 defm VSTR_VPR : vfp_vstrldr_sysreg<0b0,0b1100, "vpr">;
2850 defm VSTR_P0 : vfp_vstrldr_sysreg<0b0,0b1101, "p0",
2851 (outs), (ins VCCR:$P0)>;
2853 let Defs = [VPR] in {
2854 defm VLDR_VPR : vfp_vstrldr_sysreg<0b1,0b1100, "vpr">;
2856 defm VLDR_P0 : vfp_vstrldr_sysreg<0b1,0b1101, "p0",
2857 (outs VCCR:$P0), (ins)>;
2860 let Uses = [FPSCR] in {
2861 defm VLDR_FPSCR : vfp_vstrldr_sysreg<0b1,0b0001, "fpscr">;
2862 defm VLDR_FPSCR_NZCVQC : vfp_vstrldr_sysreg<0b1,0b0010, "fpscr_nzcvqc">;
2864 let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
2865 defm VLDR_FPCXTNS : vfp_vstrldr_sysreg<0b1,0b1110, "fpcxtns">;
2866 defm VLDR_FPCXTS : vfp_vstrldr_sysreg<0b1,0b1111, "fpcxts">;