1 //===-- ARMInstrVFP.td - VFP support for ARM ---------------*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file describes the ARM VFP instruction set.
11 //===----------------------------------------------------------------------===//
13 def SDT_CMPFP : SDTypeProfile<1, 2, [
14 SDTCisVT<0, FlagsVT>, // out flags
16 SDTCisSameAs<2, 1> // rhs
19 def SDT_CMPFP0 : SDTypeProfile<1, 1, [
20 SDTCisVT<0, FlagsVT>, // out flags
21 SDTCisFP<1> // operand
24 def SDT_VMOVDRR : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
26 def SDT_VMOVRRD : SDTypeProfile<2, 1, [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>,
29 def SDT_VMOVSR : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisVT<1, i32>]>;
31 def arm_cmpfp : SDNode<"ARMISD::CMPFP", SDT_CMPFP>;
32 def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0", SDT_CMPFP0>;
33 def arm_cmpfpe : SDNode<"ARMISD::CMPFPE", SDT_CMPFP>;
34 def arm_cmpfpe0 : SDNode<"ARMISD::CMPFPEw0", SDT_CMPFP0>;
36 def arm_fmstat : SDNode<"ARMISD::FMSTAT",
38 SDTCisVT<0, FlagsVT>, // out flags
39 SDTCisVT<1, FlagsVT> // in flags
43 def arm_fmdrr : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
44 def arm_fmrrd : SDNode<"ARMISD::VMOVRRD", SDT_VMOVRRD>;
45 def arm_vmovsr : SDNode<"ARMISD::VMOVSR", SDT_VMOVSR>;
47 def SDT_VMOVhr : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, i32>] >;
48 def SDT_VMOVrh : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisFP<1>] >;
49 def arm_vmovhr : SDNode<"ARMISD::VMOVhr", SDT_VMOVhr>;
50 def arm_vmovrh : SDNode<"ARMISD::VMOVrh", SDT_VMOVrh>;
52 //===----------------------------------------------------------------------===//
53 // Operand Definitions.
56 // 8-bit floating-point immediate encodings.
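// Note (informal summary, not the authoritative definition): the 8-bit VFP
// modified immediate packs a sign bit, a 3-bit exponent field and a 4-bit
// fraction, so only values of the form +/-(n/16) * 2^r with 16 <= n <= 31
// and -3 <= r <= 4 can be encoded; the getFP16Imm/getFP32Imm/getFP64Imm
// helpers used below return -1 for anything else.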
57 def FPImmOperand : AsmOperandClass {
59 let ParserMethod = "parseFPImm";
62 def vfp_f16imm : Operand<f16>,
63 PatLeaf<(f16 fpimm), [{
64 return ARM_AM::getFP16Imm(N->getValueAPF()) != -1;
65 }], SDNodeXForm<fpimm, [{
66 uint32_t Enc = ARM_AM::getFP16Imm(N->getValueAPF());
67 return CurDAG->getTargetConstant(Enc, SDLoc(N), MVT::i32);
69 let PrintMethod = "printFPImmOperand";
70 let ParserMatchClass = FPImmOperand;
73 def vfp_f32f16imm_xform : SDNodeXForm<fpimm, [{
74 uint32_t Enc = ARM_AM::getFP32FP16Imm(N->getValueAPF());
75 return CurDAG->getTargetConstant(Enc, SDLoc(N), MVT::i32);
78 def vfp_f32f16imm : PatLeaf<(f32 fpimm), [{
79 return ARM_AM::getFP32FP16Imm(N->getValueAPF()) != -1;
80 }], vfp_f32f16imm_xform>;
82 def vfp_f32imm_xform : SDNodeXForm<fpimm, [{
83 uint32_t Enc = ARM_AM::getFP32Imm(N->getValueAPF());
84 return CurDAG->getTargetConstant(Enc, SDLoc(N), MVT::i32);
87 def gi_vfp_f32imm : GICustomOperandRenderer<"renderVFPF32Imm">,
88 GISDNodeXFormEquiv<vfp_f32imm_xform>;
90 def vfp_f32imm : Operand<f32>,
91 PatLeaf<(f32 fpimm), [{
92 return ARM_AM::getFP32Imm(N->getValueAPF()) != -1;
93 }], vfp_f32imm_xform> {
94 let PrintMethod = "printFPImmOperand";
95 let ParserMatchClass = FPImmOperand;
96 let GISelPredicateCode = [{
97 const auto &MO = MI.getOperand(1);
100 return ARM_AM::getFP32Imm(MO.getFPImm()->getValueAPF()) != -1;
104 def vfp_f64imm_xform : SDNodeXForm<fpimm, [{
105 uint32_t Enc = ARM_AM::getFP64Imm(N->getValueAPF());
106 return CurDAG->getTargetConstant(Enc, SDLoc(N), MVT::i32);
109 def gi_vfp_f64imm : GICustomOperandRenderer<"renderVFPF64Imm">,
110 GISDNodeXFormEquiv<vfp_f64imm_xform>;
112 def vfp_f64imm : Operand<f64>,
113 PatLeaf<(f64 fpimm), [{
114 return ARM_AM::getFP64Imm(N->getValueAPF()) != -1;
115 }], vfp_f64imm_xform> {
116 let PrintMethod = "printFPImmOperand";
117 let ParserMatchClass = FPImmOperand;
118 let GISelPredicateCode = [{
119 const auto &MO = MI.getOperand(1);
122 return ARM_AM::getFP64Imm(MO.getFPImm()->getValueAPF()) != -1;
126 def alignedload16 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
127 return cast<LoadSDNode>(N)->getAlign() >= 2;
130 def alignedload32 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
131 return cast<LoadSDNode>(N)->getAlign() >= 4;
134 def alignedstore16 : PatFrag<(ops node:$val, node:$ptr),
135 (store node:$val, node:$ptr), [{
136 return cast<StoreSDNode>(N)->getAlign() >= 2;
139 def alignedstore32 : PatFrag<(ops node:$val, node:$ptr),
140 (store node:$val, node:$ptr), [{
141 return cast<StoreSDNode>(N)->getAlign() >= 4;
144 // The VCVT to/from fixed-point instructions encode the 'fbits' operand
145 // (the number of fixed bits) differently than it appears in the assembly
146 // source. It's encoded as "Size - fbits" where Size is the size of the
147 // fixed-point representation (32 or 16) and fbits is the value appearing
148 // in the assembly source, an integer in [0,16] or (0,32], depending on size.
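// Worked example: "vcvt.s32.f32 s0, s0, #12" is written with fbits = 12, but
// the immediate field in the encoding holds 32 - 12 = 20; the printer and
// parser methods below undo that transformation.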
149 def fbits32_asm_operand : AsmOperandClass { let Name = "FBits32"; }
150 def fbits32 : Operand<i32> {
151 let PrintMethod = "printFBits32";
152 let ParserMatchClass = fbits32_asm_operand;
155 def fbits16_asm_operand : AsmOperandClass { let Name = "FBits16"; }
156 def fbits16 : Operand<i32> {
157 let PrintMethod = "printFBits16";
158 let ParserMatchClass = fbits16_asm_operand;
161 //===----------------------------------------------------------------------===//
162 // Load / store Instructions.
165 let canFoldAsLoad = 1, isReMaterializable = 1 in {
167 def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
168 IIC_fpLoad64, "vldr", "\t$Dd, $addr",
169 [(set DPR:$Dd, (f64 (alignedload32 addrmode5:$addr)))]>,
170 Requires<[HasFPRegs]>;
172 def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
173 IIC_fpLoad32, "vldr", "\t$Sd, $addr",
174 [(set SPR:$Sd, (alignedload32 addrmode5:$addr))]>,
175 Requires<[HasFPRegs]> {
176 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
178 let D = VFPNeonDomain;
181 let isUnpredicable = 1 in
182 def VLDRH : AHI5<0b1101, 0b01, (outs HPR:$Sd), (ins addrmode5fp16:$addr),
183 IIC_fpLoad16, "vldr", ".16\t$Sd, $addr",
184 [(set HPR:$Sd, (f16 (alignedload16 addrmode5fp16:$addr)))]>,
185 Requires<[HasFPRegs16]>;
187 } // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'
189 def : Pat<(bf16 (alignedload16 addrmode5fp16:$addr)),
190 (VLDRH addrmode5fp16:$addr)> {
191 let Predicates = [HasFPRegs16];
193 def : Pat<(bf16 (alignedload16 addrmode3:$addr)),
194 (COPY_TO_REGCLASS (LDRH addrmode3:$addr), HPR)> {
195 let Predicates = [HasNoFPRegs16, IsARM];
197 def : Pat<(bf16 (alignedload16 t2addrmode_imm12:$addr)),
198 (COPY_TO_REGCLASS (t2LDRHi12 t2addrmode_imm12:$addr), HPR)> {
199 let Predicates = [HasNoFPRegs16, IsThumb];
202 def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
203 IIC_fpStore64, "vstr", "\t$Dd, $addr",
204 [(alignedstore32 (f64 DPR:$Dd), addrmode5:$addr)]>,
205 Requires<[HasFPRegs]>;
207 def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
208 IIC_fpStore32, "vstr", "\t$Sd, $addr",
209 [(alignedstore32 SPR:$Sd, addrmode5:$addr)]>,
210 Requires<[HasFPRegs]> {
211 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
213 let D = VFPNeonDomain;
216 let isUnpredicable = 1 in
217 def VSTRH : AHI5<0b1101, 0b00, (outs), (ins HPR:$Sd, addrmode5fp16:$addr),
218 IIC_fpStore16, "vstr", ".16\t$Sd, $addr",
219 [(alignedstore16 (f16 HPR:$Sd), addrmode5fp16:$addr)]>,
220 Requires<[HasFPRegs16]>;
222 def : Pat<(alignedstore16 (bf16 HPR:$Sd), addrmode5fp16:$addr),
223 (VSTRH (bf16 HPR:$Sd), addrmode5fp16:$addr)> {
224 let Predicates = [HasFPRegs16];
226 def : Pat<(alignedstore16 (bf16 HPR:$Sd), addrmode3:$addr),
227 (STRH (COPY_TO_REGCLASS $Sd, GPR), addrmode3:$addr)> {
228 let Predicates = [HasNoFPRegs16, IsARM];
230 def : Pat<(alignedstore16 (bf16 HPR:$Sd), t2addrmode_imm12:$addr),
231 (t2STRHi12 (COPY_TO_REGCLASS $Sd, GPR), t2addrmode_imm12:$addr)> {
232 let Predicates = [HasNoFPRegs16, IsThumb];
235 //===----------------------------------------------------------------------===//
236 // Load / store multiple Instructions.
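// Rough summary of the multiclass below: for both the D- and S-register list
// forms it defines an increment-after variant without writeback, an
// increment-after variant with writeback ("$Rn!") and a decrement-before
// variant with writeback, distinguished by the Inst{24-23} and Inst{21} bits
// (these expansions are the VLDM*/VSTM* instructions referenced by the
// vpush/vpop aliases further down).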
239 multiclass vfp_ldst_mult<string asm, bit L_bit,
240 InstrItinClass itin, InstrItinClass itin_upd> {
241 let Predicates = [HasFPRegs] in {
244 AXDI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
246 !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
247 let Inst{24-23} = 0b01; // Increment After
248 let Inst{21} = 0; // No writeback
249 let Inst{20} = L_bit;
252 AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
254 IndexModeUpd, itin_upd,
255 !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
256 let Inst{24-23} = 0b01; // Increment After
257 let Inst{21} = 1; // Writeback
258 let Inst{20} = L_bit;
261 AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
263 IndexModeUpd, itin_upd,
264 !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
265 let Inst{24-23} = 0b10; // Decrement Before
266 let Inst{21} = 1; // Writeback
267 let Inst{20} = L_bit;
272 AXSI4<(outs), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
274 !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
275 let Inst{24-23} = 0b01; // Increment After
276 let Inst{21} = 0; // No writeback
277 let Inst{20} = L_bit;
279 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
281 let D = VFPNeonDomain;
284 AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
286 IndexModeUpd, itin_upd,
287 !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
288 let Inst{24-23} = 0b01; // Increment After
289 let Inst{21} = 1; // Writeback
290 let Inst{20} = L_bit;
292 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
294 let D = VFPNeonDomain;
297 AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
299 IndexModeUpd, itin_upd,
300 !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
301 let Inst{24-23} = 0b10; // Decrement Before
302 let Inst{21} = 1; // Writeback
303 let Inst{20} = L_bit;
305 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
307 let D = VFPNeonDomain;
312 let hasSideEffects = 0 in {
314 let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
315 defm VLDM : vfp_ldst_mult<"vldm", 1, IIC_fpLoad_m, IIC_fpLoad_mu>;
317 let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
318 defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpStore_m, IIC_fpStore_mu>;
322 def : MnemonicAlias<"vldm", "vldmia">;
323 def : MnemonicAlias<"vstm", "vstmia">;
326 //===----------------------------------------------------------------------===//
327 // Lazy load / store multiple Instructions
330 // 2 encoding options:
332 // T1 takes an optional dpr_reglist, must be '{d0-d15}' (exactly)
333 // T1 requires v8-M.Main, secure state, and a target with 16 D registers (or with no D registers - NOP)
335 // T2 takes a mandatory dpr_reglist, must be '{d0-d31}' (exactly)
336 // T2 requires v8.1-M.Main, secure state, and a target with 16/32 D registers (or with no D registers - NOP)
337 // (source: Arm v8-M ARM, DDI0553B.v ID16122022)
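// Illustrative assembly forms, per the notes above: "vlldm r0" and
// "vlldm r0, {d0-d15}" select the T1 encoding, while "vlldm r0, {d0-d31}"
// requires the T2 encoding; the same applies to vlstm.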
339 def VLLDM : AXSI4FR<"vlldm${p}\t$Rn, $regs", 0, 1>,
340 Requires<[HasV8MMainline, Has8MSecExt]> {
341 let Defs = [VPR, FPSCR, FPSCR_NZCV, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15];
342 let DecoderMethod = "DecodeLazyLoadStoreMul";
344 // T1: assembly does not contain the register list.
345 def : InstAlias<"vlldm${p}\t$Rn", (VLLDM GPRnopc:$Rn, pred:$p, 0)>,
346 Requires<[HasV8MMainline, Has8MSecExt]>;
347 // T2: assembly must contain the register list.
348 // The register list has no effect on the encoding; it is for assembly/disassembly purposes only.
349 def VLLDM_T2 : AXSI4FR<"vlldm${p}\t$Rn, $regs", 1, 1>,
350 Requires<[HasV8_1MMainline, Has8MSecExt]> {
351 let Defs = [VPR, FPSCR, FPSCR_NZCV, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15,
352 D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26, D27, D28, D29, D30, D31];
353 let DecoderMethod = "DecodeLazyLoadStoreMul";
355 // T1: assembly contains the register list.
356 // The register list has no effect on the encoding; it is for assembly/disassembly purposes only.
357 def VLSTM : AXSI4FR<"vlstm${p}\t$Rn, $regs", 0, 0>,
358 Requires<[HasV8MMainline, Has8MSecExt]> {
359 let Defs = [VPR, FPSCR, FPSCR_NZCV];
360 let Uses = [VPR, FPSCR, FPSCR_NZCV, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15];
361 let DecoderMethod = "DecodeLazyLoadStoreMul";
363 // T1: assembly does not contain the register list.
364 def : InstAlias<"vlstm${p}\t$Rn", (VLSTM GPRnopc:$Rn, pred:$p, 0)>,
365 Requires<[HasV8MMainline, Has8MSecExt]>;
366 // T2: assembly must contain the register list.
367 // The register list has no effect on the encoding; it is for assembly/disassembly purposes only.
368 def VLSTM_T2 : AXSI4FR<"vlstm${p}\t$Rn, $regs", 1, 0>,
369 Requires<[HasV8_1MMainline, Has8MSecExt]> {
370 let Defs = [VPR, FPSCR, FPSCR_NZCV];
371 let Uses = [VPR, FPSCR, FPSCR_NZCV, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15,
372 D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26, D27, D28, D29, D30, D31];
373 let DecoderMethod = "DecodeLazyLoadStoreMul";
376 def : InstAlias<"vpush${p} $r", (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r), 0>,
377 Requires<[HasFPRegs]>;
378 def : InstAlias<"vpush${p} $r", (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r), 0>,
379 Requires<[HasFPRegs]>;
380 def : InstAlias<"vpop${p} $r", (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r), 0>,
381 Requires<[HasFPRegs]>;
382 def : InstAlias<"vpop${p} $r", (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r), 0>,
383 Requires<[HasFPRegs]>;
384 defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
385 (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>;
386 defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
387 (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>;
388 defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
389 (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>;
390 defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
391 (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>;
393 // FLDMX, FSTMX - Load and store multiple unknown precision registers for disassembly only.
395 // These instructions are deprecated, so we don't want them to get selected.
396 // However, there is no UAL syntax for them, so we keep them around for
397 // (dis)assembly only.
398 multiclass vfp_ldstx_mult<string asm, bit L_bit> {
399 let Predicates = [HasFPRegs], hasNoSchedulingInfo = 1 in {
402 AXXI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
403 IndexModeNone, !strconcat(asm, "iax${p}\t$Rn, $regs"), "", []> {
404 let Inst{24-23} = 0b01; // Increment After
405 let Inst{21} = 0; // No writeback
406 let Inst{20} = L_bit;
409 AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
410 IndexModeUpd, !strconcat(asm, "iax${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
411 let Inst{24-23} = 0b01; // Increment After
412 let Inst{21} = 1; // Writeback
413 let Inst{20} = L_bit;
416 AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
417 IndexModeUpd, !strconcat(asm, "dbx${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
418 let Inst{24-23} = 0b10; // Decrement Before
419 let Inst{21} = 1; // Writeback
420 let Inst{20} = L_bit;
425 defm FLDM : vfp_ldstx_mult<"fldm", 1>;
426 defm FSTM : vfp_ldstx_mult<"fstm", 0>;
428 def : VFP2MnemonicAlias<"fldmeax", "fldmdbx">;
429 def : VFP2MnemonicAlias<"fldmfdx", "fldmiax">;
431 def : VFP2MnemonicAlias<"fstmeax", "fstmiax">;
432 def : VFP2MnemonicAlias<"fstmfdx", "fstmdbx">;
434 //===----------------------------------------------------------------------===//
435 // FP Binary Operations.
438 let TwoOperandAliasConstraint = "$Dn = $Dd" in
439 def VADDD : ADbI<0b11100, 0b11, 0, 0,
440 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
441 IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
442 [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>,
443 Sched<[WriteFPALU64]>;
445 let TwoOperandAliasConstraint = "$Sn = $Sd" in
446 def VADDS : ASbIn<0b11100, 0b11, 0, 0,
447 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
448 IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
449 [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]>,
450 Sched<[WriteFPALU32]> {
451 // Some single precision VFP instructions may be executed on both NEON and
452 // VFP pipelines on A8.
453 let D = VFPNeonA8Domain;
456 let TwoOperandAliasConstraint = "$Sn = $Sd" in
457 def VADDH : AHbI<0b11100, 0b11, 0, 0,
458 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
459 IIC_fpALU16, "vadd", ".f16\t$Sd, $Sn, $Sm",
460 [(set (f16 HPR:$Sd), (fadd (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
461 Sched<[WriteFPALU32]>;
463 let TwoOperandAliasConstraint = "$Dn = $Dd" in
464 def VSUBD : ADbI<0b11100, 0b11, 1, 0,
465 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
466 IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
467 [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>,
468 Sched<[WriteFPALU64]>;
470 let TwoOperandAliasConstraint = "$Sn = $Sd" in
471 def VSUBS : ASbIn<0b11100, 0b11, 1, 0,
472 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
473 IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
474 [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]>,
475 Sched<[WriteFPALU32]> {
476 // Some single precision VFP instructions may be executed on both NEON and
477 // VFP pipelines on A8.
478 let D = VFPNeonA8Domain;
481 let TwoOperandAliasConstraint = "$Sn = $Sd" in
482 def VSUBH : AHbI<0b11100, 0b11, 1, 0,
483 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
484 IIC_fpALU16, "vsub", ".f16\t$Sd, $Sn, $Sm",
485 [(set (f16 HPR:$Sd), (fsub (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
486 Sched<[WriteFPALU32]>;
488 let TwoOperandAliasConstraint = "$Dn = $Dd" in
489 def VDIVD : ADbI<0b11101, 0b00, 0, 0,
490 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
491 IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
492 [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>,
493 Sched<[WriteFPDIV64]>;
495 let TwoOperandAliasConstraint = "$Sn = $Sd" in
496 def VDIVS : ASbI<0b11101, 0b00, 0, 0,
497 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
498 IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
499 [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>,
500 Sched<[WriteFPDIV32]>;
502 let TwoOperandAliasConstraint = "$Sn = $Sd" in
503 def VDIVH : AHbI<0b11101, 0b00, 0, 0,
504 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
505 IIC_fpDIV16, "vdiv", ".f16\t$Sd, $Sn, $Sm",
506 [(set (f16 HPR:$Sd), (fdiv (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
507 Sched<[WriteFPDIV32]>;
509 let TwoOperandAliasConstraint = "$Dn = $Dd" in
510 def VMULD : ADbI<0b11100, 0b10, 0, 0,
511 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
512 IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
513 [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>,
514 Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;
516 let TwoOperandAliasConstraint = "$Sn = $Sd" in
517 def VMULS : ASbIn<0b11100, 0b10, 0, 0,
518 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
519 IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
520 [(set SPR:$Sd, (fmul SPR:$Sn, SPR:$Sm))]>,
521 Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]> {
522 // Some single precision VFP instructions may be executed on both NEON and
523 // VFP pipelines on A8.
524 let D = VFPNeonA8Domain;
527 let TwoOperandAliasConstraint = "$Sn = $Sd" in
528 def VMULH : AHbI<0b11100, 0b10, 0, 0,
529 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
530 IIC_fpMUL16, "vmul", ".f16\t$Sd, $Sn, $Sm",
531 [(set (f16 HPR:$Sd), (fmul (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
532 Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]>;
534 let TwoOperandAliasConstraint = "$Dn = $Dd" in
535 def VNMULD : ADbI<0b11100, 0b10, 1, 0,
536 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
537 IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
538 [(set DPR:$Dd, (fneg (fmul DPR:$Dn, (f64 DPR:$Dm))))]>,
539 Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;
541 let TwoOperandAliasConstraint = "$Sn = $Sd" in
542 def VNMULS : ASbI<0b11100, 0b10, 1, 0,
543 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
544 IIC_fpMUL32, "vnmul", ".f32\t$Sd, $Sn, $Sm",
545 [(set SPR:$Sd, (fneg (fmul SPR:$Sn, SPR:$Sm)))]>,
546 Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]> {
547 // Some single precision VFP instructions may be executed on both NEON and
548 // VFP pipelines on A8.
549 let D = VFPNeonA8Domain;
552 let TwoOperandAliasConstraint = "$Sn = $Sd" in
553 def VNMULH : AHbI<0b11100, 0b10, 1, 0,
554 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
555 IIC_fpMUL16, "vnmul", ".f16\t$Sd, $Sn, $Sm",
556 [(set (f16 HPR:$Sd), (fneg (fmul (f16 HPR:$Sn), (f16 HPR:$Sm))))]>,
557 Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]>;
559 multiclass vsel_inst<string op, bits<2> opc, int CC> {
560 let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
561 Uses = [CPSR], AddedComplexity = 4, isUnpredicable = 1 in {
562 def H : AHbInp<0b11100, opc, 0,
563 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
564 NoItinerary, !strconcat("vsel", op, ".f16\t$Sd, $Sn, $Sm"),
566 (ARMcmov (f16 HPR:$Sm), (f16 HPR:$Sn), CC, CPSR))]>,
567 Requires<[HasFullFP16]>;
569 def S : ASbInp<0b11100, opc, 0,
570 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
571 NoItinerary, !strconcat("vsel", op, ".f32\t$Sd, $Sn, $Sm"),
572 [(set SPR:$Sd, (ARMcmov SPR:$Sm, SPR:$Sn, CC, CPSR))]>,
573 Requires<[HasFPARMv8]>;
575 def D : ADbInp<0b11100, opc, 0,
576 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
577 NoItinerary, !strconcat("vsel", op, ".f64\t$Dd, $Dn, $Dm"),
579 (ARMcmov (f64 DPR:$Dm), (f64 DPR:$Dn), CC, CPSR))]>,
580 Requires<[HasFPARMv8, HasDPVFP]>;
584 // The CC constants here match ARMCC::CondCodes.
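// For reference, the ARMCC::CondCodes values used below are EQ = 0, VS = 6,
// GE = 10 and GT = 12.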
585 defm VSELGT : vsel_inst<"gt", 0b11, 12>;
586 defm VSELGE : vsel_inst<"ge", 0b10, 10>;
587 defm VSELEQ : vsel_inst<"eq", 0b00, 0>;
588 defm VSELVS : vsel_inst<"vs", 0b01, 6>;
590 multiclass vmaxmin_inst<string op, bit opc, SDNode SD> {
591 let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
592 isUnpredicable = 1 in {
593 def H : AHbInp<0b11101, 0b00, opc,
594 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
595 NoItinerary, !strconcat(op, ".f16\t$Sd, $Sn, $Sm"),
596 [(set (f16 HPR:$Sd), (SD (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
597 Requires<[HasFullFP16]>;
599 def S : ASbInp<0b11101, 0b00, opc,
600 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
601 NoItinerary, !strconcat(op, ".f32\t$Sd, $Sn, $Sm"),
602 [(set SPR:$Sd, (SD SPR:$Sn, SPR:$Sm))]>,
603 Requires<[HasFPARMv8]>;
605 def D : ADbInp<0b11101, 0b00, opc,
606 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
607 NoItinerary, !strconcat(op, ".f64\t$Dd, $Dn, $Dm"),
608 [(set DPR:$Dd, (f64 (SD (f64 DPR:$Dn), (f64 DPR:$Dm))))]>,
609 Requires<[HasFPARMv8, HasDPVFP]>;
613 defm VFP_VMAXNM : vmaxmin_inst<"vmaxnm", 0, fmaxnum>;
614 defm VFP_VMINNM : vmaxmin_inst<"vminnm", 1, fminnum>;
616 // Match reassociated forms only when sign-dependent rounding is not honored, since (-a) * b and -(a * b) may round differently otherwise.
617 def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
618 (VNMULD DPR:$a, DPR:$b)>,
619 Requires<[NoHonorSignDependentRounding,HasDPVFP]>;
620 def : Pat<(fmul (fneg SPR:$a), SPR:$b),
621 (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
623 // These are encoded as unary instructions.
624 let Defs = [FPSCR_NZCV] in {
625 def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
626 (outs), (ins DPR:$Dd, DPR:$Dm),
627 IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm", "",
628 [(set FPSCR_NZCV, (arm_cmpfpe DPR:$Dd, (f64 DPR:$Dm)))]>;
630 def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0,
631 (outs), (ins SPR:$Sd, SPR:$Sm),
632 IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm", "",
633 [(set FPSCR_NZCV, (arm_cmpfpe SPR:$Sd, SPR:$Sm))]> {
634 // Some single precision VFP instructions may be executed on both NEON and
635 // VFP pipelines on A8.
636 let D = VFPNeonA8Domain;
639 def VCMPEH : AHuI<0b11101, 0b11, 0b0100, 0b11, 0,
640 (outs), (ins HPR:$Sd, HPR:$Sm),
641 IIC_fpCMP16, "vcmpe", ".f16\t$Sd, $Sm",
642 [(set FPSCR_NZCV, (arm_cmpfpe (f16 HPR:$Sd), (f16 HPR:$Sm)))]>;
644 def VCMPD : ADuI<0b11101, 0b11, 0b0100, 0b01, 0,
645 (outs), (ins DPR:$Dd, DPR:$Dm),
646 IIC_fpCMP64, "vcmp", ".f64\t$Dd, $Dm", "",
647 [(set FPSCR_NZCV, (arm_cmpfp DPR:$Dd, (f64 DPR:$Dm)))]>;
649 def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
650 (outs), (ins SPR:$Sd, SPR:$Sm),
651 IIC_fpCMP32, "vcmp", ".f32\t$Sd, $Sm", "",
652 [(set FPSCR_NZCV, (arm_cmpfp SPR:$Sd, SPR:$Sm))]> {
653 // Some single precision VFP instructions may be executed on both NEON and
654 // VFP pipelines on A8.
655 let D = VFPNeonA8Domain;
658 def VCMPH : AHuI<0b11101, 0b11, 0b0100, 0b01, 0,
659 (outs), (ins HPR:$Sd, HPR:$Sm),
660 IIC_fpCMP16, "vcmp", ".f16\t$Sd, $Sm",
661 [(set FPSCR_NZCV, (arm_cmpfp (f16 HPR:$Sd), (f16 HPR:$Sm)))]>;
662 } // Defs = [FPSCR_NZCV]
664 //===----------------------------------------------------------------------===//
665 // FP Unary Operations.
668 def VABSD : ADuI<0b11101, 0b11, 0b0000, 0b11, 0,
669 (outs DPR:$Dd), (ins DPR:$Dm),
670 IIC_fpUNA64, "vabs", ".f64\t$Dd, $Dm", "",
671 [(set DPR:$Dd, (fabs (f64 DPR:$Dm)))]>;
673 def VABSS : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,
674 (outs SPR:$Sd), (ins SPR:$Sm),
675 IIC_fpUNA32, "vabs", ".f32\t$Sd, $Sm",
676 [(set SPR:$Sd, (fabs SPR:$Sm))]> {
677 // Some single precision VFP instructions may be executed on both NEON and
678 // VFP pipelines on A8.
679 let D = VFPNeonA8Domain;
682 def VABSH : AHuI<0b11101, 0b11, 0b0000, 0b11, 0,
683 (outs HPR:$Sd), (ins HPR:$Sm),
684 IIC_fpUNA16, "vabs", ".f16\t$Sd, $Sm",
685 [(set (f16 HPR:$Sd), (fabs (f16 HPR:$Sm)))]>;
687 let Defs = [FPSCR_NZCV] in {
688 def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
689 (outs), (ins DPR:$Dd),
690 IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0", "",
691 [(set FPSCR_NZCV, (arm_cmpfpe0 (f64 DPR:$Dd)))]> {
692 let Inst{3-0} = 0b0000;
696 def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0,
697 (outs), (ins SPR:$Sd),
698 IIC_fpCMP32, "vcmpe", ".f32\t$Sd, #0", "",
699 [(set FPSCR_NZCV, (arm_cmpfpe0 SPR:$Sd))]> {
700 let Inst{3-0} = 0b0000;
703 // Some single precision VFP instructions may be executed on both NEON and
704 // VFP pipelines on A8.
705 let D = VFPNeonA8Domain;
708 def VCMPEZH : AHuI<0b11101, 0b11, 0b0101, 0b11, 0,
709 (outs), (ins HPR:$Sd),
710 IIC_fpCMP16, "vcmpe", ".f16\t$Sd, #0",
711 [(set FPSCR_NZCV, (arm_cmpfpe0 (f16 HPR:$Sd)))]> {
712 let Inst{3-0} = 0b0000;
716 def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
717 (outs), (ins DPR:$Dd),
718 IIC_fpCMP64, "vcmp", ".f64\t$Dd, #0", "",
719 [(set FPSCR_NZCV, (arm_cmpfp0 (f64 DPR:$Dd)))]> {
720 let Inst{3-0} = 0b0000;
724 def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
725 (outs), (ins SPR:$Sd),
726 IIC_fpCMP32, "vcmp", ".f32\t$Sd, #0", "",
727 [(set FPSCR_NZCV, (arm_cmpfp0 SPR:$Sd))]> {
728 let Inst{3-0} = 0b0000;
731 // Some single precision VFP instructions may be executed on both NEON and
732 // VFP pipelines on A8.
733 let D = VFPNeonA8Domain;
736 def VCMPZH : AHuI<0b11101, 0b11, 0b0101, 0b01, 0,
737 (outs), (ins HPR:$Sd),
738 IIC_fpCMP16, "vcmp", ".f16\t$Sd, #0",
739 [(set FPSCR_NZCV, (arm_cmpfp0 (f16 HPR:$Sd)))]> {
740 let Inst{3-0} = 0b0000;
743 } // Defs = [FPSCR_NZCV]
745 def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
746 (outs DPR:$Dd), (ins SPR:$Sm),
747 IIC_fpCVTDS, "vcvt", ".f64.f32\t$Dd, $Sm", "",
748 [(set DPR:$Dd, (fpextend SPR:$Sm))]>,
749 Sched<[WriteFPCVT]> {
750 // Instruction operands.
754 // Encode instruction operands.
755 let Inst{3-0} = Sm{4-1};
757 let Inst{15-12} = Dd{3-0};
758 let Inst{22} = Dd{4};
760 let Predicates = [HasVFP2, HasDPVFP];
761 let hasSideEffects = 0;
764 // Special case encoding: bits 11-8 are 0b1011.
765 def VCVTSD : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
766 IIC_fpCVTSD, "vcvt", ".f32.f64\t$Sd, $Dm", "",
767 [(set SPR:$Sd, (fpround DPR:$Dm))]>,
768 Sched<[WriteFPCVT]> {
769 // Instruction operands.
773 // Encode instruction operands.
774 let Inst{3-0} = Dm{3-0};
776 let Inst{15-12} = Sd{4-1};
777 let Inst{22} = Sd{0};
779 let Inst{27-23} = 0b11101;
780 let Inst{21-16} = 0b110111;
781 let Inst{11-8} = 0b1011;
782 let Inst{7-6} = 0b11;
785 let Predicates = [HasVFP2, HasDPVFP];
786 let hasSideEffects = 0;
789 // Between half, single and double-precision.
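// VCVTB converts the bottom half of the 32-bit S register and VCVTT the top
// half, which is why the vector patterns below use imm_even lanes for the B
// forms and imm_odd lanes for the T forms.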
790 let hasSideEffects = 0 in
791 def VCVTBHS: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
792 /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$Sd, $Sm", "",
793 [/* Intentionally left blank, see patterns below */]>,
797 def : FP16Pat<(f32 (fpextend (f16 HPR:$Sm))),
798 (VCVTBHS (COPY_TO_REGCLASS (f16 HPR:$Sm), SPR))>;
799 def : FP16Pat<(f16_to_fp GPR:$a),
800 (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;
802 let hasSideEffects = 0 in
803 def VCVTBSH: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sda, SPR:$Sm),
804 /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$Sd, $Sm", "$Sd = $Sda",
805 [/* Intentionally left blank, see patterns below */]>,
809 def : FP16Pat<(f16 (fpround SPR:$Sm)),
810 (COPY_TO_REGCLASS (VCVTBSH (IMPLICIT_DEF), SPR:$Sm), HPR)>;
811 def : FP16Pat<(fp_to_f16 SPR:$a),
812 (i32 (COPY_TO_REGCLASS (VCVTBSH (IMPLICIT_DEF), SPR:$a), GPR))>;
813 def : FP16Pat<(insertelt (v8f16 MQPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_even:$lane),
814 (v8f16 (INSERT_SUBREG (v8f16 MQPR:$src1),
815 (VCVTBSH (EXTRACT_SUBREG (v8f16 MQPR:$src1), (SSubReg_f16_reg imm:$lane)),
817 (SSubReg_f16_reg imm:$lane)))>;
818 def : FP16Pat<(insertelt (v4f16 DPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_even:$lane),
819 (v4f16 (INSERT_SUBREG (v4f16 DPR:$src1),
820 (VCVTBSH (EXTRACT_SUBREG (v4f16 DPR:$src1), (SSubReg_f16_reg imm:$lane)),
822 (SSubReg_f16_reg imm:$lane)))>;
824 let hasSideEffects = 0 in
825 def VCVTTHS: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
826 /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$Sd, $Sm", "",
827 [/* Intentionally left blank, see patterns below */]>,
831 def : FP16Pat<(f32 (fpextend (extractelt (v8f16 MQPR:$src), imm_odd:$lane))),
832 (VCVTTHS (EXTRACT_SUBREG MQPR:$src, (SSubReg_f16_reg imm_odd:$lane)))>;
833 def : FP16Pat<(f32 (fpextend (extractelt (v4f16 DPR:$src), imm_odd:$lane))),
834 (VCVTTHS (EXTRACT_SUBREG
835 (v2f32 (COPY_TO_REGCLASS (v4f16 DPR:$src), DPR_VFP2)),
836 (SSubReg_f16_reg imm_odd:$lane)))>;
838 let hasSideEffects = 0 in
839 def VCVTTSH: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sda, SPR:$Sm),
840 /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$Sd, $Sm", "$Sd = $Sda",
841 [/* Intentionally left blank, see patterns below */]>,
845 def : FP16Pat<(insertelt (v8f16 MQPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_odd:$lane),
846 (v8f16 (INSERT_SUBREG (v8f16 MQPR:$src1),
847 (VCVTTSH (EXTRACT_SUBREG (v8f16 MQPR:$src1), (SSubReg_f16_reg imm:$lane)),
849 (SSubReg_f16_reg imm:$lane)))>;
850 def : FP16Pat<(insertelt (v4f16 DPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_odd:$lane),
851 (v4f16 (INSERT_SUBREG (v4f16 DPR:$src1),
852 (VCVTTSH (EXTRACT_SUBREG (v4f16 DPR:$src1), (SSubReg_f16_reg imm:$lane)),
854 (SSubReg_f16_reg imm:$lane)))>;
856 def VCVTBHD : ADuI<0b11101, 0b11, 0b0010, 0b01, 0,
857 (outs DPR:$Dd), (ins SPR:$Sm),
858 NoItinerary, "vcvtb", ".f64.f16\t$Dd, $Sm", "",
859 [/* Intentionally left blank, see patterns below */]>,
860 Requires<[HasFPARMv8, HasDPVFP]>,
861 Sched<[WriteFPCVT]> {
862 // Instruction operands.
865 // Encode instruction operands.
866 let Inst{3-0} = Sm{4-1};
869 let hasSideEffects = 0;
872 def : FullFP16Pat<(f64 (fpextend (f16 HPR:$Sm))),
873 (VCVTBHD (COPY_TO_REGCLASS (f16 HPR:$Sm), SPR))>,
874 Requires<[HasFPARMv8, HasDPVFP]>;
875 def : FP16Pat<(f64 (f16_to_fp GPR:$a)),
876 (VCVTBHD (COPY_TO_REGCLASS GPR:$a, SPR))>,
877 Requires<[HasFPARMv8, HasDPVFP]>;
879 def VCVTBDH : ADuI<0b11101, 0b11, 0b0011, 0b01, 0,
880 (outs SPR:$Sd), (ins SPR:$Sda, DPR:$Dm),
881 NoItinerary, "vcvtb", ".f16.f64\t$Sd, $Dm", "$Sd = $Sda",
882 [/* Intentionally left blank, see patterns below */]>,
883 Requires<[HasFPARMv8, HasDPVFP]> {
884 // Instruction operands.
888 // Encode instruction operands.
889 let Inst{3-0} = Dm{3-0};
891 let Inst{15-12} = Sd{4-1};
892 let Inst{22} = Sd{0};
894 let hasSideEffects = 0;
897 def : FullFP16Pat<(f16 (fpround DPR:$Dm)),
898 (COPY_TO_REGCLASS (VCVTBDH (IMPLICIT_DEF), DPR:$Dm), HPR)>,
899 Requires<[HasFPARMv8, HasDPVFP]>;
900 def : FP16Pat<(fp_to_f16 (f64 DPR:$a)),
901 (i32 (COPY_TO_REGCLASS (VCVTBDH (IMPLICIT_DEF), DPR:$a), GPR))>,
902 Requires<[HasFPARMv8, HasDPVFP]>;
904 def VCVTTHD : ADuI<0b11101, 0b11, 0b0010, 0b11, 0,
905 (outs DPR:$Dd), (ins SPR:$Sm),
906 NoItinerary, "vcvtt", ".f64.f16\t$Dd, $Sm", "",
907 []>, Requires<[HasFPARMv8, HasDPVFP]> {
908 // Instruction operands.
911 // Encode instruction operands.
912 let Inst{3-0} = Sm{4-1};
915 let hasSideEffects = 0;
918 def VCVTTDH : ADuI<0b11101, 0b11, 0b0011, 0b11, 0,
919 (outs SPR:$Sd), (ins SPR:$Sda, DPR:$Dm),
920 NoItinerary, "vcvtt", ".f16.f64\t$Sd, $Dm", "$Sd = $Sda",
921 []>, Requires<[HasFPARMv8, HasDPVFP]> {
922 // Instruction operands.
926 // Encode instruction operands.
927 let Inst{15-12} = Sd{4-1};
928 let Inst{22} = Sd{0};
929 let Inst{3-0} = Dm{3-0};
932 let hasSideEffects = 0;
935 multiclass vcvt_inst<string opc, bits<2> rm,
936 SDPatternOperator node = null_frag> {
937 let PostEncoderMethod = "", DecoderNamespace = "VFPV8", hasSideEffects = 0 in {
938 def SH : AHuInp<0b11101, 0b11, 0b1100, 0b11, 0,
939 (outs SPR:$Sd), (ins HPR:$Sm),
940 NoItinerary, !strconcat("vcvt", opc, ".s32.f16\t$Sd, $Sm"),
942 Requires<[HasFullFP16]> {
943 let Inst{17-16} = rm;
946 def UH : AHuInp<0b11101, 0b11, 0b1100, 0b01, 0,
947 (outs SPR:$Sd), (ins HPR:$Sm),
948 NoItinerary, !strconcat("vcvt", opc, ".u32.f16\t$Sd, $Sm"),
950 Requires<[HasFullFP16]> {
951 let Inst{17-16} = rm;
954 def SS : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
955 (outs SPR:$Sd), (ins SPR:$Sm),
956 NoItinerary, !strconcat("vcvt", opc, ".s32.f32\t$Sd, $Sm"),
958 Requires<[HasFPARMv8]> {
959 let Inst{17-16} = rm;
962 def US : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
963 (outs SPR:$Sd), (ins SPR:$Sm),
964 NoItinerary, !strconcat("vcvt", opc, ".u32.f32\t$Sd, $Sm"),
966 Requires<[HasFPARMv8]> {
967 let Inst{17-16} = rm;
970 def SD : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
971 (outs SPR:$Sd), (ins DPR:$Dm),
972 NoItinerary, !strconcat("vcvt", opc, ".s32.f64\t$Sd, $Dm"),
974 Requires<[HasFPARMv8, HasDPVFP]> {
977 let Inst{17-16} = rm;
979 // Encode instruction operands.
980 let Inst{3-0} = Dm{3-0};
985 def UD : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
986 (outs SPR:$Sd), (ins DPR:$Dm),
987 NoItinerary, !strconcat("vcvt", opc, ".u32.f64\t$Sd, $Dm"),
989 Requires<[HasFPARMv8, HasDPVFP]> {
992 let Inst{17-16} = rm;
994 // Encode instruction operands
995 let Inst{3-0} = Dm{3-0};
1001 let Predicates = [HasFPARMv8] in {
1002 let Predicates = [HasFullFP16] in {
1003 def : Pat<(i32 (fp_to_sint (node (f16 HPR:$a)))),
1005 (!cast<Instruction>(NAME#"SH") (f16 HPR:$a)),
1008 def : Pat<(i32 (fp_to_uint (node (f16 HPR:$a)))),
1010 (!cast<Instruction>(NAME#"UH") (f16 HPR:$a)),
1013 def : Pat<(i32 (fp_to_sint (node SPR:$a))),
1015 (!cast<Instruction>(NAME#"SS") SPR:$a),
1017 def : Pat<(i32 (fp_to_uint (node SPR:$a))),
1019 (!cast<Instruction>(NAME#"US") SPR:$a),
1022 let Predicates = [HasFPARMv8, HasDPVFP] in {
1023 def : Pat<(i32 (fp_to_sint (node (f64 DPR:$a)))),
1025 (!cast<Instruction>(NAME#"SD") DPR:$a),
1027 def : Pat<(i32 (fp_to_uint (node (f64 DPR:$a)))),
1029 (!cast<Instruction>(NAME#"UD") DPR:$a),
1034 defm VCVTA : vcvt_inst<"a", 0b00, fround>;
1035 defm VCVTN : vcvt_inst<"n", 0b01>;
1036 defm VCVTP : vcvt_inst<"p", 0b10, fceil>;
1037 defm VCVTM : vcvt_inst<"m", 0b11, ffloor>;
1039 def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
1040 (outs DPR:$Dd), (ins DPR:$Dm),
1041 IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm", "",
1042 [(set DPR:$Dd, (fneg (f64 DPR:$Dm)))]>;
1044 def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,
1045 (outs SPR:$Sd), (ins SPR:$Sm),
1046 IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
1047 [(set SPR:$Sd, (fneg SPR:$Sm))]> {
1048 // Some single precision VFP instructions may be executed on both NEON and
1049 // VFP pipelines on A8.
1050 let D = VFPNeonA8Domain;
1053 def VNEGH : AHuI<0b11101, 0b11, 0b0001, 0b01, 0,
1054 (outs HPR:$Sd), (ins HPR:$Sm),
1055 IIC_fpUNA16, "vneg", ".f16\t$Sd, $Sm",
1056 [(set (f16 HPR:$Sd), (fneg (f16 HPR:$Sm)))]>;
1058 multiclass vrint_inst_zrx<string opc, bit op, bit op2, SDPatternOperator node> {
1059 def H : AHuI<0b11101, 0b11, 0b0110, 0b11, 0,
1060 (outs HPR:$Sd), (ins HPR:$Sm),
1061 NoItinerary, !strconcat("vrint", opc), ".f16\t$Sd, $Sm",
1062 [(set (f16 HPR:$Sd), (node (f16 HPR:$Sm)))]>,
1063 Requires<[HasFullFP16]> {
1068 def S : ASuI<0b11101, 0b11, 0b0110, 0b11, 0,
1069 (outs SPR:$Sd), (ins SPR:$Sm),
1070 NoItinerary, !strconcat("vrint", opc), ".f32\t$Sd, $Sm", "",
1071 [(set (f32 SPR:$Sd), (node (f32 SPR:$Sm)))]>,
1072 Requires<[HasFPARMv8]> {
1076 def D : ADuI<0b11101, 0b11, 0b0110, 0b11, 0,
1077 (outs DPR:$Dd), (ins DPR:$Dm),
1078 NoItinerary, !strconcat("vrint", opc), ".f64\t$Dd, $Dm", "",
1079 [(set (f64 DPR:$Dd), (node (f64 DPR:$Dm)))]>,
1080 Requires<[HasFPARMv8, HasDPVFP]> {
1085 def : InstAlias<!strconcat("vrint", opc, "$p.f16.f16\t$Sd, $Sm"),
1086 (!cast<Instruction>(NAME#"H") SPR:$Sd, SPR:$Sm, pred:$p), 0>,
1087 Requires<[HasFullFP16]>;
1088 def : InstAlias<!strconcat("vrint", opc, "$p.f32.f32\t$Sd, $Sm"),
1089 (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm, pred:$p), 0>,
1090 Requires<[HasFPARMv8]>;
1091 def : InstAlias<!strconcat("vrint", opc, "$p.f64.f64\t$Dd, $Dm"),
1092 (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm, pred:$p), 0>,
1093 Requires<[HasFPARMv8,HasDPVFP]>;
1096 defm VRINTZ : vrint_inst_zrx<"z", 0, 1, ftrunc>;
1097 defm VRINTR : vrint_inst_zrx<"r", 0, 0, fnearbyint>;
1098 defm VRINTX : vrint_inst_zrx<"x", 1, 0, frint>;
1100 multiclass vrint_inst_anpm<string opc, bits<2> rm,
1101 SDPatternOperator node = null_frag> {
1102 let PostEncoderMethod = "", DecoderNamespace = "VFPV8",
1103 isUnpredicable = 1 in {
1104 def H : AHuInp<0b11101, 0b11, 0b1000, 0b01, 0,
1105 (outs HPR:$Sd), (ins HPR:$Sm),
1106 NoItinerary, !strconcat("vrint", opc, ".f16\t$Sd, $Sm"),
1107 [(set (f16 HPR:$Sd), (node (f16 HPR:$Sm)))]>,
1108 Requires<[HasFullFP16]> {
1109 let Inst{17-16} = rm;
1111 def S : ASuInp<0b11101, 0b11, 0b1000, 0b01, 0,
1112 (outs SPR:$Sd), (ins SPR:$Sm),
1113 NoItinerary, !strconcat("vrint", opc, ".f32\t$Sd, $Sm"),
1114 [(set (f32 SPR:$Sd), (node (f32 SPR:$Sm)))]>,
1115 Requires<[HasFPARMv8]> {
1116 let Inst{17-16} = rm;
1118 def D : ADuInp<0b11101, 0b11, 0b1000, 0b01, 0,
1119 (outs DPR:$Dd), (ins DPR:$Dm),
1120 NoItinerary, !strconcat("vrint", opc, ".f64\t$Dd, $Dm"),
1121 [(set (f64 DPR:$Dd), (node (f64 DPR:$Dm)))]>,
1122 Requires<[HasFPARMv8, HasDPVFP]> {
1123 let Inst{17-16} = rm;
1127 def : InstAlias<!strconcat("vrint", opc, ".f16.f16\t$Sd, $Sm"),
1128 (!cast<Instruction>(NAME#"H") HPR:$Sd, HPR:$Sm), 0>,
1129 Requires<[HasFullFP16]>;
1130 def : InstAlias<!strconcat("vrint", opc, ".f32.f32\t$Sd, $Sm"),
1131 (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm), 0>,
1132 Requires<[HasFPARMv8]>;
1133 def : InstAlias<!strconcat("vrint", opc, ".f64.f64\t$Dd, $Dm"),
1134 (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm), 0>,
1135 Requires<[HasFPARMv8,HasDPVFP]>;
1138 defm VRINTA : vrint_inst_anpm<"a", 0b00, fround>;
1139 defm VRINTN : vrint_inst_anpm<"n", 0b01, int_arm_neon_vrintn>;
1140 defm VRINTP : vrint_inst_anpm<"p", 0b10, fceil>;
1141 defm VRINTM : vrint_inst_anpm<"m", 0b11, ffloor>;
1143 def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
1144 (outs DPR:$Dd), (ins DPR:$Dm),
1145 IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm", "",
1146 [(set DPR:$Dd, (fsqrt (f64 DPR:$Dm)))]>,
1147 Sched<[WriteFPSQRT64]>;
1149 def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
1150 (outs SPR:$Sd), (ins SPR:$Sm),
1151 IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm", "",
1152 [(set SPR:$Sd, (fsqrt SPR:$Sm))]>,
1153 Sched<[WriteFPSQRT32]>;
1155 def VSQRTH : AHuI<0b11101, 0b11, 0b0001, 0b11, 0,
1156 (outs HPR:$Sd), (ins HPR:$Sm),
1157 IIC_fpSQRT16, "vsqrt", ".f16\t$Sd, $Sm",
1158 [(set (f16 HPR:$Sd), (fsqrt (f16 HPR:$Sm)))]>;
1160 let hasSideEffects = 0 in {
1161 let isMoveReg = 1 in {
1162 def VMOVD : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
1163 (outs DPR:$Dd), (ins DPR:$Dm),
1164 IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm", "", []>,
1165 Requires<[HasFPRegs64]>;
1167 def VMOVS : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
1168 (outs SPR:$Sd), (ins SPR:$Sm),
1169 IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm", "", []>,
1170 Requires<[HasFPRegs]>;
1173 let PostEncoderMethod = "", DecoderNamespace = "VFPV8", isUnpredicable = 1 in {
1174 def VMOVH : ASuInp<0b11101, 0b11, 0b0000, 0b01, 0,
1175 (outs SPR:$Sd), (ins SPR:$Sm),
1176 IIC_fpUNA16, "vmovx.f16\t$Sd, $Sm", []>,
1177 Requires<[HasFullFP16]>;
1179 def VINSH : ASuInp<0b11101, 0b11, 0b0000, 0b11, 0,
1180 (outs SPR:$Sd), (ins SPR:$Sda, SPR:$Sm),
1181 IIC_fpUNA16, "vins.f16\t$Sd, $Sm", []>,
1182 Requires<[HasFullFP16]> {
1183 let Constraints = "$Sd = $Sda";
1186 } // PostEncoderMethod
1189 //===----------------------------------------------------------------------===//
1190 // FP <-> GPR Copies. Int <-> FP Conversions.
1193 let isMoveReg = 1 in {
1194 def VMOVRS : AVConv2I<0b11100001, 0b1010,
1195 (outs GPR:$Rt), (ins SPR:$Sn),
1196 IIC_fpMOVSI, "vmov", "\t$Rt, $Sn",
1197 [(set GPR:$Rt, (bitconvert SPR:$Sn))]>,
1198 Requires<[HasFPRegs]>,
1199 Sched<[WriteFPMOV]> {
1200 // Instruction operands.
1204 // Encode instruction operands.
1205 let Inst{19-16} = Sn{4-1};
1206 let Inst{7} = Sn{0};
1207 let Inst{15-12} = Rt;
1209 let Inst{6-5} = 0b00;
1210 let Inst{3-0} = 0b0000;
1212 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
1214 let D = VFPNeonDomain;
1217 // Bitcast i32 -> f32. NEON prefers to use VMOVDRR.
1218 def VMOVSR : AVConv4I<0b11100000, 0b1010,
1219 (outs SPR:$Sn), (ins GPR:$Rt),
1220 IIC_fpMOVIS, "vmov", "\t$Sn, $Rt",
1221 [(set SPR:$Sn, (bitconvert GPR:$Rt))]>,
1222 Requires<[HasFPRegs, UseVMOVSR]>,
1223 Sched<[WriteFPMOV]> {
1224 // Instruction operands.
1228 // Encode instruction operands.
1229 let Inst{19-16} = Sn{4-1};
1230 let Inst{7} = Sn{0};
1231 let Inst{15-12} = Rt;
1233 let Inst{6-5} = 0b00;
1234 let Inst{3-0} = 0b0000;
1236 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
1238 let D = VFPNeonDomain;
1241 def : Pat<(arm_vmovsr GPR:$Rt), (VMOVSR GPR:$Rt)>, Requires<[HasFPRegs, UseVMOVSR]>;
1243 let hasSideEffects = 0 in {
1244 def VMOVRRD : AVConv3I<0b11000101, 0b1011,
1245 (outs GPR:$Rt, GPR:$Rt2), (ins DPR:$Dm),
1246 IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $Dm",
1247 [(set GPR:$Rt, GPR:$Rt2, (arm_fmrrd DPR:$Dm))]>,
1248 Requires<[HasFPRegs]>,
1249 Sched<[WriteFPMOV]> {
1250 // Instruction operands.
1255 // Encode instruction operands.
1256 let Inst{3-0} = Dm{3-0};
1257 let Inst{5} = Dm{4};
1258 let Inst{15-12} = Rt;
1259 let Inst{19-16} = Rt2;
1261 let Inst{7-6} = 0b00;
1263 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
1265 let D = VFPNeonDomain;
1267 // This instruction is equivalent to
1268 // $Rt = EXTRACT_SUBREG $Dm, ssub_0
1269 // $Rt2 = EXTRACT_SUBREG $Dm, ssub_1
1270 let isExtractSubreg = 1;
1273 def VMOVRRS : AVConv3I<0b11000101, 0b1010,
1274 (outs GPR:$Rt, GPR:$Rt2), (ins SPR:$src1, SPR:$src2),
1275 IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $src1, $src2",
1276 [/* For disassembly only; pattern left blank */]>,
1277 Requires<[HasFPRegs]>,
1278 Sched<[WriteFPMOV]> {
1283 // Encode instruction operands.
1284 let Inst{3-0} = src1{4-1};
1285 let Inst{5} = src1{0};
1286 let Inst{15-12} = Rt;
1287 let Inst{19-16} = Rt2;
1289 let Inst{7-6} = 0b00;
1291 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
1293 let D = VFPNeonDomain;
1294 let DecoderMethod = "DecodeVMOVRRS";
1298 // FMDHR: GPR -> SPR
1299 // FMDLR: GPR -> SPR
1301 def VMOVDRR : AVConv5I<0b11000100, 0b1011,
1302 (outs DPR:$Dm), (ins GPR:$Rt, GPR:$Rt2),
1303 IIC_fpMOVID, "vmov", "\t$Dm, $Rt, $Rt2",
1304 [(set DPR:$Dm, (arm_fmdrr GPR:$Rt, GPR:$Rt2))]>,
1305 Requires<[HasFPRegs]>,
1306 Sched<[WriteFPMOV]> {
1307 // Instruction operands.
1312 // Encode instruction operands.
1313 let Inst{3-0} = Dm{3-0};
1314 let Inst{5} = Dm{4};
1315 let Inst{15-12} = Rt;
1316 let Inst{19-16} = Rt2;
1318 let Inst{7-6} = 0b00;
1320 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
1322 let D = VFPNeonDomain;
1324 // This instruction is equivalent to
1325 // $Dm = REG_SEQUENCE $Rt, ssub_0, $Rt2, ssub_1
1326 let isRegSequence = 1;
1329 // Hoist an fabs or a fneg of a value coming from integer registers
1330 // and do the fabs/fneg on the integer value. This is never a loss
1331 // and could enable the conversion to float to be removed completely.
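// Concretely (see the patterns below): fabs only needs the sign bit of the
// high word cleared (BFC/t2BFC with a 0x7FFFFFFF mask) and fneg only needs it
// flipped (EOR/t2EOR with 0x80000000) before the two GPRs are recombined with
// VMOVDRR.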
1332 def : Pat<(fabs (arm_fmdrr GPR:$Rl, GPR:$Rh)),
1333 (VMOVDRR GPR:$Rl, (BFC GPR:$Rh, (i32 0x7FFFFFFF)))>,
1334 Requires<[IsARM, HasV6T2]>;
1335 def : Pat<(fabs (arm_fmdrr GPR:$Rl, GPR:$Rh)),
1336 (VMOVDRR GPR:$Rl, (t2BFC GPR:$Rh, (i32 0x7FFFFFFF)))>,
1337 Requires<[IsThumb2, HasV6T2]>;
1338 def : Pat<(fneg (arm_fmdrr GPR:$Rl, GPR:$Rh)),
1339 (VMOVDRR GPR:$Rl, (EORri GPR:$Rh, (i32 0x80000000)))>,
1341 def : Pat<(fneg (arm_fmdrr GPR:$Rl, GPR:$Rh)),
1342 (VMOVDRR GPR:$Rl, (t2EORri GPR:$Rh, (i32 0x80000000)))>,
1343 Requires<[IsThumb2]>;
1345 let hasSideEffects = 0 in
1346 def VMOVSRR : AVConv5I<0b11000100, 0b1010,
1347 (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
1348 IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
1349 [/* For disassembly only; pattern left blank */]>,
1350 Requires<[HasFPRegs]>,
1351 Sched<[WriteFPMOV]> {
1352 // Instruction operands.
1357 // Encode instruction operands.
1358 let Inst{3-0} = dst1{4-1};
1359 let Inst{5} = dst1{0};
1360 let Inst{15-12} = src1;
1361 let Inst{19-16} = src2;
1363 let Inst{7-6} = 0b00;
1365 // Some single precision VFP instructions may be executed on both NEON and VFP pipelines.
1367 let D = VFPNeonDomain;
1369 let DecoderMethod = "DecodeVMOVSRR";
1372 // Move H->R, clearing top 16 bits
1373 def VMOVRH : AVConv2I<0b11100001, 0b1001,
1374 (outs rGPR:$Rt), (ins HPR:$Sn),
1375 IIC_fpMOVSI, "vmov", ".f16\t$Rt, $Sn",
1377 Requires<[HasFPRegs16]>,
1378 Sched<[WriteFPMOV]> {
1379 // Instruction operands.
1383 // Encode instruction operands.
1384 let Inst{19-16} = Sn{4-1};
1385 let Inst{7} = Sn{0};
1386 let Inst{15-12} = Rt;
1388 let Inst{6-5} = 0b00;
1389 let Inst{3-0} = 0b0000;
1391 let isUnpredicable = 1;
1394 // Move R->H, clearing top 16 bits
1395 def VMOVHR : AVConv4I<0b11100000, 0b1001,
1396 (outs HPR:$Sn), (ins rGPR:$Rt),
1397 IIC_fpMOVIS, "vmov", ".f16\t$Sn, $Rt",
1399 Requires<[HasFPRegs16]>,
1400 Sched<[WriteFPMOV]> {
1401 // Instruction operands.
1405 // Encode instruction operands.
1406 let Inst{19-16} = Sn{4-1};
1407 let Inst{7} = Sn{0};
1408 let Inst{15-12} = Rt;
1410 let Inst{6-5} = 0b00;
1411 let Inst{3-0} = 0b0000;
1413 let isUnpredicable = 1;
1416 def : FPRegs16Pat<(arm_vmovrh (f16 HPR:$Sn)), (VMOVRH (f16 HPR:$Sn))>;
1417 def : FPRegs16Pat<(arm_vmovrh (bf16 HPR:$Sn)), (VMOVRH (bf16 HPR:$Sn))>;
1418 def : FPRegs16Pat<(f16 (arm_vmovhr rGPR:$Rt)), (VMOVHR rGPR:$Rt)>;
1419 def : FPRegs16Pat<(bf16 (arm_vmovhr rGPR:$Rt)), (VMOVHR rGPR:$Rt)>;
1421 // FMRDH: SPR -> GPR
1422 // FMRDL: SPR -> GPR
1423 // FMRRS: SPR -> GPR
1424 // FMRX: SPR system reg -> GPR
1425 // FMSRR: GPR -> SPR
1426 // FMXR: GPR -> VFP system reg
1431 class AVConv1IDs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1432 bits<4> opcod4, dag oops, dag iops,
1433 InstrItinClass itin, string opc, string asm,
1435 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1437 // Instruction operands.
1441 // Encode instruction operands.
1442 let Inst{3-0} = Sm{4-1};
1443 let Inst{5} = Sm{0};
1444 let Inst{15-12} = Dd{3-0};
1445 let Inst{22} = Dd{4};
1447 let Predicates = [HasVFP2, HasDPVFP];
1448 let hasSideEffects = 0;
1451 class AVConv1InSs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1452 bits<4> opcod4, dag oops, dag iops,InstrItinClass itin,
1453 string opc, string asm, list<dag> pattern>
1454 : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1456 // Instruction operands.
1460 // Encode instruction operands.
1461 let Inst{3-0} = Sm{4-1};
1462 let Inst{5} = Sm{0};
1463 let Inst{15-12} = Sd{4-1};
1464 let Inst{22} = Sd{0};
1466 let hasSideEffects = 0;
1469 class AVConv1IHs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1470 bits<4> opcod4, dag oops, dag iops,
1471 InstrItinClass itin, string opc, string asm,
1473 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1475 // Instruction operands.
1479 // Encode instruction operands.
1480 let Inst{3-0} = Sm{4-1};
1481 let Inst{5} = Sm{0};
1482 let Inst{15-12} = Sd{4-1};
1483 let Inst{22} = Sd{0};
1485 let Predicates = [HasFullFP16];
1486 let hasSideEffects = 0;
1489 def VSITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
1490 (outs DPR:$Dd), (ins SPR:$Sm),
1491 IIC_fpCVTID, "vcvt", ".f64.s32\t$Dd, $Sm",
1493 Sched<[WriteFPCVT]> {
1494 let Inst{7} = 1; // s32
1497 let Predicates=[HasVFP2, HasDPVFP] in {
1498 def : VFPPat<(f64 (sint_to_fp GPR:$a)),
1499 (VSITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;
1501 def : VFPPat<(f64 (sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
1502 (VSITOD (VLDRS addrmode5:$a))>;
1505 def VSITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
1506 (outs SPR:$Sd),(ins SPR:$Sm),
1507 IIC_fpCVTIS, "vcvt", ".f32.s32\t$Sd, $Sm",
1509 Sched<[WriteFPCVT]> {
1510 let Inst{7} = 1; // s32
1512 // Some single precision VFP instructions may be executed on both NEON and
1513 // VFP pipelines on A8.
1514 let D = VFPNeonA8Domain;
1517 def : VFPNoNEONPat<(f32 (sint_to_fp GPR:$a)),
1518 (VSITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;
1520 def : VFPNoNEONPat<(f32 (sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
1521 (VSITOS (VLDRS addrmode5:$a))>;
1523 def VSITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
1524 (outs HPR:$Sd), (ins SPR:$Sm),
1525 IIC_fpCVTIH, "vcvt", ".f16.s32\t$Sd, $Sm",
1527 Sched<[WriteFPCVT]> {
1528 let Inst{7} = 1; // s32
1529 let isUnpredicable = 1;
1532 def : VFPNoNEONPat<(f16 (sint_to_fp GPR:$a)),
1533 (VSITOH (COPY_TO_REGCLASS GPR:$a, SPR))>;
1535 def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
1536 (outs DPR:$Dd), (ins SPR:$Sm),
1537 IIC_fpCVTID, "vcvt", ".f64.u32\t$Dd, $Sm",
1539 Sched<[WriteFPCVT]> {
1540 let Inst{7} = 0; // u32
1543 let Predicates=[HasVFP2, HasDPVFP] in {
1544 def : VFPPat<(f64 (uint_to_fp GPR:$a)),
1545 (VUITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;
1547 def : VFPPat<(f64 (uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
1548 (VUITOD (VLDRS addrmode5:$a))>;
1551 def VUITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
1552 (outs SPR:$Sd), (ins SPR:$Sm),
1553 IIC_fpCVTIS, "vcvt", ".f32.u32\t$Sd, $Sm",
1555 Sched<[WriteFPCVT]> {
1556 let Inst{7} = 0; // u32
1558 // Some single precision VFP instructions may be executed on both NEON and
1559 // VFP pipelines on A8.
1560 let D = VFPNeonA8Domain;
1563 def : VFPNoNEONPat<(f32 (uint_to_fp GPR:$a)),
1564 (VUITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;
1566 def : VFPNoNEONPat<(f32 (uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
1567 (VUITOS (VLDRS addrmode5:$a))>;
1569 def VUITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
1570 (outs HPR:$Sd), (ins SPR:$Sm),
1571 IIC_fpCVTIH, "vcvt", ".f16.u32\t$Sd, $Sm",
1573 Sched<[WriteFPCVT]> {
1574 let Inst{7} = 0; // u32
1575 let isUnpredicable = 1;
1578 def : VFPNoNEONPat<(f16 (uint_to_fp GPR:$a)),
1579 (VUITOH (COPY_TO_REGCLASS GPR:$a, SPR))>;
1583 class AVConv1IsD_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1584 bits<4> opcod4, dag oops, dag iops,
1585 InstrItinClass itin, string opc, string asm,
1587 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1589 // Instruction operands.
1593 // Encode instruction operands.
1594 let Inst{3-0} = Dm{3-0};
1595 let Inst{5} = Dm{4};
1596 let Inst{15-12} = Sd{4-1};
1597 let Inst{22} = Sd{0};
1599 let Predicates = [HasVFP2, HasDPVFP];
1600 let hasSideEffects = 0;
1603 class AVConv1InsS_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1604 bits<4> opcod4, dag oops, dag iops,
1605 InstrItinClass itin, string opc, string asm,
1607 : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1609 // Instruction operands.
1613 // Encode instruction operands.
1614 let Inst{3-0} = Sm{4-1};
1615 let Inst{5} = Sm{0};
1616 let Inst{15-12} = Sd{4-1};
1617 let Inst{22} = Sd{0};
1619 let hasSideEffects = 0;
1622 class AVConv1IsH_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1623 bits<4> opcod4, dag oops, dag iops,
1624 InstrItinClass itin, string opc, string asm,
1626 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1628 // Instruction operands.
1632 // Encode instruction operands.
1633 let Inst{3-0} = Sm{4-1};
1634 let Inst{5} = Sm{0};
1635 let Inst{15-12} = Sd{4-1};
1636 let Inst{22} = Sd{0};
1638 let Predicates = [HasFullFP16];
1639 let hasSideEffects = 0;
1642 // Always set Z bit in the instruction, i.e. "round towards zero" variants.
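// These truncating forms are what the fp_to_sint/fp_to_uint patterns below
// select (C-style casts round towards zero); the FPSCR-directed "vcvtr"
// variants further down are selected only via the int_arm_vcvtr and
// int_arm_vcvtru intrinsics.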
1643 def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
1644 (outs SPR:$Sd), (ins DPR:$Dm),
1645 IIC_fpCVTDI, "vcvt", ".s32.f64\t$Sd, $Dm",
1647 Sched<[WriteFPCVT]> {
1648 let Inst{7} = 1; // Z bit
1651 let Predicates=[HasVFP2, HasDPVFP] in {
1652 def : VFPPat<(i32 (fp_to_sint (f64 DPR:$a))),
1653 (COPY_TO_REGCLASS (VTOSIZD DPR:$a), GPR)>;
1654 def : VFPPat<(i32 (fp_to_sint_sat (f64 DPR:$a), i32)),
1655 (COPY_TO_REGCLASS (VTOSIZD DPR:$a), GPR)>;
1657 def : VFPPat<(alignedstore32 (i32 (fp_to_sint (f64 DPR:$a))), addrmode5:$ptr),
1658 (VSTRS (VTOSIZD DPR:$a), addrmode5:$ptr)>;
1659 def : VFPPat<(alignedstore32 (i32 (fp_to_sint_sat (f64 DPR:$a), i32)), addrmode5:$ptr),
1660 (VSTRS (VTOSIZD DPR:$a), addrmode5:$ptr)>;
1663 def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
1664 (outs SPR:$Sd), (ins SPR:$Sm),
1665 IIC_fpCVTSI, "vcvt", ".s32.f32\t$Sd, $Sm",
1667 Sched<[WriteFPCVT]> {
1668 let Inst{7} = 1; // Z bit
1670 // Some single precision VFP instructions may be executed on both NEON and
1671 // VFP pipelines on A8.
1672 let D = VFPNeonA8Domain;
1675 def : VFPNoNEONPat<(i32 (fp_to_sint SPR:$a)),
1676 (COPY_TO_REGCLASS (VTOSIZS SPR:$a), GPR)>;
1677 def : VFPPat<(i32 (fp_to_sint_sat SPR:$a, i32)),
1678 (COPY_TO_REGCLASS (VTOSIZS SPR:$a), GPR)>;
1680 def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_sint (f32 SPR:$a))),
1682 (VSTRS (VTOSIZS SPR:$a), addrmode5:$ptr)>;
1683 def : VFPPat<(alignedstore32 (i32 (fp_to_sint_sat (f32 SPR:$a), i32)),
1685 (VSTRS (VTOSIZS SPR:$a), addrmode5:$ptr)>;
1687 def VTOSIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001,
1688 (outs SPR:$Sd), (ins HPR:$Sm),
1689 IIC_fpCVTHI, "vcvt", ".s32.f16\t$Sd, $Sm",
1691 Sched<[WriteFPCVT]> {
1692 let Inst{7} = 1; // Z bit
1693 let isUnpredicable = 1;
1696 def : VFPNoNEONPat<(i32 (fp_to_sint (f16 HPR:$a))),
1697 (COPY_TO_REGCLASS (VTOSIZH (f16 HPR:$a)), GPR)>;
1698 def : VFPPat<(i32 (fp_to_sint_sat (f16 HPR:$a), i32)),
1699 (COPY_TO_REGCLASS (VTOSIZH (f16 HPR:$a)), GPR)>;
1701 def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
1702 (outs SPR:$Sd), (ins DPR:$Dm),
1703 IIC_fpCVTDI, "vcvt", ".u32.f64\t$Sd, $Dm",
1705 Sched<[WriteFPCVT]> {
1706 let Inst{7} = 1; // Z bit
1709 let Predicates=[HasVFP2, HasDPVFP] in {
1710 def : VFPPat<(i32 (fp_to_uint (f64 DPR:$a))),
1711 (COPY_TO_REGCLASS (VTOUIZD DPR:$a), GPR)>;
1712 def : VFPPat<(i32 (fp_to_uint_sat (f64 DPR:$a), i32)),
1713 (COPY_TO_REGCLASS (VTOUIZD DPR:$a), GPR)>;
1715 def : VFPPat<(alignedstore32 (i32 (fp_to_uint (f64 DPR:$a))), addrmode5:$ptr),
1716 (VSTRS (VTOUIZD DPR:$a), addrmode5:$ptr)>;
1717 def : VFPPat<(alignedstore32 (i32 (fp_to_uint_sat (f64 DPR:$a), i32)), addrmode5:$ptr),
1718 (VSTRS (VTOUIZD DPR:$a), addrmode5:$ptr)>;
1721 def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
1722 (outs SPR:$Sd), (ins SPR:$Sm),
1723 IIC_fpCVTSI, "vcvt", ".u32.f32\t$Sd, $Sm",
1725 Sched<[WriteFPCVT]> {
1726 let Inst{7} = 1; // Z bit
1728 // Some single precision VFP instructions may be executed on both NEON and
1729 // VFP pipelines on A8.
1730 let D = VFPNeonA8Domain;
1733 def : VFPNoNEONPat<(i32 (fp_to_uint SPR:$a)),
1734 (COPY_TO_REGCLASS (VTOUIZS SPR:$a), GPR)>;
1735 def : VFPPat<(i32 (fp_to_uint_sat SPR:$a, i32)),
1736 (COPY_TO_REGCLASS (VTOUIZS SPR:$a), GPR)>;
1738 def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_uint (f32 SPR:$a))),
1740 (VSTRS (VTOUIZS SPR:$a), addrmode5:$ptr)>;
1741 def : VFPPat<(alignedstore32 (i32 (fp_to_uint_sat (f32 SPR:$a), i32)),
1743 (VSTRS (VTOUIZS SPR:$a), addrmode5:$ptr)>;
1745 def VTOUIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001,
1746 (outs SPR:$Sd), (ins HPR:$Sm),
1747 IIC_fpCVTHI, "vcvt", ".u32.f16\t$Sd, $Sm",
1749 Sched<[WriteFPCVT]> {
1750 let Inst{7} = 1; // Z bit
1751 let isUnpredicable = 1;
1754 def : VFPNoNEONPat<(i32 (fp_to_uint (f16 HPR:$a))),
1755 (COPY_TO_REGCLASS (VTOUIZH (f16 HPR:$a)), GPR)>;
1756 def : VFPPat<(i32 (fp_to_uint_sat (f16 HPR:$a), i32)),
1757 (COPY_TO_REGCLASS (VTOUIZH (f16 HPR:$a)), GPR)>;
1759 // And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
1760 let Uses = [FPSCR] in {
1761 def VTOSIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
1762 (outs SPR:$Sd), (ins DPR:$Dm),
1763 IIC_fpCVTDI, "vcvtr", ".s32.f64\t$Sd, $Dm",
1764 [(set SPR:$Sd, (int_arm_vcvtr (f64 DPR:$Dm)))]>,
1765 Sched<[WriteFPCVT]> {
1766 let Inst{7} = 0; // Z bit
1769 def VTOSIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
1770 (outs SPR:$Sd), (ins SPR:$Sm),
1771 IIC_fpCVTSI, "vcvtr", ".s32.f32\t$Sd, $Sm",
1772 [(set SPR:$Sd, (int_arm_vcvtr SPR:$Sm))]>,
1773 Sched<[WriteFPCVT]> {
  let Inst{7} = 0; // Z bit
}
1777 def VTOSIRH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001,
1778 (outs SPR:$Sd), (ins SPR:$Sm),
                                IIC_fpCVTHI, "vcvtr", ".s32.f16\t$Sd, $Sm",
                                []>,
1781 Sched<[WriteFPCVT]> {
1782 let Inst{7} = 0; // Z bit
  let isUnpredicable = 1;
}
1786 def VTOUIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
1787 (outs SPR:$Sd), (ins DPR:$Dm),
1788 IIC_fpCVTDI, "vcvtr", ".u32.f64\t$Sd, $Dm",
1789 [(set SPR:$Sd, (int_arm_vcvtru(f64 DPR:$Dm)))]>,
1790 Sched<[WriteFPCVT]> {
  let Inst{7} = 0; // Z bit
}
1794 def VTOUIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
1795 (outs SPR:$Sd), (ins SPR:$Sm),
1796 IIC_fpCVTSI, "vcvtr", ".u32.f32\t$Sd, $Sm",
1797 [(set SPR:$Sd, (int_arm_vcvtru SPR:$Sm))]>,
1798 Sched<[WriteFPCVT]> {
  let Inst{7} = 0; // Z bit
}
1802 def VTOUIRH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001,
1803 (outs SPR:$Sd), (ins SPR:$Sm),
                                IIC_fpCVTHI, "vcvtr", ".u32.f16\t$Sd, $Sm",
                                []>,
1806 Sched<[WriteFPCVT]> {
1807 let Inst{7} = 0; // Z bit
  let isUnpredicable = 1;
}
} // End of 'let Uses = [FPSCR] in'
// v8.3-A JavaScript convert to signed fixed-point
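// VJCVT performs the ECMAScript-style conversion: the double is rounded toward
// zero and the low 32 bits of the resulting integer are kept, e.g.
// "vjcvt.s32.f64 s0, d0".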
1813 def VJCVT : AVConv1IsD_Encode<0b11101, 0b11, 0b1001, 0b1011,
1814 (outs SPR:$Sd), (ins DPR:$Dm),
1815 IIC_fpCVTDI, "vjcvt", ".s32.f64\t$Sd, $Dm",
1817 Requires<[HasFPARMv8, HasV8_3a]> {
  let Inst{7} = 1; // Z bit
}
1821 // Convert between floating-point and fixed-point
1822 // Data type for fixed-point naming convention:
1823 // S16 (U=0, sx=0) -> SH
1824 // U16 (U=1, sx=0) -> UH
1825 // S32 (U=0, sx=1) -> SL
1826 // U32 (U=1, sx=1) -> UL
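// For example, VTOSHS below is the f32 -> S16 conversion
// ("vcvt.s16.f32 s0, s0, #8") and VSLTOD is the S32 -> f64 conversion
// ("vcvt.f64.s32 d0, d0, #16"). These encodings have a single register field
// that serves as both source and destination, which is why $a is tied to $dst.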
1828 let Constraints = "$a = $dst" in {
1830 // FP to Fixed-Point:
1832 // Single Precision register
1833 class AVConv1XInsS_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
1834 bit op5, dag oops, dag iops, InstrItinClass itin,
1835 string opc, string asm, list<dag> pattern>
1836 : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
  bits<5> dst;
  // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
1839 let Inst{22} = dst{0};
1840 let Inst{15-12} = dst{4-1};
  let hasSideEffects = 0;
}
1845 // Double Precision register
1846 class AVConv1XInsD_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
1847 bit op5, dag oops, dag iops, InstrItinClass itin,
1848 string opc, string asm, list<dag> pattern>
1849 : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
  bits<5> dst;
  // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
1852 let Inst{22} = dst{4};
1853 let Inst{15-12} = dst{3-0};
1855 let hasSideEffects = 0;
  let Predicates = [HasVFP2, HasDPVFP];
}
1859 let isUnpredicable = 1 in {
1861 def VTOSHH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1001, 0,
1862 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1863 IIC_fpCVTHI, "vcvt", ".s16.f16\t$dst, $a, $fbits", []>,
1864 Requires<[HasFullFP16]>,
1865 Sched<[WriteFPCVT]>;
1867 def VTOUHH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1001, 0,
1868 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1869 IIC_fpCVTHI, "vcvt", ".u16.f16\t$dst, $a, $fbits", []>,
1870 Requires<[HasFullFP16]>,
1871 Sched<[WriteFPCVT]>;
1873 def VTOSLH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1001, 1,
1874 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1875 IIC_fpCVTHI, "vcvt", ".s32.f16\t$dst, $a, $fbits", []>,
1876 Requires<[HasFullFP16]>,
1877 Sched<[WriteFPCVT]>;
1879 def VTOULH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1001, 1,
1880 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1881 IIC_fpCVTHI, "vcvt", ".u32.f16\t$dst, $a, $fbits", []>,
1882 Requires<[HasFullFP16]>,
1883 Sched<[WriteFPCVT]>;
1885 } // End of 'let isUnpredicable = 1 in'
1887 def VTOSHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 0,
1888 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1889 IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits", []>,
1890 Sched<[WriteFPCVT]> {
1891 // Some single precision VFP instructions may be executed on both NEON and
1892 // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
1896 def VTOUHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 0,
1897 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1898 IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits", []>,
1899 Sched<[WriteFPCVT]> {
1900 // Some single precision VFP instructions may be executed on both NEON and
1901 // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
1905 def VTOSLS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 1,
1906 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1907 IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits", []>,
1908 Sched<[WriteFPCVT]> {
1909 // Some single precision VFP instructions may be executed on both NEON and
1910 // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
1914 def VTOULS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 1,
1915 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1916 IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits", []>,
1917 Sched<[WriteFPCVT]> {
1918 // Some single precision VFP instructions may be executed on both NEON and
1919 // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
1923 def VTOSHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 0,
1924 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
1925 IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits", []>,
1926 Sched<[WriteFPCVT]>;
1928 def VTOUHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 0,
1929 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
1930 IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits", []>,
1931 Sched<[WriteFPCVT]>;
1933 def VTOSLD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 1,
1934 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
1935 IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits", []>,
1936 Sched<[WriteFPCVT]>;
1938 def VTOULD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 1,
1939 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
1940 IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits", []>,
1941 Sched<[WriteFPCVT]>;
1943 // Fixed-Point to FP:
1945 let isUnpredicable = 1 in {
1947 def VSHTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1001, 0,
1948 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1949 IIC_fpCVTIH, "vcvt", ".f16.s16\t$dst, $a, $fbits", []>,
1950 Requires<[HasFullFP16]>,
1951 Sched<[WriteFPCVT]>;
1953 def VUHTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1001, 0,
1954 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1955 IIC_fpCVTIH, "vcvt", ".f16.u16\t$dst, $a, $fbits", []>,
1956 Requires<[HasFullFP16]>,
1957 Sched<[WriteFPCVT]>;
1959 def VSLTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1001, 1,
1960 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1961 IIC_fpCVTIH, "vcvt", ".f16.s32\t$dst, $a, $fbits", []>,
1962 Requires<[HasFullFP16]>,
1963 Sched<[WriteFPCVT]>;
1965 def VULTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1001, 1,
1966 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1967 IIC_fpCVTIH, "vcvt", ".f16.u32\t$dst, $a, $fbits", []>,
1968 Requires<[HasFullFP16]>,
1969 Sched<[WriteFPCVT]>;
1971 } // End of 'let isUnpredicable = 1 in'
1973 def VSHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 0,
1974 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1975 IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits", []>,
1976 Sched<[WriteFPCVT]> {
1977 // Some single precision VFP instructions may be executed on both NEON and
1978 // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
1982 def VUHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 0,
1983 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1984 IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits", []>,
1985 Sched<[WriteFPCVT]> {
1986 // Some single precision VFP instructions may be executed on both NEON and
1987 // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
1991 def VSLTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 1,
1992 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1993 IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits", []>,
1994 Sched<[WriteFPCVT]> {
1995 // Some single precision VFP instructions may be executed on both NEON and
1996 // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
2000 def VULTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 1,
2001 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
2002 IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits", []>,
2003 Sched<[WriteFPCVT]> {
2004 // Some single precision VFP instructions may be executed on both NEON and
2005 // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
2009 def VSHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 0,
2010 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
2011 IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits", []>,
2012 Sched<[WriteFPCVT]>;
2014 def VUHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 0,
2015 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
2016 IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits", []>,
2017 Sched<[WriteFPCVT]>;
2019 def VSLTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 1,
2020 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
2021 IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits", []>,
2022 Sched<[WriteFPCVT]>;
2024 def VULTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 1,
2025 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
2026 IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits", []>,
2027 Sched<[WriteFPCVT]>;
2029 } // End of 'let Constraints = "$a = $dst" in'
2031 // BFloat16 - Single precision, unary, predicated
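// VCVTB writes the converted BF16 value to the bottom half of Sd and VCVTT to
// the top half, leaving the other half of Sd unchanged (hence the $dst = $Sd
// tie), e.g.:
//   vcvtb.bf16.f32 s0, s1   @ s0[15:0]  = bf16(s1)
//   vcvtt.bf16.f32 s0, s1   @ s0[31:16] = bf16(s1)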
2032 class BF16_VCVT<string opc, bits<2> op7_6>
2033 : VFPAI<(outs SPR:$Sd), (ins SPR:$dst, SPR:$Sm),
2034 VFPUnaryFrm, NoItinerary,
2035 opc, ".bf16.f32\t$Sd, $Sm", "", []>,
2036 RegConstraint<"$dst = $Sd">,
    Requires<[HasBF16]>,
    Sched<[]> {
  bits<5> Sd;
  bits<5> Sm;
2042 // Encode instruction operands.
2043 let Inst{3-0} = Sm{4-1};
2044 let Inst{5} = Sm{0};
2045 let Inst{15-12} = Sd{4-1};
2046 let Inst{22} = Sd{0};
2048 let Inst{27-23} = 0b11101; // opcode1
2049 let Inst{21-20} = 0b11; // opcode2
2050 let Inst{19-16} = 0b0011; // opcode3
2051 let Inst{11-8} = 0b1001;
2052 let Inst{7-6} = op7_6;
2055 let DecoderNamespace = "VFPV8";
  let hasSideEffects = 0;
}
2059 def BF16_VCVTB : BF16_VCVT<"vcvtb", 0b01>;
2060 def BF16_VCVTT : BF16_VCVT<"vcvtt", 0b11>;
2062 //===----------------------------------------------------------------------===//
2063 // FP Multiply-Accumulate Operations.
2066 def VMLAD : ADbI<0b11100, 0b00, 0, 0,
2067 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2068 IIC_fpMAC64, "vmla", ".f64\t$Dd, $Dn, $Dm",
2069 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
2070 (f64 DPR:$Ddin)))]>,
2071 RegConstraint<"$Ddin = $Dd">,
2072 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
2073 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2075 def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
2076 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2077 IIC_fpMAC32, "vmla", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
                                           SPR:$Sdin))]>,
2080 RegConstraint<"$Sdin = $Sd">,
2081 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
2082 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
2083 // Some single precision VFP instructions may be executed on both NEON and
2084 // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
2088 def VMLAH : AHbI<0b11100, 0b00, 0, 0,
2089 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2090 IIC_fpMAC16, "vmla", ".f16\t$Sd, $Sn, $Sm",
2091 [(set (f16 HPR:$Sd), (fadd_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)),
2092 (f16 HPR:$Sdin)))]>,
2093 RegConstraint<"$Sdin = $Sd">,
2094 Requires<[HasFullFP16,UseFPVMLx]>;
2096 def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
2097 (VMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
2098 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
2099 def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
2100 (VMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
2101 Requires<[HasVFP2,DontUseNEONForFP, UseFPVMLx]>;
2102 def : Pat<(fadd_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)),
2103 (VMLAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
2104 Requires<[HasFullFP16,DontUseNEONForFP, UseFPVMLx]>;
2107 def VMLSD : ADbI<0b11100, 0b00, 1, 0,
2108 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2109 IIC_fpMAC64, "vmls", ".f64\t$Dd, $Dn, $Dm",
2110 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
2111 (f64 DPR:$Ddin)))]>,
2112 RegConstraint<"$Ddin = $Dd">,
2113 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
2114 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2116 def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
2117 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2118 IIC_fpMAC32, "vmls", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
2121 RegConstraint<"$Sdin = $Sd">,
2122 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
2123 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
2124 // Some single precision VFP instructions may be executed on both NEON and
2125 // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
2129 def VMLSH : AHbI<0b11100, 0b00, 1, 0,
2130 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2131 IIC_fpMAC16, "vmls", ".f16\t$Sd, $Sn, $Sm",
2132 [(set (f16 HPR:$Sd), (fadd_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))),
2133 (f16 HPR:$Sdin)))]>,
2134 RegConstraint<"$Sdin = $Sd">,
2135 Requires<[HasFullFP16,UseFPVMLx]>;
2137 def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
2138 (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
2139 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
2140 def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
2141 (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
2142 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
2143 def : Pat<(fsub_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)),
2144 (VMLSH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
2145 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
2147 def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
2148 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2149 IIC_fpMAC64, "vnmla", ".f64\t$Dd, $Dn, $Dm",
2150 [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
2151 (f64 DPR:$Ddin)))]>,
2152 RegConstraint<"$Ddin = $Dd">,
2153 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
2154 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2156 def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
2157 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2158 IIC_fpMAC32, "vnmla", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
2161 RegConstraint<"$Sdin = $Sd">,
2162 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
2163 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
2164 // Some single precision VFP instructions may be executed on both NEON and
2165 // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
2169 def VNMLAH : AHbI<0b11100, 0b01, 1, 0,
2170 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2171 IIC_fpMAC16, "vnmla", ".f16\t$Sd, $Sn, $Sm",
2172 [(set (f16 HPR:$Sd), (fsub_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))),
2173 (f16 HPR:$Sdin)))]>,
2174 RegConstraint<"$Sdin = $Sd">,
2175 Requires<[HasFullFP16,UseFPVMLx]>;
2177 // (-(a * b) - dst) -> -(dst + (a * b))
2178 def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
2179 (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
2180 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
2181 def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
2182 (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
2183 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
2184 def : Pat<(fsub_mlx (fneg (fmul_su (f16 HPR:$a), HPR:$b)), HPR:$dstin),
2185 (VNMLAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
2186 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
2188 // (-dst - (a * b)) -> -(dst + (a * b))
2189 def : Pat<(fsub_mlx (fneg DPR:$dstin), (fmul_su DPR:$a, (f64 DPR:$b))),
2190 (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
2191 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
2192 def : Pat<(fsub_mlx (fneg SPR:$dstin), (fmul_su SPR:$a, SPR:$b)),
2193 (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
2194 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
2195 def : Pat<(fsub_mlx (fneg HPR:$dstin), (fmul_su (f16 HPR:$a), HPR:$b)),
2196 (VNMLAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
2197 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
2199 def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
2200 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2201 IIC_fpMAC64, "vnmls", ".f64\t$Dd, $Dn, $Dm",
2202 [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
2203 (f64 DPR:$Ddin)))]>,
2204 RegConstraint<"$Ddin = $Dd">,
2205 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
2206 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2208 def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
2209 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2210 IIC_fpMAC32, "vnmls", ".f32\t$Sd, $Sn, $Sm",
2211 [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
2212 RegConstraint<"$Sdin = $Sd">,
2213 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
2214 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
2215 // Some single precision VFP instructions may be executed on both NEON and
2216 // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
2220 def VNMLSH : AHbI<0b11100, 0b01, 0, 0,
2221 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2222 IIC_fpMAC16, "vnmls", ".f16\t$Sd, $Sn, $Sm",
2223 [(set (f16 HPR:$Sd), (fsub_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)), (f16 HPR:$Sdin)))]>,
2224 RegConstraint<"$Sdin = $Sd">,
2225 Requires<[HasFullFP16,UseFPVMLx]>;
2227 def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
2228 (VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
2229 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
2230 def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
2231 (VNMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
2232 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
2233 def : Pat<(fsub_mlx (fmul_su (f16 HPR:$a), HPR:$b), HPR:$dstin),
2234 (VNMLSH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
2235 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
2237 //===----------------------------------------------------------------------===//
2238 // Fused FP Multiply-Accumulate Operations.
2240 def VFMAD : ADbI<0b11101, 0b10, 0, 0,
2241 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2242 IIC_fpFMAC64, "vfma", ".f64\t$Dd, $Dn, $Dm",
2243 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
2244 (f64 DPR:$Ddin)))]>,
2245 RegConstraint<"$Ddin = $Dd">,
2246 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
2247 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2249 def VFMAS : ASbIn<0b11101, 0b10, 0, 0,
2250 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2251 IIC_fpFMAC32, "vfma", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
                                           SPR:$Sdin))]>,
2254 RegConstraint<"$Sdin = $Sd">,
2255 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
2256 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines.
}
2261 def VFMAH : AHbI<0b11101, 0b10, 0, 0,
2262 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2263 IIC_fpFMAC16, "vfma", ".f16\t$Sd, $Sn, $Sm",
2264 [(set (f16 HPR:$Sd), (fadd_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)),
2265 (f16 HPR:$Sdin)))]>,
2266 RegConstraint<"$Sdin = $Sd">,
2267 Requires<[HasFullFP16,UseFusedMAC]>,
2268 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2270 def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
2271 (VFMAD DPR:$dstin, DPR:$a, DPR:$b)>,
2272 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
2273 def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
2274 (VFMAS SPR:$dstin, SPR:$a, SPR:$b)>,
2275 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
2276 def : Pat<(fadd_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)),
2277 (VFMAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
2278 Requires<[HasFullFP16,DontUseNEONForFP,UseFusedMAC]>;
2280 // Match @llvm.fma.* intrinsics
// (fma x, y, z) -> (vfma z, x, y)
2282 def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, DPR:$Ddin)),
2283 (VFMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2284 Requires<[HasVFP4,HasDPVFP]>;
2285 def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, SPR:$Sdin)),
2286 (VFMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2287 Requires<[HasVFP4]>;
2288 def : Pat<(f16 (fma HPR:$Sn, HPR:$Sm, (f16 HPR:$Sdin))),
2289 (VFMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
2290 Requires<[HasFullFP16]>;
2292 def VFMSD : ADbI<0b11101, 0b10, 1, 0,
2293 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2294 IIC_fpFMAC64, "vfms", ".f64\t$Dd, $Dn, $Dm",
2295 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
2296 (f64 DPR:$Ddin)))]>,
2297 RegConstraint<"$Ddin = $Dd">,
2298 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
2299 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2301 def VFMSS : ASbIn<0b11101, 0b10, 1, 0,
2302 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2303 IIC_fpFMAC32, "vfms", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
2306 RegConstraint<"$Sdin = $Sd">,
2307 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
2308 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines.
}
2313 def VFMSH : AHbI<0b11101, 0b10, 1, 0,
2314 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2315 IIC_fpFMAC16, "vfms", ".f16\t$Sd, $Sn, $Sm",
2316 [(set (f16 HPR:$Sd), (fadd_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))),
2317 (f16 HPR:$Sdin)))]>,
2318 RegConstraint<"$Sdin = $Sd">,
2319 Requires<[HasFullFP16,UseFusedMAC]>,
2320 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2322 def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
2323 (VFMSD DPR:$dstin, DPR:$a, DPR:$b)>,
2324 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
2325 def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
2326 (VFMSS SPR:$dstin, SPR:$a, SPR:$b)>,
2327 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
2328 def : Pat<(fsub_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)),
2329 (VFMSH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
2330 Requires<[HasFullFP16,DontUseNEONForFP,UseFusedMAC]>;
2332 // Match @llvm.fma.* intrinsics
2333 // (fma (fneg x), y, z) -> (vfms z, x, y)
2334 def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin)),
2335 (VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2336 Requires<[HasVFP4,HasDPVFP]>;
2337 def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin)),
2338 (VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2339 Requires<[HasVFP4]>;
2340 def : Pat<(f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (f16 HPR:$Sdin))),
2341 (VFMSH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
2342 Requires<[HasFullFP16]>;
2344 def VFNMAD : ADbI<0b11101, 0b01, 1, 0,
2345 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2346 IIC_fpFMAC64, "vfnma", ".f64\t$Dd, $Dn, $Dm",
2347 [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
2348 (f64 DPR:$Ddin)))]>,
2349 RegConstraint<"$Ddin = $Dd">,
2350 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
2351 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2353 def VFNMAS : ASbI<0b11101, 0b01, 1, 0,
2354 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2355 IIC_fpFMAC32, "vfnma", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
2358 RegConstraint<"$Sdin = $Sd">,
2359 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
2360 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines.
}
2365 def VFNMAH : AHbI<0b11101, 0b01, 1, 0,
2366 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2367 IIC_fpFMAC16, "vfnma", ".f16\t$Sd, $Sn, $Sm",
2368 [(set (f16 HPR:$Sd), (fsub_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))),
2369 (f16 HPR:$Sdin)))]>,
2370 RegConstraint<"$Sdin = $Sd">,
2371 Requires<[HasFullFP16,UseFusedMAC]>,
2372 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2374 def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
2375 (VFNMAD DPR:$dstin, DPR:$a, DPR:$b)>,
2376 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
2377 def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
2378 (VFNMAS SPR:$dstin, SPR:$a, SPR:$b)>,
2379 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
2381 // Match @llvm.fma.* intrinsics
2382 // (fneg (fma x, y, z)) -> (vfnma z, x, y)
2383 def : Pat<(fneg (fma (f64 DPR:$Dn), (f64 DPR:$Dm), (f64 DPR:$Ddin))),
2384 (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2385 Requires<[HasVFP4,HasDPVFP]>;
2386 def : Pat<(fneg (fma (f32 SPR:$Sn), (f32 SPR:$Sm), (f32 SPR:$Sdin))),
2387 (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2388 Requires<[HasVFP4]>;
2389 def : Pat<(fneg (fma (f16 HPR:$Sn), (f16 HPR:$Sm), (f16 (f16 HPR:$Sdin)))),
2390 (VFNMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
2391 Requires<[HasFullFP16]>;
2392 // (fma (fneg x), y, (fneg z)) -> (vfnma z, x, y)
2393 def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, (fneg DPR:$Ddin))),
2394 (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2395 Requires<[HasVFP4,HasDPVFP]>;
2396 def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, (fneg SPR:$Sdin))),
2397 (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2398 Requires<[HasVFP4]>;
2399 def : Pat<(f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (fneg (f16 HPR:$Sdin)))),
2400 (VFNMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
2401 Requires<[HasFullFP16]>;
2403 def VFNMSD : ADbI<0b11101, 0b01, 0, 0,
2404 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2405 IIC_fpFMAC64, "vfnms", ".f64\t$Dd, $Dn, $Dm",
2406 [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
2407 (f64 DPR:$Ddin)))]>,
2408 RegConstraint<"$Ddin = $Dd">,
2409 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
2410 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2412 def VFNMSS : ASbI<0b11101, 0b01, 0, 0,
2413 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2414 IIC_fpFMAC32, "vfnms", ".f32\t$Sd, $Sn, $Sm",
2415 [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
2416 RegConstraint<"$Sdin = $Sd">,
2417 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
2418 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines.
}
2423 def VFNMSH : AHbI<0b11101, 0b01, 0, 0,
2424 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2425 IIC_fpFMAC16, "vfnms", ".f16\t$Sd, $Sn, $Sm",
2426 [(set (f16 HPR:$Sd), (fsub_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)), (f16 HPR:$Sdin)))]>,
2427 RegConstraint<"$Sdin = $Sd">,
2428 Requires<[HasFullFP16,UseFusedMAC]>,
2429 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2431 def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
2432 (VFNMSD DPR:$dstin, DPR:$a, DPR:$b)>,
2433 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
2434 def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
2435 (VFNMSS SPR:$dstin, SPR:$a, SPR:$b)>,
2436 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
2438 // Match @llvm.fma.* intrinsics
// (fma x, y, (fneg z)) -> (vfnms z, x, y)
2441 def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, (fneg DPR:$Ddin))),
2442 (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2443 Requires<[HasVFP4,HasDPVFP]>;
2444 def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, (fneg SPR:$Sdin))),
2445 (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2446 Requires<[HasVFP4]>;
2447 def : Pat<(f16 (fma (f16 HPR:$Sn), (f16 HPR:$Sm), (fneg (f16 HPR:$Sdin)))),
2448 (VFNMSH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
2449 Requires<[HasFullFP16]>;
2450 // (fneg (fma (fneg x), y, z)) -> (vfnms z, x, y)
2451 def : Pat<(fneg (f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin))),
2452 (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2453 Requires<[HasVFP4,HasDPVFP]>;
2454 def : Pat<(fneg (f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin))),
2455 (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2456 Requires<[HasVFP4]>;
2457 def : Pat<(fneg (f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (f16 HPR:$Sdin)))),
2458 (VFNMSH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
2459 Requires<[HasFullFP16]>;
2461 //===----------------------------------------------------------------------===//
2462 // FP Conditional moves.
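// These pseudos carry the ARMcmov selects for FP values; after register
// allocation they are expected to be expanded into a predicated VMOV, e.g. an
// f32 select on "ne" becoming "vmovne.f32 s0, s1".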
2465 let hasSideEffects = 0 in {
def VMOVDcc : PseudoInst<(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm, pred:$p),
                         IIC_fpUNA64, []>,
2468 RegConstraint<"$Dn = $Dd">, Requires<[HasFPRegs64]>;
def VMOVScc : PseudoInst<(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm, pred:$p),
                         IIC_fpUNA32, []>,
2472 RegConstraint<"$Sn = $Sd">, Requires<[HasFPRegs]>;
def VMOVHcc : PseudoInst<(outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm, pred:$p),
                         IIC_fpUNA16, []>,
              RegConstraint<"$Sd = $Sn">, Requires<[HasFPRegs]>;
}
2479 // The following patterns have to be defined out-of-line because the number
2480 // of instruction operands does not match the number of SDNode operands
2481 // (`pred` counts as one operand).
2483 def : Pat<(ARMcmov f64:$Dn, f64:$Dm, imm:$cc, CPSR),
2484 (VMOVDcc $Dn, $Dm, imm:$cc, CPSR)>,
2485 Requires<[HasFPRegs64]>;
2487 def : Pat<(ARMcmov f32:$Sn, f32:$Sm, imm:$cc, CPSR),
2488 (VMOVScc $Sn, $Sm, imm:$cc, CPSR)>,
2489 Requires<[HasFPRegs]>;
2491 def : Pat<(ARMcmov f16:$Sn, f16:$Sm, imm:$cc, CPSR),
2492 (VMOVHcc $Sn, $Sm, imm:$cc, CPSR)>,
2493 Requires<[HasFPRegs]>; // FIXME: Shouldn't this be HasFPRegs16?
2495 //===----------------------------------------------------------------------===//
2496 // Move from VFP System Register to ARM core register.
class MovFromVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
                 list<dag> pattern>:
2501 VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, "", pattern> {
  // Instruction operand.
  bits<4> Rt;
2506 let Inst{27-20} = 0b11101111;
2507 let Inst{19-16} = opc19_16;
2508 let Inst{15-12} = Rt;
  let Inst{11-8} = 0b1010;
  let Inst{7} = 0;
  let Inst{6-5} = 0b00;
  let Inst{4} = 1;
  let Inst{3-0} = 0b0000;
2514 let Unpredictable{7-5} = 0b111;
  let Unpredictable{3-0} = 0b1111;
}
2518 let DecoderMethod = "DecodeForVMRSandVMSR" in {
// APSR is the application level alias of CPSR. FMSTAT copies the FPSCR
// N, Z, C, V flags to APSR.
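// A typical sequence is "vcmp.f32 s0, s1" followed by "vmrs APSR_nzcv, fpscr",
// after which ordinary conditional execution (e.g. "bgt") can test the result.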
2521 let Defs = [CPSR], Uses = [FPSCR_NZCV], Predicates = [HasFPRegs],
2522 Rt = 0b1111 /* apsr_nzcv */ in
2523 def FMSTAT : MovFromVFP<0b0001 /* fpscr */, (outs), (ins),
2524 "vmrs", "\tAPSR_nzcv, fpscr",
2525 [(set CPSR, (arm_fmstat FPSCR_NZCV))]>;
2527 // Application level FPSCR -> GPR
2528 let hasSideEffects = 1, Uses = [FPSCR], Predicates = [HasFPRegs] in
2529 def VMRS : MovFromVFP<0b0001 /* fpscr */, (outs GPRnopc:$Rt), (ins),
2530 "vmrs", "\t$Rt, fpscr",
2531 [(set GPRnopc:$Rt, (int_arm_get_fpscr))]>;
2533 // System level FPEXC, FPSID -> GPR
2534 let Uses = [FPSCR] in {
2535 def VMRS_FPEXC : MovFromVFP<0b1000 /* fpexc */, (outs GPRnopc:$Rt), (ins),
2536 "vmrs", "\t$Rt, fpexc", []>;
2537 def VMRS_FPSID : MovFromVFP<0b0000 /* fpsid */, (outs GPRnopc:$Rt), (ins),
2538 "vmrs", "\t$Rt, fpsid", []>;
2539 def VMRS_MVFR0 : MovFromVFP<0b0111 /* mvfr0 */, (outs GPRnopc:$Rt), (ins),
2540 "vmrs", "\t$Rt, mvfr0", []>;
2541 def VMRS_MVFR1 : MovFromVFP<0b0110 /* mvfr1 */, (outs GPRnopc:$Rt), (ins),
2542 "vmrs", "\t$Rt, mvfr1", []>;
2543 let Predicates = [HasFPARMv8] in {
2544 def VMRS_MVFR2 : MovFromVFP<0b0101 /* mvfr2 */, (outs GPRnopc:$Rt), (ins),
2545 "vmrs", "\t$Rt, mvfr2", []>;
2547 def VMRS_FPINST : MovFromVFP<0b1001 /* fpinst */, (outs GPRnopc:$Rt), (ins),
2548 "vmrs", "\t$Rt, fpinst", []>;
2549 def VMRS_FPINST2 : MovFromVFP<0b1010 /* fpinst2 */, (outs GPRnopc:$Rt),
2550 (ins), "vmrs", "\t$Rt, fpinst2", []>;
2551 let Predicates = [HasV8_1MMainline, HasFPRegs] in {
2552 // System level FPSCR_NZCVQC -> GPR
2553 def VMRS_FPSCR_NZCVQC
2554 : MovFromVFP<0b0010 /* fpscr_nzcvqc */,
2555 (outs GPR:$Rt), (ins cl_FPSCR_NZCV:$fpscr_in),
2556 "vmrs", "\t$Rt, fpscr_nzcvqc", []>;
2559 let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
2560 // System level FPSCR -> GPR, with context saving for security extensions
2561 def VMRS_FPCXTNS : MovFromVFP<0b1110 /* fpcxtns */, (outs GPR:$Rt), (ins),
2562 "vmrs", "\t$Rt, fpcxtns", []>;
2564 let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
2565 // System level FPSCR -> GPR, with context saving for security extensions
2566 def VMRS_FPCXTS : MovFromVFP<0b1111 /* fpcxts */, (outs GPR:$Rt), (ins),
2567 "vmrs", "\t$Rt, fpcxts", []>;
2570 let Predicates = [HasV8_1MMainline, HasMVEInt] in {
2571 // System level VPR/P0 -> GPR
2573 def VMRS_VPR : MovFromVFP<0b1100 /* vpr */, (outs GPR:$Rt), (ins),
2574 "vmrs", "\t$Rt, vpr", []>;
2576 def VMRS_P0 : MovFromVFP<0b1101 /* p0 */, (outs GPR:$Rt), (ins VCCR:$cond),
2577 "vmrs", "\t$Rt, p0", []>;
2581 //===----------------------------------------------------------------------===//
2582 // Move from ARM core register to VFP System Register.
class MovToVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
               list<dag> pattern>:
2587 VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, "", pattern> {
  // Instruction operand.
  bits<4> Rt;
2592 let Inst{27-20} = 0b11101110;
2593 let Inst{19-16} = opc19_16;
2594 let Inst{15-12} = Rt;
  let Inst{11-8} = 0b1010;
  let Inst{7} = 0;
  let Inst{6-5} = 0b00;
  let Inst{4} = 1;
  let Inst{3-0} = 0b0000;
2600 let Predicates = [HasVFP2];
2601 let Unpredictable{7-5} = 0b111;
  let Unpredictable{3-0} = 0b1111;
}
2605 let DecoderMethod = "DecodeForVMRSandVMSR" in {
2606 let Defs = [FPSCR] in {
2607 let Predicates = [HasFPRegs] in
2608 // Application level GPR -> FPSCR
2609 def VMSR : MovToVFP<0b0001 /* fpscr */, (outs), (ins GPRnopc:$Rt),
2610 "vmsr", "\tfpscr, $Rt",
2611 [(int_arm_set_fpscr GPRnopc:$Rt)]>;
2612 // System level GPR -> FPEXC
2613 def VMSR_FPEXC : MovToVFP<0b1000 /* fpexc */, (outs), (ins GPRnopc:$Rt),
2614 "vmsr", "\tfpexc, $Rt", []>;
2615 // System level GPR -> FPSID
2616 def VMSR_FPSID : MovToVFP<0b0000 /* fpsid */, (outs), (ins GPRnopc:$Rt),
2617 "vmsr", "\tfpsid, $Rt", []>;
2618 def VMSR_FPINST : MovToVFP<0b1001 /* fpinst */, (outs), (ins GPRnopc:$Rt),
2619 "vmsr", "\tfpinst, $Rt", []>;
2620 def VMSR_FPINST2 : MovToVFP<0b1010 /* fpinst2 */, (outs), (ins GPRnopc:$Rt),
2621 "vmsr", "\tfpinst2, $Rt", []>;
2623 let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
2624 // System level GPR -> FPSCR with context saving for security extensions
2625 def VMSR_FPCXTNS : MovToVFP<0b1110 /* fpcxtns */, (outs), (ins GPR:$Rt),
2626 "vmsr", "\tfpcxtns, $Rt", []>;
2628 let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
2629 // System level GPR -> FPSCR with context saving for security extensions
2630 def VMSR_FPCXTS : MovToVFP<0b1111 /* fpcxts */, (outs), (ins GPR:$Rt),
2631 "vmsr", "\tfpcxts, $Rt", []>;
2633 let Predicates = [HasV8_1MMainline, HasFPRegs] in {
2634 // System level GPR -> FPSCR_NZCVQC
2635 def VMSR_FPSCR_NZCVQC
2636 : MovToVFP<0b0010 /* fpscr_nzcvqc */,
2637 (outs cl_FPSCR_NZCV:$fpscr_out), (ins GPR:$Rt),
2638 "vmsr", "\tfpscr_nzcvqc, $Rt", []>;
2641 let Predicates = [HasV8_1MMainline, HasMVEInt] in {
2642 // System level GPR -> VPR/P0
2644 def VMSR_VPR : MovToVFP<0b1100 /* vpr */, (outs), (ins GPR:$Rt),
2645 "vmsr", "\tvpr, $Rt", []>;
2647 def VMSR_P0 : MovToVFP<0b1101 /* p0 */, (outs VCCR:$cond), (ins GPR:$Rt),
2648 "vmsr", "\tp0, $Rt", []>;
2652 //===----------------------------------------------------------------------===//
2656 // Materialize FP immediates. VFP3 only.
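// Only constants representable in the 8-bit VFP immediate format can be
// materialized this way, e.g. "vmov.f32 s0, #1.0" (immediate encoding 0x70);
// other values are typically loaded from a constant pool instead.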
2657 let isReMaterializable = 1 in {
2658 def FCONSTD : VFPAI<(outs DPR:$Dd), (ins vfp_f64imm:$imm),
2659 VFPMiscFrm, IIC_fpUNA64,
2660 "vmov", ".f64\t$Dd, $imm", "",
2661 [(set DPR:$Dd, vfp_f64imm:$imm)]>,
              Requires<[HasVFP3,HasDPVFP]> {
  bits<5> Dd;
  bits<8> imm;
2666 let Inst{27-23} = 0b11101;
2667 let Inst{22} = Dd{4};
2668 let Inst{21-20} = 0b11;
2669 let Inst{19-16} = imm{7-4};
2670 let Inst{15-12} = Dd{3-0};
2671 let Inst{11-9} = 0b101;
2672 let Inst{8} = 1; // Double precision.
2673 let Inst{7-4} = 0b0000;
  let Inst{3-0} = imm{3-0};
}
2677 def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
2678 VFPMiscFrm, IIC_fpUNA32,
2679 "vmov", ".f32\t$Sd, $imm", "",
                    [(set SPR:$Sd, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
  bits<5> Sd;
  bits<8> imm;
2684 let Inst{27-23} = 0b11101;
2685 let Inst{22} = Sd{0};
2686 let Inst{21-20} = 0b11;
2687 let Inst{19-16} = imm{7-4};
2688 let Inst{15-12} = Sd{4-1};
2689 let Inst{11-9} = 0b101;
2690 let Inst{8} = 0; // Single precision.
2691 let Inst{7-4} = 0b0000;
  let Inst{3-0} = imm{3-0};
}
2695 def FCONSTH : VFPAI<(outs HPR:$Sd), (ins vfp_f16imm:$imm),
2696 VFPMiscFrm, IIC_fpUNA16,
2697 "vmov", ".f16\t$Sd, $imm", "",
2698 [(set (f16 HPR:$Sd), vfp_f16imm:$imm)]>,
              Requires<[HasFullFP16]> {
  bits<5> Sd;
  bits<8> imm;
2703 let Inst{27-23} = 0b11101;
2704 let Inst{22} = Sd{0};
2705 let Inst{21-20} = 0b11;
2706 let Inst{19-16} = imm{7-4};
2707 let Inst{15-12} = Sd{4-1};
2708 let Inst{11-8} = 0b1001; // Half precision
2709 let Inst{7-4} = 0b0000;
2710 let Inst{3-0} = imm{3-0};
  let isUnpredicable = 1;
}
2716 def : Pat<(f32 (vfp_f32f16imm:$imm)),
2717 (f32 (COPY_TO_REGCLASS (f16 (FCONSTH (vfp_f32f16imm_xform (f32 $imm)))), SPR))> {
  let Predicates = [HasFullFP16];
}
2721 // Floating-point environment management.
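// The FP environment is modelled as the 32-bit FPSCR contents, so reading and
// writing it map directly onto VMRS/VMSR, and a reset simply writes zero
// through a core register (e.g. "mov r0, #0" followed by "vmsr fpscr, r0").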
2722 def : Pat<(get_fpenv), (VMRS)>;
2723 def : Pat<(set_fpenv GPRnopc:$Rt), (VMSR GPRnopc:$Rt)>;
2724 def : Pat<(reset_fpenv), (VMSR (MOVi 0))>, Requires<[IsARM]>;
2725 def : Pat<(reset_fpenv), (VMSR (tMOVi8 0))>, Requires<[IsThumb]>;
2726 def : Pat<(get_fpmode), (VMRS)>;
2728 //===----------------------------------------------------------------------===//
2729 // Assembler aliases.
// A few mnemonic aliases for pre-unified (pre-UAL) syntax. We don't guarantee
// to support them all, but supporting at least some of the basics is
// good to be friendly.
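// For example, the pre-UAL "fadds s0, s1, s2" is accepted and assembles to the
// same encoding as "vadd.f32 s0, s1, s2".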
2734 def : VFP2MnemonicAlias<"flds", "vldr">;
2735 def : VFP2MnemonicAlias<"fldd", "vldr">;
2736 def : VFP2MnemonicAlias<"fmrs", "vmov">;
2737 def : VFP2MnemonicAlias<"fmsr", "vmov">;
2738 def : VFP2MnemonicAlias<"fsqrts", "vsqrt">;
2739 def : VFP2MnemonicAlias<"fsqrtd", "vsqrt">;
2740 def : VFP2MnemonicAlias<"fadds", "vadd.f32">;
2741 def : VFP2MnemonicAlias<"faddd", "vadd.f64">;
2742 def : VFP2MnemonicAlias<"fmrdd", "vmov">;
2743 def : VFP2MnemonicAlias<"fmrds", "vmov">;
2744 def : VFP2MnemonicAlias<"fmrrd", "vmov">;
2745 def : VFP2MnemonicAlias<"fmdrr", "vmov">;
2746 def : VFP2MnemonicAlias<"fmuls", "vmul.f32">;
2747 def : VFP2MnemonicAlias<"fmuld", "vmul.f64">;
2748 def : VFP2MnemonicAlias<"fnegs", "vneg.f32">;
2749 def : VFP2MnemonicAlias<"fnegd", "vneg.f64">;
2750 def : VFP2MnemonicAlias<"ftosizd", "vcvt.s32.f64">;
2751 def : VFP2MnemonicAlias<"ftosid", "vcvtr.s32.f64">;
2752 def : VFP2MnemonicAlias<"ftosizs", "vcvt.s32.f32">;
2753 def : VFP2MnemonicAlias<"ftosis", "vcvtr.s32.f32">;
2754 def : VFP2MnemonicAlias<"ftouizd", "vcvt.u32.f64">;
2755 def : VFP2MnemonicAlias<"ftouid", "vcvtr.u32.f64">;
2756 def : VFP2MnemonicAlias<"ftouizs", "vcvt.u32.f32">;
2757 def : VFP2MnemonicAlias<"ftouis", "vcvtr.u32.f32">;
2758 def : VFP2MnemonicAlias<"fsitod", "vcvt.f64.s32">;
2759 def : VFP2MnemonicAlias<"fsitos", "vcvt.f32.s32">;
2760 def : VFP2MnemonicAlias<"fuitod", "vcvt.f64.u32">;
2761 def : VFP2MnemonicAlias<"fuitos", "vcvt.f32.u32">;
2762 def : VFP2MnemonicAlias<"fsts", "vstr">;
2763 def : VFP2MnemonicAlias<"fstd", "vstr">;
2764 def : VFP2MnemonicAlias<"fmacd", "vmla.f64">;
2765 def : VFP2MnemonicAlias<"fmacs", "vmla.f32">;
2766 def : VFP2MnemonicAlias<"fcpys", "vmov.f32">;
2767 def : VFP2MnemonicAlias<"fcpyd", "vmov.f64">;
2768 def : VFP2MnemonicAlias<"fcmps", "vcmp.f32">;
2769 def : VFP2MnemonicAlias<"fcmpd", "vcmp.f64">;
2770 def : VFP2MnemonicAlias<"fdivs", "vdiv.f32">;
2771 def : VFP2MnemonicAlias<"fdivd", "vdiv.f64">;
2772 def : VFP2MnemonicAlias<"fmrx", "vmrs">;
2773 def : VFP2MnemonicAlias<"fmxr", "vmsr">;
2775 // Be friendly and accept the old form of zero-compare
2776 def : VFP2DPInstAlias<"fcmpzd${p} $val", (VCMPZD DPR:$val, pred:$p)>;
2777 def : VFP2InstAlias<"fcmpzs${p} $val", (VCMPZS SPR:$val, pred:$p)>;
2780 def : InstAlias<"fmstat${p}", (FMSTAT pred:$p), 0>, Requires<[HasFPRegs]>;
2781 def : VFP2InstAlias<"fadds${p} $Sd, $Sn, $Sm",
2782 (VADDS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
2783 def : VFP2DPInstAlias<"faddd${p} $Dd, $Dn, $Dm",
2784 (VADDD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
2785 def : VFP2InstAlias<"fsubs${p} $Sd, $Sn, $Sm",
2786 (VSUBS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
2787 def : VFP2DPInstAlias<"fsubd${p} $Dd, $Dn, $Dm",
2788 (VSUBD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
2790 // No need for the size suffix on VSQRT. It's implied by the register classes.
2791 def : VFP2InstAlias<"vsqrt${p} $Sd, $Sm", (VSQRTS SPR:$Sd, SPR:$Sm, pred:$p)>;
2792 def : VFP2DPInstAlias<"vsqrt${p} $Dd, $Dm", (VSQRTD DPR:$Dd, DPR:$Dm, pred:$p)>;
2794 // VLDR/VSTR accept an optional type suffix.
2795 def : VFP2InstAlias<"vldr${p}.32 $Sd, $addr",
2796 (VLDRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
2797 def : VFP2InstAlias<"vstr${p}.32 $Sd, $addr",
2798 (VSTRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
2799 def : VFP2InstAlias<"vldr${p}.64 $Dd, $addr",
2800 (VLDRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
2801 def : VFP2InstAlias<"vstr${p}.64 $Dd, $addr",
2802 (VSTRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
// VMOV can accept an optional data type suffix of 32 bits or narrower.
2805 def : VFP2InstAlias<"vmov${p}.8 $Rt, $Sn",
2806 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
2807 def : VFP2InstAlias<"vmov${p}.16 $Rt, $Sn",
2808 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
2809 def : VFP2InstAlias<"vmov${p}.32 $Rt, $Sn",
2810 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
2811 def : VFP2InstAlias<"vmov${p}.8 $Sn, $Rt",
2812 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
2813 def : VFP2InstAlias<"vmov${p}.16 $Sn, $Rt",
2814 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
2815 def : VFP2InstAlias<"vmov${p}.32 $Sn, $Rt",
2816 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
2818 def : VFP2InstAlias<"vmov${p}.f64 $Rt, $Rt2, $Dn",
2819 (VMOVRRD GPR:$Rt, GPR:$Rt2, DPR:$Dn, pred:$p)>;
2820 def : VFP2InstAlias<"vmov${p}.f64 $Dn, $Rt, $Rt2",
2821 (VMOVDRR DPR:$Dn, GPR:$Rt, GPR:$Rt2, pred:$p)>;
// VMOVS doesn't need the .f32 to disambiguate from the NEON encoding the way
// VMOVD does.
2825 def : VFP2InstAlias<"vmov${p} $Sd, $Sm",
2826 (VMOVS SPR:$Sd, SPR:$Sm, pred:$p)>;
2828 // FCONSTD/FCONSTS alias for vmov.f64/vmov.f32
2829 // These aliases provide added functionality over vmov.f instructions by
2830 // allowing users to write assembly containing encoded floating point constants
// (e.g. #0x70 vs #1.0). Without these aliases there is no way for the
2832 // assembler to accept encoded fp constants (but the equivalent fp-literal is
2833 // accepted directly by vmovf).
2834 def : VFP3InstAlias<"fconstd${p} $Dd, $val",
2835 (FCONSTD DPR:$Dd, vfp_f64imm:$val, pred:$p)>;
2836 def : VFP3InstAlias<"fconsts${p} $Sd, $val",
2837 (FCONSTS SPR:$Sd, vfp_f32imm:$val, pred:$p)>;
2839 def VSCCLRMD : VFPXI<(outs), (ins pred:$p, fp_dreglist_with_vpr:$regs, variable_ops),
2840 AddrModeNone, 4, IndexModeNone, VFPMiscFrm, NoItinerary,
2841 "vscclrm{$p}\t$regs", "", []>, Sched<[]> {
2843 let Inst{31-23} = 0b111011001;
2844 let Inst{22} = regs{12};
2845 let Inst{21-16} = 0b011111;
2846 let Inst{15-12} = regs{11-8};
2847 let Inst{11-8} = 0b1011;
2848 let Inst{7-1} = regs{7-1};
2851 let DecoderMethod = "DecodeVSCCLRM";
  list<Predicate> Predicates = [HasV8_1MMainline, Has8MSecExt];
}
2856 def VSCCLRMS : VFPXI<(outs), (ins pred:$p, fp_sreglist_with_vpr:$regs, variable_ops),
2857 AddrModeNone, 4, IndexModeNone, VFPMiscFrm, NoItinerary,
2858 "vscclrm{$p}\t$regs", "", []>, Sched<[]> {
2860 let Inst{31-23} = 0b111011001;
2861 let Inst{22} = regs{8};
2862 let Inst{21-16} = 0b011111;
2863 let Inst{15-12} = regs{12-9};
2864 let Inst{11-8} = 0b1010;
2865 let Inst{7-0} = regs{7-0};
2867 let DecoderMethod = "DecodeVSCCLRM";
  list<Predicate> Predicates = [HasV8_1MMainline, Has8MSecExt];
}
2872 //===----------------------------------------------------------------------===//
2873 // Store VFP System Register to memory.
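// These v8.1-M instructions load/store a VFP/MVE system register directly
// from/to memory, e.g. "vstr fpscr, [r0]" or, with pre-indexed writeback,
// "vstr fpcxtns, [sp, #-8]!".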
2876 class vfp_vstrldr<bit opc, bit P, bit W, bits<4> SysReg, string sysreg,
2877 dag oops, dag iops, IndexMode im, string Dest, string cstr>
2878 : VFPI<oops, iops, AddrModeT2_i7s4, 4, im, VFPLdStFrm, IIC_fpSTAT,
2879 !if(opc,"vldr","vstr"), !strconcat("\t", sysreg, ", ", Dest), cstr, []>,
  let Inst{27-25} = 0b110;
  let Inst{24} = P;
  let Inst{23} = addr{7};
  let Inst{22} = SysReg{3};
  let Inst{21} = W;
  let Inst{20} = opc;
  let Inst{19-16} = addr{11-8};
2889 let Inst{15-13} = SysReg{2-0};
2890 let Inst{12-7} = 0b011111;
2891 let Inst{6-0} = addr{6-0};
2892 list<Predicate> Predicates = [HasFPRegs, HasV8_1MMainline];
2894 let mayStore = !if(opc, 0b0, 0b1);
  let hasSideEffects = 1;
}
2898 multiclass vfp_vstrldr_sysreg<bit opc, bits<4> SysReg, string sysreg,
2899 dag oops=(outs), dag iops=(ins)> {
2901 vfp_vstrldr<opc, 1, 0, SysReg, sysreg,
2902 oops, !con(iops, (ins t2addrmode_imm7s4:$addr)),
2903 IndexModePost, "$addr", "" > {
      let DecoderMethod = "DecodeVSTRVLDR_SYSREG<false>";
    }
2908 vfp_vstrldr<opc, 1, 1, SysReg, sysreg,
2909 !con(oops, (outs GPRnopc:$wb)),
2910 !con(iops, (ins t2addrmode_imm7s4_pre:$addr)),
2911 IndexModePre, "$addr!", "$addr.base = $wb"> {
      let DecoderMethod = "DecodeVSTRVLDR_SYSREG<true>";
    }
2916 vfp_vstrldr<opc, 0, 1, SysReg, sysreg,
2917 !con(oops, (outs GPRnopc:$wb)),
2918 !con(iops, (ins t2_addr_offset_none:$Rn,
2919 t2am_imm7s4_offset:$addr)),
2920 IndexModePost, "$Rn$addr", "$Rn.base = $wb"> {
      bits<4> Rn;
      let Inst{19-16} = Rn{3-0};
      let DecoderMethod = "DecodeVSTRVLDR_SYSREG<true>";
    }
}
2927 let Uses = [FPSCR] in {
2928 defm VSTR_FPSCR : vfp_vstrldr_sysreg<0b0,0b0001, "fpscr">;
2930 let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
2931 defm VSTR_FPCXTNS : vfp_vstrldr_sysreg<0b0,0b1110, "fpcxtns">;
defm VSTR_FPCXTS : vfp_vstrldr_sysreg<0b0,0b1111, "fpcxts">;
}
}
2936 let Predicates = [HasV8_1MMainline, HasMVEInt] in {
2937 let Uses = [VPR] in {
defm VSTR_VPR : vfp_vstrldr_sysreg<0b0,0b1100, "vpr">;
}
2940 defm VSTR_P0 : vfp_vstrldr_sysreg<0b0,0b1101, "p0",
2941 (outs), (ins VCCR:$P0)>;
2943 let Defs = [VPR] in {
defm VLDR_VPR : vfp_vstrldr_sysreg<0b1,0b1100, "vpr">;
}
2946 defm VLDR_P0 : vfp_vstrldr_sysreg<0b1,0b1101, "p0",
                                 (outs VCCR:$P0), (ins)>;
}
2950 let Defs = [FPSCR] in {
2951 defm VLDR_FPSCR : vfp_vstrldr_sysreg<0b1,0b0001, "fpscr">;
2953 let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
2954 defm VLDR_FPCXTNS : vfp_vstrldr_sysreg<0b1,0b1110, "fpcxtns">;
defm VLDR_FPCXTS : vfp_vstrldr_sysreg<0b1,0b1111, "fpcxts">;
}
}
2959 defm VSTR_FPSCR_NZCVQC : vfp_vstrldr_sysreg<0b0,0b0010, "fpscr_nzcvqc",
2960 (outs), (ins cl_FPSCR_NZCV:$fpscr)>;
2961 let canFoldAsLoad = 1, isReMaterializable = 1 in {
2962 defm VLDR_FPSCR_NZCVQC : vfp_vstrldr_sysreg<0b1,0b0010, "fpscr_nzcvqc",
2963 (outs cl_FPSCR_NZCV:$fpscr), (ins)>;