//===- ARMInstrNEON.td - NEON support for ARM -----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the ARM NEON instruction set.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// NEON-specific DAG Nodes.
//===----------------------------------------------------------------------===//
def SDTARMVCMP : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<1, 2>]>;

def NEONvceq  : SDNode<"ARMISD::VCEQ", SDTARMVCMP>;
def NEONvcge  : SDNode<"ARMISD::VCGE", SDTARMVCMP>;
def NEONvcgeu : SDNode<"ARMISD::VCGEU", SDTARMVCMP>;
def NEONvcgt  : SDNode<"ARMISD::VCGT", SDTARMVCMP>;
def NEONvcgtu : SDNode<"ARMISD::VCGTU", SDTARMVCMP>;
def NEONvtst  : SDNode<"ARMISD::VTST", SDTARMVCMP>;
// Types for vector shift by immediates. The "SHX" version is for long and
// narrow operations where the source and destination vectors have different
// types. The "SHINS" version is for shift and insert operations.
def SDTARMVSH    : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
                                        SDTCisVT<2, i32>]>;
def SDTARMVSHX   : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                        SDTCisVT<2, i32>]>;
def SDTARMVSHINS : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
                                        SDTCisSameAs<0, 2>, SDTCisVT<3, i32>]>;

def NEONvshl   : SDNode<"ARMISD::VSHL", SDTARMVSH>;
def NEONvshrs  : SDNode<"ARMISD::VSHRs", SDTARMVSH>;
def NEONvshru  : SDNode<"ARMISD::VSHRu", SDTARMVSH>;
def NEONvshlls : SDNode<"ARMISD::VSHLLs", SDTARMVSHX>;
def NEONvshllu : SDNode<"ARMISD::VSHLLu", SDTARMVSHX>;
def NEONvshlli : SDNode<"ARMISD::VSHLLi", SDTARMVSHX>;
def NEONvshrn  : SDNode<"ARMISD::VSHRN", SDTARMVSHX>;
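// For example, NEONvshrn (shift right and narrow) produces a result vector
// that is narrower than its source, so it uses the SDTARMVSHX profile rather
// than SDTARMVSH. (Explanatory note on the type profiles above.)
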
def NEONvrshrs : SDNode<"ARMISD::VRSHRs", SDTARMVSH>;
def NEONvrshru : SDNode<"ARMISD::VRSHRu", SDTARMVSH>;
def NEONvrshrn : SDNode<"ARMISD::VRSHRN", SDTARMVSHX>;

def NEONvqshls   : SDNode<"ARMISD::VQSHLs", SDTARMVSH>;
def NEONvqshlu   : SDNode<"ARMISD::VQSHLu", SDTARMVSH>;
def NEONvqshlsu  : SDNode<"ARMISD::VQSHLsu", SDTARMVSH>;
def NEONvqshrns  : SDNode<"ARMISD::VQSHRNs", SDTARMVSHX>;
def NEONvqshrnu  : SDNode<"ARMISD::VQSHRNu", SDTARMVSHX>;
def NEONvqshrnsu : SDNode<"ARMISD::VQSHRNsu", SDTARMVSHX>;

def NEONvqrshrns  : SDNode<"ARMISD::VQRSHRNs", SDTARMVSHX>;
def NEONvqrshrnu  : SDNode<"ARMISD::VQRSHRNu", SDTARMVSHX>;
def NEONvqrshrnsu : SDNode<"ARMISD::VQRSHRNsu", SDTARMVSHX>;

def NEONvsli : SDNode<"ARMISD::VSLI", SDTARMVSHINS>;
def NEONvsri : SDNode<"ARMISD::VSRI", SDTARMVSHINS>;
def SDTARMVGETLN : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                        SDTCisVT<2, i32>]>;
def NEONvgetlaneu : SDNode<"ARMISD::VGETLANEu", SDTARMVGETLN>;
def NEONvgetlanes : SDNode<"ARMISD::VGETLANEs", SDTARMVGETLN>;

def NEONvdup : SDNode<"ARMISD::VDUP", SDTypeProfile<1, 1, [SDTCisVec<0>]>>;

// VDUPLANE can produce a quad-register result from a double-register source,
// so the result is not constrained to match the source.
def NEONvduplane : SDNode<"ARMISD::VDUPLANE",
                          SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                               SDTCisVT<2, i32>]>>;

def SDTARMVEXT : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
                                      SDTCisSameAs<0, 2>, SDTCisVT<3, i32>]>;
def NEONvext : SDNode<"ARMISD::VEXT", SDTARMVEXT>;

def SDTARMVSHUF : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0, 1>]>;
def NEONvrev64 : SDNode<"ARMISD::VREV64", SDTARMVSHUF>;
def NEONvrev32 : SDNode<"ARMISD::VREV32", SDTARMVSHUF>;
def NEONvrev16 : SDNode<"ARMISD::VREV16", SDTARMVSHUF>;

def SDTARMVSHUF2 : SDTypeProfile<2, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
                                        SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>]>;
def NEONzip : SDNode<"ARMISD::VZIP", SDTARMVSHUF2>;
def NEONuzp : SDNode<"ARMISD::VUZP", SDTARMVSHUF2>;
def NEONtrn : SDNode<"ARMISD::VTRN", SDTARMVSHUF2>;
//===----------------------------------------------------------------------===//
// NEON operand definitions
//===----------------------------------------------------------------------===//

// addrmode_neonldstm := reg
/* TODO: Take advantage of vldm.
def addrmode_neonldstm : Operand<i32>,
                ComplexPattern<i32, 2, "SelectAddrModeNeonLdStM", []> {
  let PrintMethod = "printAddrNeonLdStMOperand";
  let MIOperandInfo = (ops GPR, i32imm);
}
*/
//===----------------------------------------------------------------------===//
// NEON load / store instructions
//===----------------------------------------------------------------------===//

/* TODO: Take advantage of vldm.
let mayLoad = 1 in {
def VLDMD : NI<(outs),
               (ins addrmode_neonldstm:$addr, reglist:$dst1, variable_ops),
               NoItinerary,
               "vldm${addr:submode} ${addr:base}, $dst1",
               []> {
  let Inst{27-25} = 0b110;
  let Inst{20}    = 1;
  let Inst{11-9}  = 0b101;
}

def VLDMS : NI<(outs),
               (ins addrmode_neonldstm:$addr, reglist:$dst1, variable_ops),
               NoItinerary,
               "vldm${addr:submode} ${addr:base}, $dst1",
               []> {
  let Inst{27-25} = 0b110;
  let Inst{20}    = 1;
  let Inst{11-9}  = 0b101;
}
}
*/
// Use vldmia to load a Q register as a D register pair.
def VLDRQ : NI4<(outs QPR:$dst), (ins addrmode4:$addr),
                NoItinerary,
                "vldmia $addr, ${dst:dregpair}",
                [(set QPR:$dst, (v2f64 (load addrmode4:$addr)))]> {
  let Inst{27-25} = 0b110;
  let Inst{24} = 0; // P bit
  let Inst{23} = 1; // U bit
  let Inst{20} = 1; // L bit
  let Inst{11-9} = 0b101;
}

// Use vstmia to store a Q register as a D register pair.
def VSTRQ : NI4<(outs), (ins QPR:$src, addrmode4:$addr),
                NoItinerary,
                "vstmia $addr, ${src:dregpair}",
                [(store (v2f64 QPR:$src), addrmode4:$addr)]> {
  let Inst{27-25} = 0b110;
  let Inst{24} = 0; // P bit
  let Inst{23} = 1; // U bit
  let Inst{20} = 0; // L bit
  let Inst{11-9} = 0b101;
}
// VLD1 : Vector Load (multiple single elements)
class VLD1D<string OpcodeStr, ValueType Ty, Intrinsic IntOp>
  : NLdSt<(outs DPR:$dst), (ins addrmode6:$addr), NoItinerary,
          !strconcat(OpcodeStr, "\t\\{$dst\\}, $addr"), "",
          [(set DPR:$dst, (Ty (IntOp addrmode6:$addr)))]>;
class VLD1Q<string OpcodeStr, ValueType Ty, Intrinsic IntOp>
  : NLdSt<(outs QPR:$dst), (ins addrmode6:$addr), NoItinerary,
          !strconcat(OpcodeStr, "\t${dst:dregpair}, $addr"), "",
          [(set QPR:$dst, (Ty (IntOp addrmode6:$addr)))]>;

def VLD1d8  : VLD1D<"vld1.8",  v8i8,  int_arm_neon_vld1>;
def VLD1d16 : VLD1D<"vld1.16", v4i16, int_arm_neon_vld1>;
def VLD1d32 : VLD1D<"vld1.32", v2i32, int_arm_neon_vld1>;
def VLD1df  : VLD1D<"vld1.32", v2f32, int_arm_neon_vld1>;
def VLD1d64 : VLD1D<"vld1.64", v1i64, int_arm_neon_vld1>;

def VLD1q8  : VLD1Q<"vld1.8",  v16i8, int_arm_neon_vld1>;
def VLD1q16 : VLD1Q<"vld1.16", v8i16, int_arm_neon_vld1>;
def VLD1q32 : VLD1Q<"vld1.32", v4i32, int_arm_neon_vld1>;
def VLD1qf  : VLD1Q<"vld1.32", v4f32, int_arm_neon_vld1>;
def VLD1q64 : VLD1Q<"vld1.64", v2i64, int_arm_neon_vld1>;
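// As a reading of the classes above (added note): VLD1d8, for instance,
// instantiates VLD1D so that it matches the pattern
// [(set DPR:$dst, (v8i8 (int_arm_neon_vld1 addrmode6:$addr)))] and prints as
// "vld1.8 {$dst}, $addr".
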
let mayLoad = 1 in {

// VLD2 : Vector Load (multiple 2-element structures)
class VLD2D<string OpcodeStr>
  : NLdSt<(outs DPR:$dst1, DPR:$dst2), (ins addrmode6:$addr), NoItinerary,
          !strconcat(OpcodeStr, "\t\\{$dst1,$dst2\\}, $addr"), "", []>;

def VLD2d8  : VLD2D<"vld2.8">;
def VLD2d16 : VLD2D<"vld2.16">;
def VLD2d32 : VLD2D<"vld2.32">;

// VLD3 : Vector Load (multiple 3-element structures)
class VLD3D<string OpcodeStr>
  : NLdSt<(outs DPR:$dst1, DPR:$dst2, DPR:$dst3), (ins addrmode6:$addr),
          NoItinerary,
          !strconcat(OpcodeStr, "\t\\{$dst1,$dst2,$dst3\\}, $addr"), "", []>;

def VLD3d8  : VLD3D<"vld3.8">;
def VLD3d16 : VLD3D<"vld3.16">;
def VLD3d32 : VLD3D<"vld3.32">;

// VLD4 : Vector Load (multiple 4-element structures)
class VLD4D<string OpcodeStr>
  : NLdSt<(outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4),
          (ins addrmode6:$addr), NoItinerary,
          !strconcat(OpcodeStr, "\t\\{$dst1,$dst2,$dst3,$dst4\\}, $addr"),
          "", []>;

def VLD4d8  : VLD4D<"vld4.8">;
def VLD4d16 : VLD4D<"vld4.16">;
def VLD4d32 : VLD4D<"vld4.32">;

// VLD2LN : Vector Load (single 2-element structure to one lane)
class VLD2LND<string OpcodeStr>
  : NLdSt<(outs DPR:$dst1, DPR:$dst2),
          (ins addrmode6:$addr, DPR:$src1, DPR:$src2, nohash_imm:$lane),
          NoItinerary,
          !strconcat(OpcodeStr, "\t\\{$dst1[$lane],$dst2[$lane]\\}, $addr"),
          "$src1 = $dst1, $src2 = $dst2", []>;

def VLD2LNd8  : VLD2LND<"vld2.8">;
def VLD2LNd16 : VLD2LND<"vld2.16">;
def VLD2LNd32 : VLD2LND<"vld2.32">;

// VLD3LN : Vector Load (single 3-element structure to one lane)
class VLD3LND<string OpcodeStr>
  : NLdSt<(outs DPR:$dst1, DPR:$dst2, DPR:$dst3),
          (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3,
           nohash_imm:$lane), NoItinerary,
          !strconcat(OpcodeStr,
                     "\t\\{$dst1[$lane],$dst2[$lane],$dst3[$lane]\\}, $addr"),
          "$src1 = $dst1, $src2 = $dst2, $src3 = $dst3", []>;

def VLD3LNd8  : VLD3LND<"vld3.8">;
def VLD3LNd16 : VLD3LND<"vld3.16">;
def VLD3LNd32 : VLD3LND<"vld3.32">;

// VLD4LN : Vector Load (single 4-element structure to one lane)
class VLD4LND<string OpcodeStr>
  : NLdSt<(outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4),
          (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4,
           nohash_imm:$lane), NoItinerary,
          !strconcat(OpcodeStr,
          "\t\\{$dst1[$lane],$dst2[$lane],$dst3[$lane],$dst4[$lane]\\}, $addr"),
          "$src1 = $dst1, $src2 = $dst2, $src3 = $dst3, $src4 = $dst4", []>;

def VLD4LNd8  : VLD4LND<"vld4.8">;
def VLD4LNd16 : VLD4LND<"vld4.16">;
def VLD4LNd32 : VLD4LND<"vld4.32">;
}
// VST1 : Vector Store (multiple single elements)
class VST1D<string OpcodeStr, ValueType Ty, Intrinsic IntOp>
  : NLdSt<(outs), (ins addrmode6:$addr, DPR:$src), NoItinerary,
          !strconcat(OpcodeStr, "\t\\{$src\\}, $addr"), "",
          [(IntOp addrmode6:$addr, (Ty DPR:$src))]>;
class VST1Q<string OpcodeStr, ValueType Ty, Intrinsic IntOp>
  : NLdSt<(outs), (ins addrmode6:$addr, QPR:$src), NoItinerary,
          !strconcat(OpcodeStr, "\t${src:dregpair}, $addr"), "",
          [(IntOp addrmode6:$addr, (Ty QPR:$src))]>;

def VST1d8  : VST1D<"vst1.8",  v8i8,  int_arm_neon_vst1>;
def VST1d16 : VST1D<"vst1.16", v4i16, int_arm_neon_vst1>;
def VST1d32 : VST1D<"vst1.32", v2i32, int_arm_neon_vst1>;
def VST1df  : VST1D<"vst1.32", v2f32, int_arm_neon_vst1>;
def VST1d64 : VST1D<"vst1.64", v1i64, int_arm_neon_vst1>;

def VST1q8  : VST1Q<"vst1.8",  v16i8, int_arm_neon_vst1>;
def VST1q16 : VST1Q<"vst1.16", v8i16, int_arm_neon_vst1>;
def VST1q32 : VST1Q<"vst1.32", v4i32, int_arm_neon_vst1>;
def VST1qf  : VST1Q<"vst1.32", v4f32, int_arm_neon_vst1>;
def VST1q64 : VST1Q<"vst1.64", v2i64, int_arm_neon_vst1>;
let mayStore = 1 in {

// VST2 : Vector Store (multiple 2-element structures)
class VST2D<string OpcodeStr>
  : NLdSt<(outs), (ins addrmode6:$addr, DPR:$src1, DPR:$src2), NoItinerary,
          !strconcat(OpcodeStr, "\t\\{$src1,$src2\\}, $addr"), "", []>;

def VST2d8  : VST2D<"vst2.8">;
def VST2d16 : VST2D<"vst2.16">;
def VST2d32 : VST2D<"vst2.32">;

// VST3 : Vector Store (multiple 3-element structures)
class VST3D<string OpcodeStr>
  : NLdSt<(outs), (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3),
          NoItinerary,
          !strconcat(OpcodeStr, "\t\\{$src1,$src2,$src3\\}, $addr"), "", []>;

def VST3d8  : VST3D<"vst3.8">;
def VST3d16 : VST3D<"vst3.16">;
def VST3d32 : VST3D<"vst3.32">;

// VST4 : Vector Store (multiple 4-element structures)
class VST4D<string OpcodeStr>
  : NLdSt<(outs), (ins addrmode6:$addr,
                   DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4), NoItinerary,
          !strconcat(OpcodeStr, "\t\\{$src1,$src2,$src3,$src4\\}, $addr"),
          "", []>;

def VST4d8  : VST4D<"vst4.8">;
def VST4d16 : VST4D<"vst4.16">;
def VST4d32 : VST4D<"vst4.32">;

// VST2LN : Vector Store (single 2-element structure from one lane)
class VST2LND<string OpcodeStr>
  : NLdSt<(outs), (ins addrmode6:$addr, DPR:$src1, DPR:$src2, nohash_imm:$lane),
          NoItinerary,
          !strconcat(OpcodeStr, "\t\\{$src1[$lane],$src2[$lane]\\}, $addr"),
          "", []>;

def VST2LNd8  : VST2LND<"vst2.8">;
def VST2LNd16 : VST2LND<"vst2.16">;
def VST2LNd32 : VST2LND<"vst2.32">;

// VST3LN : Vector Store (single 3-element structure from one lane)
class VST3LND<string OpcodeStr>
  : NLdSt<(outs), (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3,
                   nohash_imm:$lane), NoItinerary,
          !strconcat(OpcodeStr,
                     "\t\\{$src1[$lane],$src2[$lane],$src3[$lane]\\}, $addr"), "", []>;

def VST3LNd8  : VST3LND<"vst3.8">;
def VST3LNd16 : VST3LND<"vst3.16">;
def VST3LNd32 : VST3LND<"vst3.32">;

// VST4LN : Vector Store (single 4-element structure from one lane)
class VST4LND<string OpcodeStr>
  : NLdSt<(outs), (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3,
                   DPR:$src4, nohash_imm:$lane), NoItinerary,
          !strconcat(OpcodeStr,
          "\t\\{$src1[$lane],$src2[$lane],$src3[$lane],$src4[$lane]\\}, $addr"),
          "", []>;

def VST4LNd8  : VST4LND<"vst4.8">;
def VST4LNd16 : VST4LND<"vst4.16">;
def VST4LNd32 : VST4LND<"vst4.32">;
}
//===----------------------------------------------------------------------===//
// NEON pattern fragments
//===----------------------------------------------------------------------===//

// Extract D sub-registers of Q registers.
// (arm_dsubreg_0 is 5; arm_dsubreg_1 is 6)
def DSubReg_i8_reg  : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(5 + N->getZExtValue() / 8, MVT::i32);
}]>;
def DSubReg_i16_reg : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(5 + N->getZExtValue() / 4, MVT::i32);
}]>;
def DSubReg_i32_reg : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(5 + N->getZExtValue() / 2, MVT::i32);
}]>;
def DSubReg_f64_reg : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(5 + N->getZExtValue(), MVT::i32);
}]>;
def DSubReg_f64_other_reg : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(5 + (1 - N->getZExtValue()), MVT::i32);
}]>;

// Extract S sub-registers of Q/D registers.
// (arm_ssubreg_0 is 1; arm_ssubreg_1 is 2; etc.)
def SSubReg_f32_reg : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(1 + N->getZExtValue(), MVT::i32);
}]>;

// Translate lane numbers from Q registers to D subregs.
def SubReg_i8_lane  : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() & 7, MVT::i32);
}]>;
def SubReg_i16_lane : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() & 3, MVT::i32);
}]>;
def SubReg_i32_lane : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() & 1, MVT::i32);
}]>;
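// Illustrative example (added note, not part of the original comments): lane 5
// of a v8i16 Q register lives in the odd D sub-register, so DSubReg_i16_reg
// maps 5 to 5 + 5/4 = 6 (arm_dsubreg_1) and SubReg_i16_lane maps 5 to
// 5 & 3 = 1.
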
//===----------------------------------------------------------------------===//
// Instruction Classes
//===----------------------------------------------------------------------===//

// Basic 2-register operations, both double- and quad-register.
class N2VD<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
           bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
           ValueType ResTy, ValueType OpTy, SDNode OpNode>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$dst),
        (ins DPR:$src), NoItinerary, !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set DPR:$dst, (ResTy (OpNode (OpTy DPR:$src))))]>;
class N2VQ<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
           bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
           ValueType ResTy, ValueType OpTy, SDNode OpNode>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$dst),
        (ins QPR:$src), NoItinerary, !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set QPR:$dst, (ResTy (OpNode (OpTy QPR:$src))))]>;
// Basic 2-register operations, scalar single-precision.
class N2VDs<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
            bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
            ValueType ResTy, ValueType OpTy, SDNode OpNode>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4,
        (outs DPR_VFP2:$dst), (ins DPR_VFP2:$src),
        NoItinerary, !strconcat(OpcodeStr, "\t$dst, $src"), "", []>;

class N2VDsPat<SDNode OpNode, ValueType ResTy, ValueType OpTy, NeonI Inst>
  : NEONFPPat<(ResTy (OpNode SPR:$a)),
       (EXTRACT_SUBREG
           (Inst (INSERT_SUBREG (OpTy (IMPLICIT_DEF)), SPR:$a, arm_ssubreg_0)),
           arm_ssubreg_0)>;
// Basic 2-register intrinsics, both double- and quad-register.
class N2VDInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
              bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
              ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$dst),
        (ins DPR:$src), NoItinerary, !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src))))]>;
class N2VQInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
              bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
              ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$dst),
        (ins QPR:$src), NoItinerary, !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src))))]>;

// Basic 2-register intrinsics, scalar single-precision
class N2VDInts<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
               bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
               ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4,
        (outs DPR_VFP2:$dst), (ins DPR_VFP2:$src), NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src"), "", []>;
class N2VDIntsPat<SDNode OpNode, NeonI Inst>
  : NEONFPPat<(f32 (OpNode SPR:$a)),
       (EXTRACT_SUBREG
           (Inst (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), SPR:$a, arm_ssubreg_0)),
           arm_ssubreg_0)>;
// Narrow 2-register intrinsics.
class N2VNInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
              bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
              string OpcodeStr, ValueType TyD, ValueType TyQ, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs DPR:$dst),
        (ins QPR:$src), NoItinerary, !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set DPR:$dst, (TyD (IntOp (TyQ QPR:$src))))]>;
// Long 2-register intrinsics. (This is currently only used for VMOVL and is
// derived from N2VImm instead of N2V because of the way the size is encoded.)
class N2VLInt<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
              bit op6, bit op4, string OpcodeStr, ValueType TyQ, ValueType TyD,
              Intrinsic IntOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, op6, op4, (outs QPR:$dst),
        (ins DPR:$src), NoItinerary, !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set QPR:$dst, (TyQ (IntOp (TyD DPR:$src))))]>;
// 2-register shuffles (VTRN/VZIP/VUZP), both double- and quad-register.
class N2VDShuffle<bits<2> op19_18, bits<5> op11_7, string OpcodeStr>
  : N2V<0b11, 0b11, op19_18, 0b10, op11_7, 0, 0, (outs DPR:$dst1, DPR:$dst2),
        (ins DPR:$src1, DPR:$src2), NoItinerary,
        !strconcat(OpcodeStr, "\t$dst1, $dst2"),
        "$src1 = $dst1, $src2 = $dst2", []>;
class N2VQShuffle<bits<2> op19_18, bits<5> op11_7, string OpcodeStr>
  : N2V<0b11, 0b11, op19_18, 0b10, op11_7, 1, 0, (outs QPR:$dst1, QPR:$dst2),
        (ins QPR:$src1, QPR:$src2), NoItinerary,
        !strconcat(OpcodeStr, "\t$dst1, $dst2"),
        "$src1 = $dst1, $src2 = $dst2", []>;
// Basic 3-register operations, both double- and quad-register.
class N3VD<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
           string OpcodeStr, ValueType ResTy, ValueType OpTy,
           SDNode OpNode, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs DPR:$dst), (ins DPR:$src1, DPR:$src2), NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set DPR:$dst, (ResTy (OpNode (OpTy DPR:$src1), (OpTy DPR:$src2))))]> {
  let isCommutable = Commutable;
}
class N3VDSL<bits<2> op21_20, bits<4> op11_8,
             string OpcodeStr, ValueType Ty, SDNode ShOp>
  : N3V<0, 1, op21_20, op11_8, 1, 0,
        (outs DPR:$dst), (ins DPR:$src1, DPR_VFP2:$src2, nohash_imm:$lane),
        NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2[$lane]"), "",
        [(set (Ty DPR:$dst),
              (Ty (ShOp (Ty DPR:$src1),
                        (Ty (NEONvduplane (Ty DPR_VFP2:$src2),
                                          imm:$lane)))))]> {
  let isCommutable = 0;
}
class N3VDSL16<bits<2> op21_20, bits<4> op11_8,
               string OpcodeStr, ValueType Ty, SDNode ShOp>
  : N3V<0, 1, op21_20, op11_8, 1, 0,
        (outs DPR:$dst), (ins DPR:$src1, DPR_8:$src2, nohash_imm:$lane),
        NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2[$lane]"), "",
        [(set (Ty DPR:$dst),
              (Ty (ShOp (Ty DPR:$src1),
                        (Ty (NEONvduplane (Ty DPR_8:$src2),
                                          imm:$lane)))))]> {
  let isCommutable = 0;
}
class N3VQ<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
           string OpcodeStr, ValueType ResTy, ValueType OpTy,
           SDNode OpNode, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 1, op4,
        (outs QPR:$dst), (ins QPR:$src1, QPR:$src2), NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set QPR:$dst, (ResTy (OpNode (OpTy QPR:$src1), (OpTy QPR:$src2))))]> {
  let isCommutable = Commutable;
}
class N3VQSL<bits<2> op21_20, bits<4> op11_8,
             string OpcodeStr, ValueType ResTy, ValueType OpTy, SDNode ShOp>
  : N3V<1, 1, op21_20, op11_8, 1, 0,
        (outs QPR:$dst), (ins QPR:$src1, DPR_VFP2:$src2, nohash_imm:$lane),
        NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2[$lane]"), "",
        [(set (ResTy QPR:$dst),
              (ResTy (ShOp (ResTy QPR:$src1),
                           (ResTy (NEONvduplane (OpTy DPR_VFP2:$src2),
                                                imm:$lane)))))]> {
  let isCommutable = 0;
}
class N3VQSL16<bits<2> op21_20, bits<4> op11_8,
               string OpcodeStr, ValueType ResTy, ValueType OpTy, SDNode ShOp>
  : N3V<1, 1, op21_20, op11_8, 1, 0,
        (outs QPR:$dst), (ins QPR:$src1, DPR_8:$src2, nohash_imm:$lane),
        NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2[$lane]"), "",
        [(set (ResTy QPR:$dst),
              (ResTy (ShOp (ResTy QPR:$src1),
                           (ResTy (NEONvduplane (OpTy DPR_8:$src2),
                                                imm:$lane)))))]> {
  let isCommutable = 0;
}
// Basic 3-register operations, scalar single-precision
class N3VDs<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
            string OpcodeStr, ValueType ResTy, ValueType OpTy,
            SDNode OpNode, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs DPR_VFP2:$dst), (ins DPR_VFP2:$src1, DPR_VFP2:$src2), NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "", []> {
  let isCommutable = Commutable;
}
class N3VDsPat<SDNode OpNode, NeonI Inst>
  : NEONFPPat<(f32 (OpNode SPR:$a, SPR:$b)),
       (EXTRACT_SUBREG
           (Inst (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), SPR:$a, arm_ssubreg_0),
                 (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), SPR:$b, arm_ssubreg_0)),
           arm_ssubreg_0)>;
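// Explanatory note on the scalar patterns above: a scalar f32 operation is
// mapped onto the corresponding double-register NEON instruction by inserting
// each scalar operand into lane 0 of an IMPLICIT_DEF d-register, running Inst,
// and then extracting arm_ssubreg_0 of the result.
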
// Basic 3-register intrinsics, both double- and quad-register.
class N3VDInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
              string OpcodeStr, ValueType ResTy, ValueType OpTy,
              Intrinsic IntOp, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs DPR:$dst), (ins DPR:$src1, DPR:$src2), NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src1), (OpTy DPR:$src2))))]> {
  let isCommutable = Commutable;
}
class N3VDIntSL<bits<2> op21_20, bits<4> op11_8,
                string OpcodeStr, ValueType Ty, Intrinsic IntOp>
  : N3V<0, 1, op21_20, op11_8, 1, 0,
        (outs DPR:$dst), (ins DPR:$src1, DPR_VFP2:$src2, nohash_imm:$lane),
        NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2[$lane]"), "",
        [(set (Ty DPR:$dst),
              (Ty (IntOp (Ty DPR:$src1),
                         (Ty (NEONvduplane (Ty DPR_VFP2:$src2),
                                           imm:$lane)))))]> {
  let isCommutable = 0;
}
class N3VDIntSL16<bits<2> op21_20, bits<4> op11_8,
                  string OpcodeStr, ValueType Ty, Intrinsic IntOp>
  : N3V<0, 1, op21_20, op11_8, 1, 0,
        (outs DPR:$dst), (ins DPR:$src1, DPR_8:$src2, nohash_imm:$lane),
        NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2[$lane]"), "",
        [(set (Ty DPR:$dst),
              (Ty (IntOp (Ty DPR:$src1),
                         (Ty (NEONvduplane (Ty DPR_8:$src2),
                                           imm:$lane)))))]> {
  let isCommutable = 0;
}

class N3VQInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
              string OpcodeStr, ValueType ResTy, ValueType OpTy,
              Intrinsic IntOp, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 1, op4,
        (outs QPR:$dst), (ins QPR:$src1, QPR:$src2), NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src1), (OpTy QPR:$src2))))]> {
  let isCommutable = Commutable;
}
class N3VQIntSL<bits<2> op21_20, bits<4> op11_8,
                string OpcodeStr, ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N3V<1, 1, op21_20, op11_8, 1, 0,
        (outs QPR:$dst), (ins QPR:$src1, DPR_VFP2:$src2, nohash_imm:$lane),
        NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2[$lane]"), "",
        [(set (ResTy QPR:$dst),
              (ResTy (IntOp (ResTy QPR:$src1),
                            (ResTy (NEONvduplane (OpTy DPR_VFP2:$src2),
                                                 imm:$lane)))))]> {
  let isCommutable = 0;
}
class N3VQIntSL16<bits<2> op21_20, bits<4> op11_8,
                  string OpcodeStr, ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N3V<1, 1, op21_20, op11_8, 1, 0,
        (outs QPR:$dst), (ins QPR:$src1, DPR_8:$src2, nohash_imm:$lane),
        NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2[$lane]"), "",
        [(set (ResTy QPR:$dst),
              (ResTy (IntOp (ResTy QPR:$src1),
                            (ResTy (NEONvduplane (OpTy DPR_8:$src2),
                                                 imm:$lane)))))]> {
  let isCommutable = 0;
}
// Multiply-Add/Sub operations, both double- and quad-register.
class N3VDMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
                string OpcodeStr, ValueType Ty, SDNode MulOp, SDNode OpNode>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, DPR:$src3), NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3"), "$src1 = $dst",
        [(set DPR:$dst, (Ty (OpNode DPR:$src1,
                             (Ty (MulOp DPR:$src2, DPR:$src3)))))]>;
class N3VDMulOpSL<bits<2> op21_20, bits<4> op11_8,
                  string OpcodeStr, ValueType Ty, SDNode MulOp, SDNode ShOp>
  : N3V<0, 1, op21_20, op11_8, 1, 0,
        (outs DPR:$dst),
        (ins DPR:$src1, DPR:$src2, DPR_VFP2:$src3, nohash_imm:$lane),
        NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3[$lane]"), "$src1 = $dst",
        [(set (Ty DPR:$dst),
              (Ty (ShOp (Ty DPR:$src1),
                        (Ty (MulOp DPR:$src2,
                                   (Ty (NEONvduplane (Ty DPR_VFP2:$src3),
                                                     imm:$lane)))))))]>;
class N3VDMulOpSL16<bits<2> op21_20, bits<4> op11_8,
                    string OpcodeStr, ValueType Ty, SDNode MulOp, SDNode ShOp>
  : N3V<0, 1, op21_20, op11_8, 1, 0,
        (outs DPR:$dst),
        (ins DPR:$src1, DPR:$src2, DPR_8:$src3, nohash_imm:$lane),
        NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3[$lane]"), "$src1 = $dst",
        [(set (Ty DPR:$dst),
              (Ty (ShOp (Ty DPR:$src1),
                        (Ty (MulOp DPR:$src2,
                                   (Ty (NEONvduplane (Ty DPR_8:$src3),
                                                     imm:$lane)))))))]>;

class N3VQMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
                string OpcodeStr, ValueType Ty, SDNode MulOp, SDNode OpNode>
  : N3V<op24, op23, op21_20, op11_8, 1, op4,
        (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, QPR:$src3), NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3"), "$src1 = $dst",
        [(set QPR:$dst, (Ty (OpNode QPR:$src1,
                             (Ty (MulOp QPR:$src2, QPR:$src3)))))]>;
class N3VQMulOpSL<bits<2> op21_20, bits<4> op11_8,
                  string OpcodeStr, ValueType ResTy, ValueType OpTy,
                  SDNode MulOp, SDNode ShOp>
  : N3V<1, 1, op21_20, op11_8, 1, 0,
        (outs QPR:$dst),
        (ins QPR:$src1, QPR:$src2, DPR_VFP2:$src3, nohash_imm:$lane),
        NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3[$lane]"), "$src1 = $dst",
        [(set (ResTy QPR:$dst),
              (ResTy (ShOp (ResTy QPR:$src1),
                           (ResTy (MulOp QPR:$src2,
                                         (ResTy (NEONvduplane (OpTy DPR_VFP2:$src3),
                                                              imm:$lane)))))))]>;
class N3VQMulOpSL16<bits<2> op21_20, bits<4> op11_8,
                    string OpcodeStr, ValueType ResTy, ValueType OpTy,
                    SDNode MulOp, SDNode ShOp>
  : N3V<1, 1, op21_20, op11_8, 1, 0,
        (outs QPR:$dst),
        (ins QPR:$src1, QPR:$src2, DPR_8:$src3, nohash_imm:$lane),
        NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3[$lane]"), "$src1 = $dst",
        [(set (ResTy QPR:$dst),
              (ResTy (ShOp (ResTy QPR:$src1),
                           (ResTy (MulOp QPR:$src2,
                                         (ResTy (NEONvduplane (OpTy DPR_8:$src3),
                                                              imm:$lane)))))))]>;
// Multiply-Add/Sub operations, scalar single-precision
class N3VDMulOps<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
                 string OpcodeStr, ValueType Ty, SDNode MulOp, SDNode OpNode>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs DPR_VFP2:$dst),
        (ins DPR_VFP2:$src1, DPR_VFP2:$src2, DPR_VFP2:$src3), NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3"), "$src1 = $dst", []>;

class N3VDMulOpsPat<SDNode MulNode, SDNode OpNode, NeonI Inst>
  : NEONFPPat<(f32 (OpNode SPR:$acc, (f32 (MulNode SPR:$a, SPR:$b)))),
      (EXTRACT_SUBREG
          (Inst (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), SPR:$acc, arm_ssubreg_0),
                (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), SPR:$a, arm_ssubreg_0),
                (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), SPR:$b, arm_ssubreg_0)),
          arm_ssubreg_0)>;
// Neon 3-argument intrinsics, both double- and quad-register.
// The destination register is also used as the first source operand register.
class N3VDInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
               string OpcodeStr, ValueType ResTy, ValueType OpTy,
               Intrinsic IntOp>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, DPR:$src3), NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3"), "$src1 = $dst",
        [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src1),
                                      (OpTy DPR:$src2), (OpTy DPR:$src3))))]>;
class N3VQInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
               string OpcodeStr, ValueType ResTy, ValueType OpTy,
               Intrinsic IntOp>
  : N3V<op24, op23, op21_20, op11_8, 1, op4,
        (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, QPR:$src3), NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3"), "$src1 = $dst",
        [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src1),
                                      (OpTy QPR:$src2), (OpTy QPR:$src3))))]>;

// Neon Long 3-argument intrinsic. The destination register is
// a quad-register and is also used as the first source operand register.
class N3VLInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
               string OpcodeStr, ValueType TyQ, ValueType TyD, Intrinsic IntOp>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs QPR:$dst), (ins QPR:$src1, DPR:$src2, DPR:$src3), NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3"), "$src1 = $dst",
        [(set QPR:$dst,
          (TyQ (IntOp (TyQ QPR:$src1), (TyD DPR:$src2), (TyD DPR:$src3))))]>;
class N3VLInt3SL<bit op24, bits<2> op21_20, bits<4> op11_8,
                 string OpcodeStr, ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N3V<op24, 1, op21_20, op11_8, 1, 0,
        (outs QPR:$dst),
        (ins QPR:$src1, DPR:$src2, DPR_VFP2:$src3, nohash_imm:$lane),
        NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3[$lane]"), "$src1 = $dst",
        [(set (ResTy QPR:$dst),
              (ResTy (IntOp (ResTy QPR:$src1),
                            (OpTy DPR:$src2),
                            (OpTy (NEONvduplane (OpTy DPR_VFP2:$src3),
                                                imm:$lane)))))]>;
class N3VLInt3SL16<bit op24, bits<2> op21_20, bits<4> op11_8,
                   string OpcodeStr, ValueType ResTy, ValueType OpTy,
                   Intrinsic IntOp>
  : N3V<op24, 1, op21_20, op11_8, 1, 0,
        (outs QPR:$dst),
        (ins QPR:$src1, DPR:$src2, DPR_8:$src3, nohash_imm:$lane),
        NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3[$lane]"), "$src1 = $dst",
        [(set (ResTy QPR:$dst),
              (ResTy (IntOp (ResTy QPR:$src1),
                            (OpTy DPR:$src2),
                            (OpTy (NEONvduplane (OpTy DPR_8:$src3),
                                                imm:$lane)))))]>;
// Narrowing 3-register intrinsics.
class N3VNInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
              string OpcodeStr, ValueType TyD, ValueType TyQ,
              Intrinsic IntOp, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs DPR:$dst), (ins QPR:$src1, QPR:$src2), NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set DPR:$dst, (TyD (IntOp (TyQ QPR:$src1), (TyQ QPR:$src2))))]> {
  let isCommutable = Commutable;
}
// Long 3-register intrinsics.
class N3VLInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
              string OpcodeStr, ValueType TyQ, ValueType TyD,
              Intrinsic IntOp, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs QPR:$dst), (ins DPR:$src1, DPR:$src2), NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set QPR:$dst, (TyQ (IntOp (TyD DPR:$src1), (TyD DPR:$src2))))]> {
  let isCommutable = Commutable;
}
class N3VLIntSL<bit op24, bits<2> op21_20, bits<4> op11_8,
                string OpcodeStr, ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N3V<op24, 1, op21_20, op11_8, 1, 0,
        (outs QPR:$dst), (ins DPR:$src1, DPR_VFP2:$src2, nohash_imm:$lane),
        NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2[$lane]"), "",
        [(set (ResTy QPR:$dst),
              (ResTy (IntOp (OpTy DPR:$src1),
                            (OpTy (NEONvduplane (OpTy DPR_VFP2:$src2),
                                                imm:$lane)))))]>;
class N3VLIntSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
                  string OpcodeStr, ValueType ResTy, ValueType OpTy,
                  Intrinsic IntOp>
  : N3V<op24, 1, op21_20, op11_8, 1, 0,
        (outs QPR:$dst), (ins DPR:$src1, DPR_8:$src2, nohash_imm:$lane),
        NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2[$lane]"), "",
        [(set (ResTy QPR:$dst),
              (ResTy (IntOp (OpTy DPR:$src1),
                            (OpTy (NEONvduplane (OpTy DPR_8:$src2),
                                                imm:$lane)))))]>;
// Wide 3-register intrinsics.
class N3VWInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
              string OpcodeStr, ValueType TyQ, ValueType TyD,
              Intrinsic IntOp, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs QPR:$dst), (ins QPR:$src1, DPR:$src2), NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set QPR:$dst, (TyQ (IntOp (TyQ QPR:$src1), (TyD DPR:$src2))))]> {
  let isCommutable = Commutable;
}
// Pairwise long 2-register intrinsics, both double- and quad-register.
class N2VDPLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
                bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
                ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$dst),
        (ins DPR:$src), NoItinerary, !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src))))]>;
class N2VQPLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
                bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
                ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$dst),
        (ins QPR:$src), NoItinerary, !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src))))]>;

// Pairwise long 2-register accumulate intrinsics,
// both double- and quad-register.
// The destination register is also used as the first source operand register.
class N2VDPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
                 bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
                 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4,
        (outs DPR:$dst), (ins DPR:$src1, DPR:$src2), NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src2"), "$src1 = $dst",
        [(set DPR:$dst, (ResTy (IntOp (ResTy DPR:$src1), (OpTy DPR:$src2))))]>;
class N2VQPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
                 bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
                 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4,
        (outs QPR:$dst), (ins QPR:$src1, QPR:$src2), NoItinerary,
        !strconcat(OpcodeStr, "\t$dst, $src2"), "$src1 = $dst",
        [(set QPR:$dst, (ResTy (IntOp (ResTy QPR:$src1), (OpTy QPR:$src2))))]>;
// Shift by immediate,
// both double- and quad-register.
class N2VDSh<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
             bit op4, string OpcodeStr, ValueType Ty, SDNode OpNode>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 0, op4,
           (outs DPR:$dst), (ins DPR:$src, i32imm:$SIMM), NoItinerary,
           !strconcat(OpcodeStr, "\t$dst, $src, $SIMM"), "",
           [(set DPR:$dst, (Ty (OpNode (Ty DPR:$src), (i32 imm:$SIMM))))]>;
class N2VQSh<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
             bit op4, string OpcodeStr, ValueType Ty, SDNode OpNode>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 1, op4,
           (outs QPR:$dst), (ins QPR:$src, i32imm:$SIMM), NoItinerary,
           !strconcat(OpcodeStr, "\t$dst, $src, $SIMM"), "",
           [(set QPR:$dst, (Ty (OpNode (Ty QPR:$src), (i32 imm:$SIMM))))]>;

// Long shift by immediate.
class N2VLSh<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
             bit op6, bit op4, string OpcodeStr, ValueType ResTy,
             ValueType OpTy, SDNode OpNode>
  : N2VImm<op24, op23, op21_16, op11_8, op7, op6, op4,
           (outs QPR:$dst), (ins DPR:$src, i32imm:$SIMM), NoItinerary,
           !strconcat(OpcodeStr, "\t$dst, $src, $SIMM"), "",
           [(set QPR:$dst, (ResTy (OpNode (OpTy DPR:$src),
                                          (i32 imm:$SIMM))))]>;

// Narrow shift by immediate.
class N2VNSh<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
             bit op6, bit op4, string OpcodeStr, ValueType ResTy,
             ValueType OpTy, SDNode OpNode>
  : N2VImm<op24, op23, op21_16, op11_8, op7, op6, op4,
           (outs DPR:$dst), (ins QPR:$src, i32imm:$SIMM), NoItinerary,
           !strconcat(OpcodeStr, "\t$dst, $src, $SIMM"), "",
           [(set DPR:$dst, (ResTy (OpNode (OpTy QPR:$src),
                                          (i32 imm:$SIMM))))]>;
// Shift right by immediate and accumulate,
// both double- and quad-register.
class N2VDShAdd<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
                bit op4, string OpcodeStr, ValueType Ty, SDNode ShOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 0, op4,
           (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, i32imm:$SIMM),
           NoItinerary,
           !strconcat(OpcodeStr, "\t$dst, $src2, $SIMM"), "$src1 = $dst",
           [(set DPR:$dst, (Ty (add DPR:$src1,
                                (Ty (ShOp DPR:$src2, (i32 imm:$SIMM))))))]>;
class N2VQShAdd<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
                bit op4, string OpcodeStr, ValueType Ty, SDNode ShOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 1, op4,
           (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, i32imm:$SIMM),
           NoItinerary,
           !strconcat(OpcodeStr, "\t$dst, $src2, $SIMM"), "$src1 = $dst",
           [(set QPR:$dst, (Ty (add QPR:$src1,
                                (Ty (ShOp QPR:$src2, (i32 imm:$SIMM))))))]>;
// Shift by immediate and insert,
// both double- and quad-register.
class N2VDShIns<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
                bit op4, string OpcodeStr, ValueType Ty, SDNode ShOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 0, op4,
           (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, i32imm:$SIMM),
           NoItinerary,
           !strconcat(OpcodeStr, "\t$dst, $src2, $SIMM"), "$src1 = $dst",
           [(set DPR:$dst, (Ty (ShOp DPR:$src1, DPR:$src2, (i32 imm:$SIMM))))]>;
class N2VQShIns<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
                bit op4, string OpcodeStr, ValueType Ty, SDNode ShOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 1, op4,
           (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, i32imm:$SIMM),
           NoItinerary,
           !strconcat(OpcodeStr, "\t$dst, $src2, $SIMM"), "$src1 = $dst",
           [(set QPR:$dst, (Ty (ShOp QPR:$src1, QPR:$src2, (i32 imm:$SIMM))))]>;
// Convert, with fractional bits immediate,
// both double- and quad-register.
class N2VCvtD<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
              bit op4, string OpcodeStr, ValueType ResTy, ValueType OpTy,
              Intrinsic IntOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 0, op4,
           (outs DPR:$dst), (ins DPR:$src, i32imm:$SIMM), NoItinerary,
           !strconcat(OpcodeStr, "\t$dst, $src, $SIMM"), "",
           [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src), (i32 imm:$SIMM))))]>;
class N2VCvtQ<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
              bit op4, string OpcodeStr, ValueType ResTy, ValueType OpTy,
              Intrinsic IntOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 1, op4,
           (outs QPR:$dst), (ins QPR:$src, i32imm:$SIMM), NoItinerary,
           !strconcat(OpcodeStr, "\t$dst, $src, $SIMM"), "",
           [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src), (i32 imm:$SIMM))))]>;
//===----------------------------------------------------------------------===//
// Multiclasses
//===----------------------------------------------------------------------===//
// Neon 3-register vector operations.

// First with only element sizes of 8, 16 and 32 bits:
multiclass N3V_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                   string OpcodeStr, SDNode OpNode, bit Commutable = 0> {
  // 64-bit vector types.
  def v8i8  : N3VD<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr, "8"),
                   v8i8, v8i8, OpNode, Commutable>;
  def v4i16 : N3VD<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr, "16"),
                   v4i16, v4i16, OpNode, Commutable>;
  def v2i32 : N3VD<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr, "32"),
                   v2i32, v2i32, OpNode, Commutable>;

  // 128-bit vector types.
  def v16i8 : N3VQ<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr, "8"),
                   v16i8, v16i8, OpNode, Commutable>;
  def v8i16 : N3VQ<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr, "16"),
                   v8i16, v8i16, OpNode, Commutable>;
  def v4i32 : N3VQ<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr, "32"),
                   v4i32, v4i32, OpNode, Commutable>;
}

multiclass N3VSL_HS<bits<4> op11_8, string OpcodeStr, SDNode ShOp> {
  def v4i16 : N3VDSL16<0b01, op11_8, !strconcat(OpcodeStr, "16"), v4i16, ShOp>;
  def v2i32 : N3VDSL<0b10, op11_8, !strconcat(OpcodeStr, "32"), v2i32, ShOp>;
  def v8i16 : N3VQSL16<0b01, op11_8, !strconcat(OpcodeStr, "16"), v8i16, v4i16, ShOp>;
  def v4i32 : N3VQSL<0b10, op11_8, !strconcat(OpcodeStr, "32"), v4i32, v2i32, ShOp>;
}

// ....then also with element size 64 bits:
multiclass N3V_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                    string OpcodeStr, SDNode OpNode, bit Commutable = 0>
  : N3V_QHS<op24, op23, op11_8, op4, OpcodeStr, OpNode, Commutable> {
  def v1i64 : N3VD<op24, op23, 0b11, op11_8, op4, !strconcat(OpcodeStr, "64"),
                   v1i64, v1i64, OpNode, Commutable>;
  def v2i64 : N3VQ<op24, op23, 0b11, op11_8, op4, !strconcat(OpcodeStr, "64"),
                   v2i64, v2i64, OpNode, Commutable>;
}
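// Illustrative note: a "defm FOO : N3V_QHSD<...>" therefore expands into the
// eight instruction records FOOv8i8, FOOv4i16, FOOv2i32, FOOv1i64, FOOv16i8,
// FOOv8i16, FOOv4i32 and FOOv2i64, one per vector type defined above; the
// lane-indexed patterns near the end of this file rely on that naming (e.g.
// VMULslv8i16).
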
// Neon Narrowing 2-register vector intrinsics,
//   source operand element sizes of 16, 32 and 64 bits:
multiclass N2VNInt_HSD<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
                       bits<5> op11_7, bit op6, bit op4, string OpcodeStr,
                       Intrinsic IntOp> {
  def v8i8  : N2VNInt<op24_23, op21_20, 0b00, op17_16, op11_7, op6, op4,
                      !strconcat(OpcodeStr, "16"), v8i8, v8i16, IntOp>;
  def v4i16 : N2VNInt<op24_23, op21_20, 0b01, op17_16, op11_7, op6, op4,
                      !strconcat(OpcodeStr, "32"), v4i16, v4i32, IntOp>;
  def v2i32 : N2VNInt<op24_23, op21_20, 0b10, op17_16, op11_7, op6, op4,
                      !strconcat(OpcodeStr, "64"), v2i32, v2i64, IntOp>;
}

// Neon Lengthening 2-register vector intrinsic (currently specific to VMOVL).
//   source operand element sizes of 8, 16 and 32 bits:
multiclass N2VLInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6,
                       bit op4, string OpcodeStr, Intrinsic IntOp> {
  def v8i16 : N2VLInt<op24, op23, 0b001000, op11_8, op7, op6, op4,
                      !strconcat(OpcodeStr, "8"), v8i16, v8i8, IntOp>;
  def v4i32 : N2VLInt<op24, op23, 0b010000, op11_8, op7, op6, op4,
                      !strconcat(OpcodeStr, "16"), v4i32, v4i16, IntOp>;
  def v2i64 : N2VLInt<op24, op23, 0b100000, op11_8, op7, op6, op4,
                      !strconcat(OpcodeStr, "32"), v2i64, v2i32, IntOp>;
}
// Neon 3-register vector intrinsics.

// First with only element sizes of 16 and 32 bits:
multiclass N3VInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
                     string OpcodeStr, Intrinsic IntOp, bit Commutable = 0> {
  // 64-bit vector types.
  def v4i16 : N3VDInt<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr,"16"),
                      v4i16, v4i16, IntOp, Commutable>;
  def v2i32 : N3VDInt<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr,"32"),
                      v2i32, v2i32, IntOp, Commutable>;

  // 128-bit vector types.
  def v8i16 : N3VQInt<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr,"16"),
                      v8i16, v8i16, IntOp, Commutable>;
  def v4i32 : N3VQInt<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr,"32"),
                      v4i32, v4i32, IntOp, Commutable>;
}

multiclass N3VIntSL_HS<bits<4> op11_8, string OpcodeStr, Intrinsic IntOp> {
  def v4i16 : N3VDIntSL16<0b01, op11_8, !strconcat(OpcodeStr, "16"), v4i16, IntOp>;
  def v2i32 : N3VDIntSL<0b10, op11_8, !strconcat(OpcodeStr, "32"), v2i32, IntOp>;
  def v8i16 : N3VQIntSL16<0b01, op11_8, !strconcat(OpcodeStr, "16"), v8i16, v4i16, IntOp>;
  def v4i32 : N3VQIntSL<0b10, op11_8, !strconcat(OpcodeStr, "32"), v4i32, v2i32, IntOp>;
}

// ....then also with element size of 8 bits:
multiclass N3VInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                      string OpcodeStr, Intrinsic IntOp, bit Commutable = 0>
  : N3VInt_HS<op24, op23, op11_8, op4, OpcodeStr, IntOp, Commutable> {
  def v8i8  : N3VDInt<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr, "8"),
                      v8i8, v8i8, IntOp, Commutable>;
  def v16i8 : N3VQInt<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr, "8"),
                      v16i8, v16i8, IntOp, Commutable>;
}

// ....then also with element size of 64 bits:
multiclass N3VInt_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                       string OpcodeStr, Intrinsic IntOp, bit Commutable = 0>
  : N3VInt_QHS<op24, op23, op11_8, op4, OpcodeStr, IntOp, Commutable> {
  def v1i64 : N3VDInt<op24, op23, 0b11, op11_8, op4, !strconcat(OpcodeStr,"64"),
                      v1i64, v1i64, IntOp, Commutable>;
  def v2i64 : N3VQInt<op24, op23, 0b11, op11_8, op4, !strconcat(OpcodeStr,"64"),
                      v2i64, v2i64, IntOp, Commutable>;
}
// Neon Narrowing 3-register vector intrinsics,
//   source operand element sizes of 16, 32 and 64 bits:
multiclass N3VNInt_HSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                       string OpcodeStr, Intrinsic IntOp, bit Commutable = 0> {
  def v8i8  : N3VNInt<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr,"16"),
                      v8i8, v8i16, IntOp, Commutable>;
  def v4i16 : N3VNInt<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr,"32"),
                      v4i16, v4i32, IntOp, Commutable>;
  def v2i32 : N3VNInt<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr,"64"),
                      v2i32, v2i64, IntOp, Commutable>;
}
// Neon Long 3-register vector intrinsics.

// First with only element sizes of 16 and 32 bits:
multiclass N3VLInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
                      string OpcodeStr, Intrinsic IntOp, bit Commutable = 0> {
  def v4i32 : N3VLInt<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr,"16"),
                      v4i32, v4i16, IntOp, Commutable>;
  def v2i64 : N3VLInt<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr,"32"),
                      v2i64, v2i32, IntOp, Commutable>;
}

multiclass N3VLIntSL_HS<bit op24, bits<4> op11_8,
                        string OpcodeStr, Intrinsic IntOp> {
  def v4i16 : N3VLIntSL16<op24, 0b01, op11_8,
                          !strconcat(OpcodeStr, "16"), v4i32, v4i16, IntOp>;
  def v2i32 : N3VLIntSL<op24, 0b10, op11_8,
                        !strconcat(OpcodeStr, "32"), v2i64, v2i32, IntOp>;
}

// ....then also with element size of 8 bits:
multiclass N3VLInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                       string OpcodeStr, Intrinsic IntOp, bit Commutable = 0>
  : N3VLInt_HS<op24, op23, op11_8, op4, OpcodeStr, IntOp, Commutable> {
  def v8i16 : N3VLInt<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr, "8"),
                      v8i16, v8i8, IntOp, Commutable>;
}
// Neon Wide 3-register vector intrinsics,
//   source operand element sizes of 8, 16 and 32 bits:
multiclass N3VWInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                       string OpcodeStr, Intrinsic IntOp, bit Commutable = 0> {
  def v8i16 : N3VWInt<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr, "8"),
                      v8i16, v8i8, IntOp, Commutable>;
  def v4i32 : N3VWInt<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr,"16"),
                      v4i32, v4i16, IntOp, Commutable>;
  def v2i64 : N3VWInt<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr,"32"),
                      v2i64, v2i32, IntOp, Commutable>;
}
// Neon Multiply-Op vector operations,
//   element sizes of 8, 16 and 32 bits:
multiclass N3VMulOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                        string OpcodeStr, SDNode OpNode> {
  // 64-bit vector types.
  def v8i8  : N3VDMulOp<op24, op23, 0b00, op11_8, op4,
                        !strconcat(OpcodeStr, "8"), v8i8, mul, OpNode>;
  def v4i16 : N3VDMulOp<op24, op23, 0b01, op11_8, op4,
                        !strconcat(OpcodeStr, "16"), v4i16, mul, OpNode>;
  def v2i32 : N3VDMulOp<op24, op23, 0b10, op11_8, op4,
                        !strconcat(OpcodeStr, "32"), v2i32, mul, OpNode>;

  // 128-bit vector types.
  def v16i8 : N3VQMulOp<op24, op23, 0b00, op11_8, op4,
                        !strconcat(OpcodeStr, "8"), v16i8, mul, OpNode>;
  def v8i16 : N3VQMulOp<op24, op23, 0b01, op11_8, op4,
                        !strconcat(OpcodeStr, "16"), v8i16, mul, OpNode>;
  def v4i32 : N3VQMulOp<op24, op23, 0b10, op11_8, op4,
                        !strconcat(OpcodeStr, "32"), v4i32, mul, OpNode>;
}

multiclass N3VMulOpSL_HS<bits<4> op11_8, string OpcodeStr, SDNode ShOp> {
  def v4i16 : N3VDMulOpSL16<0b01, op11_8,
                            !strconcat(OpcodeStr, "16"), v4i16, mul, ShOp>;
  def v2i32 : N3VDMulOpSL<0b10, op11_8,
                          !strconcat(OpcodeStr, "32"), v2i32, mul, ShOp>;
  def v8i16 : N3VQMulOpSL16<0b01, op11_8,
                            !strconcat(OpcodeStr, "16"), v8i16, v4i16, mul, ShOp>;
  def v4i32 : N3VQMulOpSL<0b10, op11_8,
                          !strconcat(OpcodeStr, "32"), v4i32, v2i32, mul, ShOp>;
}
// Neon 3-argument intrinsics,
//   element sizes of 8, 16 and 32 bits:
multiclass N3VInt3_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                       string OpcodeStr, Intrinsic IntOp> {
  // 64-bit vector types.
  def v8i8  : N3VDInt3<op24, op23, 0b00, op11_8, op4,
                       !strconcat(OpcodeStr, "8"), v8i8, v8i8, IntOp>;
  def v4i16 : N3VDInt3<op24, op23, 0b01, op11_8, op4,
                       !strconcat(OpcodeStr, "16"), v4i16, v4i16, IntOp>;
  def v2i32 : N3VDInt3<op24, op23, 0b10, op11_8, op4,
                       !strconcat(OpcodeStr, "32"), v2i32, v2i32, IntOp>;

  // 128-bit vector types.
  def v16i8 : N3VQInt3<op24, op23, 0b00, op11_8, op4,
                       !strconcat(OpcodeStr, "8"), v16i8, v16i8, IntOp>;
  def v8i16 : N3VQInt3<op24, op23, 0b01, op11_8, op4,
                       !strconcat(OpcodeStr, "16"), v8i16, v8i16, IntOp>;
  def v4i32 : N3VQInt3<op24, op23, 0b10, op11_8, op4,
                       !strconcat(OpcodeStr, "32"), v4i32, v4i32, IntOp>;
}
// Neon Long 3-argument intrinsics.

// First with only element sizes of 16 and 32 bits:
multiclass N3VLInt3_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
                       string OpcodeStr, Intrinsic IntOp> {
  def v4i32 : N3VLInt3<op24, op23, 0b01, op11_8, op4,
                       !strconcat(OpcodeStr, "16"), v4i32, v4i16, IntOp>;
  def v2i64 : N3VLInt3<op24, op23, 0b10, op11_8, op4,
                       !strconcat(OpcodeStr, "32"), v2i64, v2i32, IntOp>;
}

multiclass N3VLInt3SL_HS<bit op24, bits<4> op11_8,
                         string OpcodeStr, Intrinsic IntOp> {
  def v4i16 : N3VLInt3SL16<op24, 0b01, op11_8,
                           !strconcat(OpcodeStr, "16"), v4i32, v4i16, IntOp>;
  def v2i32 : N3VLInt3SL<op24, 0b10, op11_8,
                         !strconcat(OpcodeStr, "32"), v2i64, v2i32, IntOp>;
}

// ....then also with element size of 8 bits:
multiclass N3VLInt3_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                        string OpcodeStr, Intrinsic IntOp>
  : N3VLInt3_HS<op24, op23, op11_8, op4, OpcodeStr, IntOp> {
  def v8i16 : N3VLInt3<op24, op23, 0b00, op11_8, op4,
                       !strconcat(OpcodeStr, "8"), v8i16, v8i8, IntOp>;
}
// Neon 2-register vector intrinsics,
//   element sizes of 8, 16 and 32 bits:
multiclass N2VInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
                      bits<5> op11_7, bit op4, string OpcodeStr,
                      Intrinsic IntOp> {
  // 64-bit vector types.
  def v8i8  : N2VDInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
                      !strconcat(OpcodeStr, "8"), v8i8, v8i8, IntOp>;
  def v4i16 : N2VDInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
                      !strconcat(OpcodeStr, "16"), v4i16, v4i16, IntOp>;
  def v2i32 : N2VDInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
                      !strconcat(OpcodeStr, "32"), v2i32, v2i32, IntOp>;

  // 128-bit vector types.
  def v16i8 : N2VQInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
                      !strconcat(OpcodeStr, "8"), v16i8, v16i8, IntOp>;
  def v8i16 : N2VQInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
                      !strconcat(OpcodeStr, "16"), v8i16, v8i16, IntOp>;
  def v4i32 : N2VQInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
                      !strconcat(OpcodeStr, "32"), v4i32, v4i32, IntOp>;
}
// Neon Pairwise long 2-register intrinsics,
//   element sizes of 8, 16 and 32 bits:
multiclass N2VPLInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
                        bits<5> op11_7, bit op4,
                        string OpcodeStr, Intrinsic IntOp> {
  // 64-bit vector types.
  def v8i8  : N2VDPLInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
                        !strconcat(OpcodeStr, "8"), v4i16, v8i8, IntOp>;
  def v4i16 : N2VDPLInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
                        !strconcat(OpcodeStr, "16"), v2i32, v4i16, IntOp>;
  def v2i32 : N2VDPLInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
                        !strconcat(OpcodeStr, "32"), v1i64, v2i32, IntOp>;

  // 128-bit vector types.
  def v16i8 : N2VQPLInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
                        !strconcat(OpcodeStr, "8"), v8i16, v16i8, IntOp>;
  def v8i16 : N2VQPLInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
                        !strconcat(OpcodeStr, "16"), v4i32, v8i16, IntOp>;
  def v4i32 : N2VQPLInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
                        !strconcat(OpcodeStr, "32"), v2i64, v4i32, IntOp>;
}
// Neon Pairwise long 2-register accumulate intrinsics,
//   element sizes of 8, 16 and 32 bits:
multiclass N2VPLInt2_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
                         bits<5> op11_7, bit op4,
                         string OpcodeStr, Intrinsic IntOp> {
  // 64-bit vector types.
  def v8i8  : N2VDPLInt2<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
                         !strconcat(OpcodeStr, "8"), v4i16, v8i8, IntOp>;
  def v4i16 : N2VDPLInt2<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
                         !strconcat(OpcodeStr, "16"), v2i32, v4i16, IntOp>;
  def v2i32 : N2VDPLInt2<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
                         !strconcat(OpcodeStr, "32"), v1i64, v2i32, IntOp>;

  // 128-bit vector types.
  def v16i8 : N2VQPLInt2<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
                         !strconcat(OpcodeStr, "8"), v8i16, v16i8, IntOp>;
  def v8i16 : N2VQPLInt2<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
                         !strconcat(OpcodeStr, "16"), v4i32, v8i16, IntOp>;
  def v4i32 : N2VQPLInt2<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
                         !strconcat(OpcodeStr, "32"), v2i64, v4i32, IntOp>;
}
// Neon 2-register vector shift by immediate,
//   element sizes of 8, 16, 32 and 64 bits:
multiclass N2VSh_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                      string OpcodeStr, SDNode OpNode> {
  // 64-bit vector types.
  def v8i8  : N2VDSh<op24, op23, 0b001000, op11_8, 0, op4,
                     !strconcat(OpcodeStr, "8"), v8i8, OpNode>;
  def v4i16 : N2VDSh<op24, op23, 0b010000, op11_8, 0, op4,
                     !strconcat(OpcodeStr, "16"), v4i16, OpNode>;
  def v2i32 : N2VDSh<op24, op23, 0b100000, op11_8, 0, op4,
                     !strconcat(OpcodeStr, "32"), v2i32, OpNode>;
  def v1i64 : N2VDSh<op24, op23, 0b000000, op11_8, 1, op4,
                     !strconcat(OpcodeStr, "64"), v1i64, OpNode>;

  // 128-bit vector types.
  def v16i8 : N2VQSh<op24, op23, 0b001000, op11_8, 0, op4,
                     !strconcat(OpcodeStr, "8"), v16i8, OpNode>;
  def v8i16 : N2VQSh<op24, op23, 0b010000, op11_8, 0, op4,
                     !strconcat(OpcodeStr, "16"), v8i16, OpNode>;
  def v4i32 : N2VQSh<op24, op23, 0b100000, op11_8, 0, op4,
                     !strconcat(OpcodeStr, "32"), v4i32, OpNode>;
  def v2i64 : N2VQSh<op24, op23, 0b000000, op11_8, 1, op4,
                     !strconcat(OpcodeStr, "64"), v2i64, OpNode>;
}
// Neon Shift-Accumulate vector operations,
//   element sizes of 8, 16, 32 and 64 bits:
multiclass N2VShAdd_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                         string OpcodeStr, SDNode ShOp> {
  // 64-bit vector types.
  def v8i8  : N2VDShAdd<op24, op23, 0b001000, op11_8, 0, op4,
                        !strconcat(OpcodeStr, "8"), v8i8, ShOp>;
  def v4i16 : N2VDShAdd<op24, op23, 0b010000, op11_8, 0, op4,
                        !strconcat(OpcodeStr, "16"), v4i16, ShOp>;
  def v2i32 : N2VDShAdd<op24, op23, 0b100000, op11_8, 0, op4,
                        !strconcat(OpcodeStr, "32"), v2i32, ShOp>;
  def v1i64 : N2VDShAdd<op24, op23, 0b000000, op11_8, 1, op4,
                        !strconcat(OpcodeStr, "64"), v1i64, ShOp>;

  // 128-bit vector types.
  def v16i8 : N2VQShAdd<op24, op23, 0b001000, op11_8, 0, op4,
                        !strconcat(OpcodeStr, "8"), v16i8, ShOp>;
  def v8i16 : N2VQShAdd<op24, op23, 0b010000, op11_8, 0, op4,
                        !strconcat(OpcodeStr, "16"), v8i16, ShOp>;
  def v4i32 : N2VQShAdd<op24, op23, 0b100000, op11_8, 0, op4,
                        !strconcat(OpcodeStr, "32"), v4i32, ShOp>;
  def v2i64 : N2VQShAdd<op24, op23, 0b000000, op11_8, 1, op4,
                        !strconcat(OpcodeStr, "64"), v2i64, ShOp>;
}
1308 // Neon Shift-Insert vector operations,
1309 // element sizes of 8, 16, 32 and 64 bits:
1310 multiclass N2VShIns_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
1311 string OpcodeStr, SDNode ShOp> {
1312 // 64-bit vector types.
1313 def v8i8 : N2VDShIns<op24, op23, 0b001000, op11_8, 0, op4,
1314 !strconcat(OpcodeStr, "8"), v8i8, ShOp>;
1315 def v4i16 : N2VDShIns<op24, op23, 0b010000, op11_8, 0, op4,
1316 !strconcat(OpcodeStr, "16"), v4i16, ShOp>;
1317 def v2i32 : N2VDShIns<op24, op23, 0b100000, op11_8, 0, op4,
1318 !strconcat(OpcodeStr, "32"), v2i32, ShOp>;
1319 def v1i64 : N2VDShIns<op24, op23, 0b000000, op11_8, 1, op4,
1320 !strconcat(OpcodeStr, "64"), v1i64, ShOp>;
1322 // 128-bit vector types.
1323 def v16i8 : N2VQShIns<op24, op23, 0b001000, op11_8, 0, op4,
1324 !strconcat(OpcodeStr, "8"), v16i8, ShOp>;
1325 def v8i16 : N2VQShIns<op24, op23, 0b010000, op11_8, 0, op4,
1326 !strconcat(OpcodeStr, "16"), v8i16, ShOp>;
1327 def v4i32 : N2VQShIns<op24, op23, 0b100000, op11_8, 0, op4,
1328 !strconcat(OpcodeStr, "32"), v4i32, ShOp>;
1329 def v2i64 : N2VQShIns<op24, op23, 0b000000, op11_8, 1, op4,
1330 !strconcat(OpcodeStr, "64"), v2i64, ShOp>;
1331 }
1333 //===----------------------------------------------------------------------===//
1334 // Instruction Definitions.
1335 //===----------------------------------------------------------------------===//
1337 // Vector Add Operations.
1339 // VADD : Vector Add (integer and floating-point)
1340 defm VADD : N3V_QHSD<0, 0, 0b1000, 0, "vadd.i", add, 1>;
1341 def VADDfd : N3VD<0, 0, 0b00, 0b1101, 0, "vadd.f32", v2f32, v2f32, fadd, 1>;
1342 def VADDfq : N3VQ<0, 0, 0b00, 0b1101, 0, "vadd.f32", v4f32, v4f32, fadd, 1>;
1343 // VADDL : Vector Add Long (Q = D + D)
1344 defm VADDLs : N3VLInt_QHS<0,1,0b0000,0, "vaddl.s", int_arm_neon_vaddls, 1>;
1345 defm VADDLu : N3VLInt_QHS<1,1,0b0000,0, "vaddl.u", int_arm_neon_vaddlu, 1>;
1346 // VADDW : Vector Add Wide (Q = Q + D)
1347 defm VADDWs : N3VWInt_QHS<0,1,0b0001,0, "vaddw.s", int_arm_neon_vaddws, 0>;
1348 defm VADDWu : N3VWInt_QHS<1,1,0b0001,0, "vaddw.u", int_arm_neon_vaddwu, 0>;
1349 // VHADD : Vector Halving Add
1350 defm VHADDs : N3VInt_QHS<0,0,0b0000,0, "vhadd.s", int_arm_neon_vhadds, 1>;
1351 defm VHADDu : N3VInt_QHS<1,0,0b0000,0, "vhadd.u", int_arm_neon_vhaddu, 1>;
1352 // VRHADD : Vector Rounding Halving Add
1353 defm VRHADDs : N3VInt_QHS<0,0,0b0001,0, "vrhadd.s", int_arm_neon_vrhadds, 1>;
1354 defm VRHADDu : N3VInt_QHS<1,0,0b0001,0, "vrhadd.u", int_arm_neon_vrhaddu, 1>;
1355 // VQADD : Vector Saturating Add
1356 defm VQADDs : N3VInt_QHSD<0,0,0b0000,1, "vqadd.s", int_arm_neon_vqadds, 1>;
1357 defm VQADDu : N3VInt_QHSD<1,0,0b0000,1, "vqadd.u", int_arm_neon_vqaddu, 1>;
1358 // VADDHN : Vector Add and Narrow Returning High Half (D = Q + Q)
1359 defm VADDHN : N3VNInt_HSD<0,1,0b0100,0, "vaddhn.i", int_arm_neon_vaddhn, 1>;
1360 // VRADDHN : Vector Rounding Add and Narrow Returning High Half (D = Q + Q)
1361 defm VRADDHN : N3VNInt_HSD<1,1,0b0100,0, "vraddhn.i", int_arm_neon_vraddhn, 1>;
1363 // Vector Multiply Operations.
1365 // VMUL : Vector Multiply (integer, polynomial and floating-point)
1366 defm VMUL : N3V_QHS<0, 0, 0b1001, 1, "vmul.i", mul, 1>;
1367 def VMULpd : N3VDInt<1, 0, 0b00, 0b1001, 1, "vmul.p8", v8i8, v8i8,
1368 int_arm_neon_vmulp, 1>;
1369 def VMULpq : N3VQInt<1, 0, 0b00, 0b1001, 1, "vmul.p8", v16i8, v16i8,
1370 int_arm_neon_vmulp, 1>;
1371 def VMULfd : N3VD<1, 0, 0b00, 0b1101, 1, "vmul.f32", v2f32, v2f32, fmul, 1>;
1372 def VMULfq : N3VQ<1, 0, 0b00, 0b1101, 1, "vmul.f32", v4f32, v4f32, fmul, 1>;
1373 defm VMULsl : N3VSL_HS<0b1000, "vmul.i", mul>;
1374 def VMULslfd : N3VDSL<0b10, 0b1001, "vmul.f32", v2f32, fmul>;
1375 def VMULslfq : N3VQSL<0b10, 0b1001, "vmul.f32", v4f32, v2f32, fmul>;
1376 def : Pat<(v8i16 (mul (v8i16 QPR:$src1),
1377 (v8i16 (NEONvduplane (v8i16 QPR:$src2), imm:$lane)))),
1378 (v8i16 (VMULslv8i16 (v8i16 QPR:$src1),
1379 (v4i16 (EXTRACT_SUBREG QPR:$src2,
1380 (DSubReg_i16_reg imm:$lane))),
1381 (SubReg_i16_lane imm:$lane)))>;
1382 def : Pat<(v4i32 (mul (v4i32 QPR:$src1),
1383 (v4i32 (NEONvduplane (v4i32 QPR:$src2), imm:$lane)))),
1384 (v4i32 (VMULslv4i32 (v4i32 QPR:$src1),
1385 (v2i32 (EXTRACT_SUBREG QPR:$src2,
1386 (DSubReg_i32_reg imm:$lane))),
1387 (SubReg_i32_lane imm:$lane)))>;
1388 def : Pat<(v4f32 (fmul (v4f32 QPR:$src1),
1389 (v4f32 (NEONvduplane (v4f32 QPR:$src2), imm:$lane)))),
1390 (v4f32 (VMULslfq (v4f32 QPR:$src1),
1391 (v2f32 (EXTRACT_SUBREG QPR:$src2,
1392 (DSubReg_i32_reg imm:$lane))),
1393 (SubReg_i32_lane imm:$lane)))>;
1395 // VQDMULH : Vector Saturating Doubling Multiply Returning High Half
1396 defm VQDMULH : N3VInt_HS<0,0,0b1011,0, "vqdmulh.s", int_arm_neon_vqdmulh, 1>;
1397 defm VQDMULHsl: N3VIntSL_HS<0b1100, "vqdmulh.s", int_arm_neon_vqdmulh>;
1398 def : Pat<(v8i16 (int_arm_neon_vqdmulh (v8i16 QPR:$src1),
1399 (v8i16 (NEONvduplane (v8i16 QPR:$src2), imm:$lane)))),
1400 (v8i16 (VQDMULHslv8i16 (v8i16 QPR:$src1),
1401 (v4i16 (EXTRACT_SUBREG QPR:$src2,
1402 (DSubReg_i16_reg imm:$lane))),
1403 (SubReg_i16_lane imm:$lane)))>;
1404 def : Pat<(v4i32 (int_arm_neon_vqdmulh (v4i32 QPR:$src1),
1405 (v4i32 (NEONvduplane (v4i32 QPR:$src2), imm:$lane)))),
1406 (v4i32 (VQDMULHslv4i32 (v4i32 QPR:$src1),
1407 (v2i32 (EXTRACT_SUBREG QPR:$src2,
1408 (DSubReg_i32_reg imm:$lane))),
1409 (SubReg_i32_lane imm:$lane)))>;
1411 // VQRDMULH : Vector Rounding Saturating Doubling Multiply Returning High Half
1412 defm VQRDMULH : N3VInt_HS<1,0,0b1011,0, "vqrdmulh.s", int_arm_neon_vqrdmulh, 1>;
1413 defm VQRDMULHsl : N3VIntSL_HS<0b1101, "vqrdmulh.s", int_arm_neon_vqrdmulh>;
1414 def : Pat<(v8i16 (int_arm_neon_vqrdmulh (v8i16 QPR:$src1),
1415 (v8i16 (NEONvduplane (v8i16 QPR:$src2), imm:$lane)))),
1416 (v8i16 (VQRDMULHslv8i16 (v8i16 QPR:$src1),
1417 (v4i16 (EXTRACT_SUBREG QPR:$src2,
1418 (DSubReg_i16_reg imm:$lane))),
1419 (SubReg_i16_lane imm:$lane)))>;
1420 def : Pat<(v4i32 (int_arm_neon_vqrdmulh (v4i32 QPR:$src1),
1421 (v4i32 (NEONvduplane (v4i32 QPR:$src2), imm:$lane)))),
1422 (v4i32 (VQRDMULHslv4i32 (v4i32 QPR:$src1),
1423 (v2i32 (EXTRACT_SUBREG QPR:$src2,
1424 (DSubReg_i32_reg imm:$lane))),
1425 (SubReg_i32_lane imm:$lane)))>;
1427 // VMULL : Vector Multiply Long (integer and polynomial) (Q = D * D)
1428 defm VMULLs : N3VLInt_QHS<0,1,0b1100,0, "vmull.s", int_arm_neon_vmulls, 1>;
1429 defm VMULLu : N3VLInt_QHS<1,1,0b1100,0, "vmull.u", int_arm_neon_vmullu, 1>;
1430 def VMULLp : N3VLInt<0, 1, 0b00, 0b1110, 0, "vmull.p8", v8i16, v8i8,
1431 int_arm_neon_vmullp, 1>;
1432 defm VMULLsls : N3VLIntSL_HS<0, 0b1010, "vmull.s", int_arm_neon_vmulls>;
1433 defm VMULLslu : N3VLIntSL_HS<1, 0b1010, "vmull.u", int_arm_neon_vmullu>;
1435 // VQDMULL : Vector Saturating Doubling Multiply Long (Q = D * D)
1436 defm VQDMULL : N3VLInt_HS<0,1,0b1101,0, "vqdmull.s", int_arm_neon_vqdmull, 1>;
1437 defm VQDMULLsl: N3VLIntSL_HS<0, 0b1011, "vqdmull.s", int_arm_neon_vqdmull>;
1439 // Vector Multiply-Accumulate and Multiply-Subtract Operations.
1441 // VMLA : Vector Multiply Accumulate (integer and floating-point)
1442 defm VMLA : N3VMulOp_QHS<0, 0, 0b1001, 0, "vmla.i", add>;
1443 def VMLAfd : N3VDMulOp<0, 0, 0b00, 0b1101, 1, "vmla.f32", v2f32, fmul, fadd>;
1444 def VMLAfq : N3VQMulOp<0, 0, 0b00, 0b1101, 1, "vmla.f32", v4f32, fmul, fadd>;
1445 defm VMLAsl : N3VMulOpSL_HS<0b0000, "vmla.i", add>;
1446 def VMLAslfd : N3VDMulOpSL<0b10, 0b0001, "vmla.f32", v2f32, fmul, fadd>;
1447 def VMLAslfq : N3VQMulOpSL<0b10, 0b0001, "vmla.f32", v4f32, v2f32, fmul, fadd>;
1449 def : Pat<(v8i16 (add (v8i16 QPR:$src1),
1450 (mul (v8i16 QPR:$src2),
1451 (v8i16 (NEONvduplane (v8i16 QPR:$src3), imm:$lane))))),
1452 (v8i16 (VMLAslv8i16 (v8i16 QPR:$src1),
1453 (v8i16 QPR:$src2),
1454 (v4i16 (EXTRACT_SUBREG QPR:$src3,
1455 (DSubReg_i16_reg imm:$lane))),
1456 (SubReg_i16_lane imm:$lane)))>;
1458 def : Pat<(v4i32 (add (v4i32 QPR:$src1),
1459 (mul (v4i32 QPR:$src2),
1460 (v4i32 (NEONvduplane (v4i32 QPR:$src3), imm:$lane))))),
1461 (v4i32 (VMLAslv4i32 (v4i32 QPR:$src1),
1462 (v4i32 QPR:$src2),
1463 (v2i32 (EXTRACT_SUBREG QPR:$src3,
1464 (DSubReg_i32_reg imm:$lane))),
1465 (SubReg_i32_lane imm:$lane)))>;
1467 def : Pat<(v4f32 (fadd (v4f32 QPR:$src1),
1468 (fmul (v4f32 QPR:$src2),
1469 (v4f32 (NEONvduplane (v4f32 QPR:$src3), imm:$lane))))),
1470 (v4f32 (VMLAslfq (v4f32 QPR:$src1),
1471 (v4f32 QPR:$src2),
1472 (v2f32 (EXTRACT_SUBREG QPR:$src3,
1473 (DSubReg_i32_reg imm:$lane))),
1474 (SubReg_i32_lane imm:$lane)))>;
1476 // VMLAL : Vector Multiply Accumulate Long (Q += D * D)
1477 defm VMLALs : N3VLInt3_QHS<0,1,0b1000,0, "vmlal.s", int_arm_neon_vmlals>;
1478 defm VMLALu : N3VLInt3_QHS<1,1,0b1000,0, "vmlal.u", int_arm_neon_vmlalu>;
1480 defm VMLALsls : N3VLInt3SL_HS<0, 0b0010, "vmlal.s", int_arm_neon_vmlals>;
1481 defm VMLALslu : N3VLInt3SL_HS<1, 0b0010, "vmlal.u", int_arm_neon_vmlalu>;
1483 // VQDMLAL : Vector Saturating Doubling Multiply Accumulate Long (Q += D * D)
1484 defm VQDMLAL : N3VLInt3_HS<0, 1, 0b1001, 0, "vqdmlal.s", int_arm_neon_vqdmlal>;
1485 defm VQDMLALsl: N3VLInt3SL_HS<0, 0b0011, "vqdmlal.s", int_arm_neon_vqdmlal>;
1487 // VMLS : Vector Multiply Subtract (integer and floating-point)
1488 defm VMLS : N3VMulOp_QHS<0, 0, 0b1001, 0, "vmls.i", sub>;
1489 def VMLSfd : N3VDMulOp<0, 0, 0b10, 0b1101, 1, "vmls.f32", v2f32, fmul, fsub>;
1490 def VMLSfq : N3VQMulOp<0, 0, 0b10, 0b1101, 1, "vmls.f32", v4f32, fmul, fsub>;
1491 defm VMLSsl : N3VMulOpSL_HS<0b0100, "vmls.i", sub>;
1492 def VMLSslfd : N3VDMulOpSL<0b10, 0b0101, "vmls.f32", v2f32, fmul, fsub>;
1493 def VMLSslfq : N3VQMulOpSL<0b10, 0b0101, "vmls.f32", v4f32, v2f32, fmul, fsub>;
1495 def : Pat<(v8i16 (sub (v8i16 QPR:$src1),
1496 (mul (v8i16 QPR:$src2),
1497 (v8i16 (NEONvduplane (v8i16 QPR:$src3), imm:$lane))))),
1498 (v8i16 (VMLSslv8i16 (v8i16 QPR:$src1),
1499 (v8i16 QPR:$src2),
1500 (v4i16 (EXTRACT_SUBREG QPR:$src3,
1501 (DSubReg_i16_reg imm:$lane))),
1502 (SubReg_i16_lane imm:$lane)))>;
1504 def : Pat<(v4i32 (sub (v4i32 QPR:$src1),
1505 (mul (v4i32 QPR:$src2),
1506 (v4i32 (NEONvduplane (v4i32 QPR:$src3), imm:$lane))))),
1507 (v4i32 (VMLSslv4i32 (v4i32 QPR:$src1),
1508 (v4i32 QPR:$src2),
1509 (v2i32 (EXTRACT_SUBREG QPR:$src3,
1510 (DSubReg_i32_reg imm:$lane))),
1511 (SubReg_i32_lane imm:$lane)))>;
1513 def : Pat<(v4f32 (fsub (v4f32 QPR:$src1),
1514 (fmul (v4f32 QPR:$src2),
1515 (v4f32 (NEONvduplane (v4f32 QPR:$src3), imm:$lane))))),
1516 (v4f32 (VMLSslfq (v4f32 QPR:$src1),
1517 (v4f32 QPR:$src2),
1518 (v2f32 (EXTRACT_SUBREG QPR:$src3,
1519 (DSubReg_i32_reg imm:$lane))),
1520 (SubReg_i32_lane imm:$lane)))>;
1522 // VMLSL : Vector Multiply Subtract Long (Q -= D * D)
1523 defm VMLSLs : N3VLInt3_QHS<0,1,0b1010,0, "vmlsl.s", int_arm_neon_vmlsls>;
1524 defm VMLSLu : N3VLInt3_QHS<1,1,0b1010,0, "vmlsl.u", int_arm_neon_vmlslu>;
1526 defm VMLSLsls : N3VLInt3SL_HS<0, 0b0110, "vmlsl.s", int_arm_neon_vmlsls>;
1527 defm VMLSLslu : N3VLInt3SL_HS<1, 0b0110, "vmlsl.u", int_arm_neon_vmlslu>;
1529 // VQDMLSL : Vector Saturating Doubling Multiply Subtract Long (Q -= D * D)
1530 defm VQDMLSL : N3VLInt3_HS<0, 1, 0b1011, 0, "vqdmlsl.s", int_arm_neon_vqdmlsl>;
1531 defm VQDMLSLsl: N3VLInt3SL_HS<0, 0b0111, "vqdmlsl.s", int_arm_neon_vqdmlsl>;
1533 // Vector Subtract Operations.
1535 // VSUB : Vector Subtract (integer and floating-point)
1536 defm VSUB : N3V_QHSD<1, 0, 0b1000, 0, "vsub.i", sub, 0>;
1537 def VSUBfd : N3VD<0, 0, 0b10, 0b1101, 0, "vsub.f32", v2f32, v2f32, fsub, 0>;
1538 def VSUBfq : N3VQ<0, 0, 0b10, 0b1101, 0, "vsub.f32", v4f32, v4f32, fsub, 0>;
1539 // VSUBL : Vector Subtract Long (Q = D - D)
1540 defm VSUBLs : N3VLInt_QHS<0,1,0b0010,0, "vsubl.s", int_arm_neon_vsubls, 1>;
1541 defm VSUBLu : N3VLInt_QHS<1,1,0b0010,0, "vsubl.u", int_arm_neon_vsublu, 1>;
1542 // VSUBW : Vector Subtract Wide (Q = Q - D)
1543 defm VSUBWs : N3VWInt_QHS<0,1,0b0011,0, "vsubw.s", int_arm_neon_vsubws, 0>;
1544 defm VSUBWu : N3VWInt_QHS<1,1,0b0011,0, "vsubw.u", int_arm_neon_vsubwu, 0>;
1545 // VHSUB : Vector Halving Subtract
1546 defm VHSUBs : N3VInt_QHS<0, 0, 0b0010, 0, "vhsub.s", int_arm_neon_vhsubs, 0>;
1547 defm VHSUBu : N3VInt_QHS<1, 0, 0b0010, 0, "vhsub.u", int_arm_neon_vhsubu, 0>;
1548 // VQSUB : Vector Saturating Subtract
1549 defm VQSUBs : N3VInt_QHSD<0, 0, 0b0010, 1, "vqsub.s", int_arm_neon_vqsubs, 0>;
1550 defm VQSUBu : N3VInt_QHSD<1, 0, 0b0010, 1, "vqsub.u", int_arm_neon_vqsubu, 0>;
1551 // VSUBHN : Vector Subtract and Narrow Returning High Half (D = Q - Q)
1552 defm VSUBHN : N3VNInt_HSD<0,1,0b0110,0, "vsubhn.i", int_arm_neon_vsubhn, 0>;
1553 // VRSUBHN : Vector Rounding Subtract and Narrow Returning High Half (D=Q-Q)
1554 defm VRSUBHN : N3VNInt_HSD<1,1,0b0110,0, "vrsubhn.i", int_arm_neon_vrsubhn, 0>;
1556 // Vector Comparisons.
1558 // VCEQ : Vector Compare Equal
1559 defm VCEQ : N3V_QHS<1, 0, 0b1000, 1, "vceq.i", NEONvceq, 1>;
1560 def VCEQfd : N3VD<0,0,0b00,0b1110,0, "vceq.f32", v2i32, v2f32, NEONvceq, 1>;
1561 def VCEQfq : N3VQ<0,0,0b00,0b1110,0, "vceq.f32", v4i32, v4f32, NEONvceq, 1>;
1562 // VCGE : Vector Compare Greater Than or Equal
1563 defm VCGEs : N3V_QHS<0, 0, 0b0011, 1, "vcge.s", NEONvcge, 0>;
1564 defm VCGEu : N3V_QHS<1, 0, 0b0011, 1, "vcge.u", NEONvcgeu, 0>;
1565 def VCGEfd : N3VD<1,0,0b00,0b1110,0, "vcge.f32", v2i32, v2f32, NEONvcge, 0>;
1566 def VCGEfq : N3VQ<1,0,0b00,0b1110,0, "vcge.f32", v4i32, v4f32, NEONvcge, 0>;
1567 // VCGT : Vector Compare Greater Than
1568 defm VCGTs : N3V_QHS<0, 0, 0b0011, 0, "vcgt.s", NEONvcgt, 0>;
1569 defm VCGTu : N3V_QHS<1, 0, 0b0011, 0, "vcgt.u", NEONvcgtu, 0>;
1570 def VCGTfd : N3VD<1,0,0b10,0b1110,0, "vcgt.f32", v2i32, v2f32, NEONvcgt, 0>;
1571 def VCGTfq : N3VQ<1,0,0b10,0b1110,0, "vcgt.f32", v4i32, v4f32, NEONvcgt, 0>;
1572 // VACGE : Vector Absolute Compare Greater Than or Equal (aka VCAGE)
1573 def VACGEd : N3VDInt<1, 0, 0b00, 0b1110, 1, "vacge.f32", v2i32, v2f32,
1574 int_arm_neon_vacged, 0>;
1575 def VACGEq : N3VQInt<1, 0, 0b00, 0b1110, 1, "vacge.f32", v4i32, v4f32,
1576 int_arm_neon_vacgeq, 0>;
1577 // VACGT : Vector Absolute Compare Greater Than (aka VCAGT)
1578 def VACGTd : N3VDInt<1, 0, 0b10, 0b1110, 1, "vacgt.f32", v2i32, v2f32,
1579 int_arm_neon_vacgtd, 0>;
1580 def VACGTq : N3VQInt<1, 0, 0b10, 0b1110, 1, "vacgt.f32", v4i32, v4f32,
1581 int_arm_neon_vacgtq, 0>;
1582 // VTST : Vector Test Bits
1583 defm VTST : N3V_QHS<0, 0, 0b1000, 1, "vtst.i", NEONvtst, 1>;
1585 // Vector Bitwise Operations.
1587 // VAND : Vector Bitwise AND
1588 def VANDd : N3VD<0, 0, 0b00, 0b0001, 1, "vand", v2i32, v2i32, and, 1>;
1589 def VANDq : N3VQ<0, 0, 0b00, 0b0001, 1, "vand", v4i32, v4i32, and, 1>;
1591 // VEOR : Vector Bitwise Exclusive OR
1592 def VEORd : N3VD<1, 0, 0b00, 0b0001, 1, "veor", v2i32, v2i32, xor, 1>;
1593 def VEORq : N3VQ<1, 0, 0b00, 0b0001, 1, "veor", v4i32, v4i32, xor, 1>;
1595 // VORR : Vector Bitwise OR
1596 def VORRd : N3VD<0, 0, 0b10, 0b0001, 1, "vorr", v2i32, v2i32, or, 1>;
1597 def VORRq : N3VQ<0, 0, 0b10, 0b0001, 1, "vorr", v4i32, v4i32, or, 1>;
1599 // VBIC : Vector Bitwise Bit Clear (AND NOT)
1600 def VBICd : N3V<0, 0, 0b01, 0b0001, 0, 1, (outs DPR:$dst),
1601 (ins DPR:$src1, DPR:$src2), NoItinerary,
1602 "vbic\t$dst, $src1, $src2", "",
1603 [(set DPR:$dst, (v2i32 (and DPR:$src1,
1604 (vnot_conv DPR:$src2))))]>;
1605 def VBICq : N3V<0, 0, 0b01, 0b0001, 1, 1, (outs QPR:$dst),
1606 (ins QPR:$src1, QPR:$src2), NoItinerary,
1607 "vbic\t$dst, $src1, $src2", "",
1608 [(set QPR:$dst, (v4i32 (and QPR:$src1,
1609 (vnot_conv QPR:$src2))))]>;
1611 // VORN : Vector Bitwise OR NOT
1612 def VORNd : N3V<0, 0, 0b11, 0b0001, 0, 1, (outs DPR:$dst),
1613 (ins DPR:$src1, DPR:$src2), NoItinerary,
1614 "vorn\t$dst, $src1, $src2", "",
1615 [(set DPR:$dst, (v2i32 (or DPR:$src1,
1616 (vnot_conv DPR:$src2))))]>;
1617 def VORNq : N3V<0, 0, 0b11, 0b0001, 1, 1, (outs QPR:$dst),
1618 (ins QPR:$src1, QPR:$src2), NoItinerary,
1619 "vorn\t$dst, $src1, $src2", "",
1620 [(set QPR:$dst, (v4i32 (or QPR:$src1,
1621 (vnot_conv QPR:$src2))))]>;
1623 // VMVN : Vector Bitwise NOT
1624 def VMVNd : N2V<0b11, 0b11, 0b00, 0b00, 0b01011, 0, 0,
1625 (outs DPR:$dst), (ins DPR:$src), NoItinerary,
1626 "vmvn\t$dst, $src", "",
1627 [(set DPR:$dst, (v2i32 (vnot DPR:$src)))]>;
1628 def VMVNq : N2V<0b11, 0b11, 0b00, 0b00, 0b01011, 1, 0,
1629 (outs QPR:$dst), (ins QPR:$src), NoItinerary,
1630 "vmvn\t$dst, $src", "",
1631 [(set QPR:$dst, (v4i32 (vnot QPR:$src)))]>;
1632 def : Pat<(v2i32 (vnot_conv DPR:$src)), (VMVNd DPR:$src)>;
1633 def : Pat<(v4i32 (vnot_conv QPR:$src)), (VMVNq QPR:$src)>;
1635 // VBSL : Vector Bitwise Select
1636 def VBSLd : N3V<1, 0, 0b01, 0b0001, 0, 1, (outs DPR:$dst),
1637 (ins DPR:$src1, DPR:$src2, DPR:$src3), NoItinerary,
1638 "vbsl\t$dst, $src2, $src3", "$src1 = $dst",
1639 [(set DPR:$dst,
1640 (v2i32 (or (and DPR:$src2, DPR:$src1),
1641 (and DPR:$src3, (vnot_conv DPR:$src1)))))]>;
1642 def VBSLq : N3V<1, 0, 0b01, 0b0001, 1, 1, (outs QPR:$dst),
1643 (ins QPR:$src1, QPR:$src2, QPR:$src3), NoItinerary,
1644 "vbsl\t$dst, $src2, $src3", "$src1 = $dst",
1645 [(set QPR:$dst,
1646 (v4i32 (or (and QPR:$src2, QPR:$src1),
1647 (and QPR:$src3, (vnot_conv QPR:$src1)))))]>;
1649 // VBIF : Vector Bitwise Insert if False
1650 // like VBSL but with: "vbif\t$dst, $src3, $src1", "$src2 = $dst",
1651 // VBIT : Vector Bitwise Insert if True
1652 // like VBSL but with: "vbit\t$dst, $src2, $src1", "$src3 = $dst",
1653 // These are not yet implemented. The TwoAddress pass will not go looking
1654 // for equivalent operations with different register constraints; it just
1655 // inserts copies.
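// Purely as a sketch (editorial, not from the original file; the 0b11 size
// bits here are an assumption), a VBIFd definition following the description
// above might look like:
//   def VBIFd : N3V<1, 0, 0b11, 0b0001, 0, 1, (outs DPR:$dst),
//                   (ins DPR:$src1, DPR:$src2, DPR:$src3), NoItinerary,
//                   "vbif\t$dst, $src3, $src1", "$src2 = $dst", []>;
// It is left commented out for the reason given above.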
1657 // Vector Absolute Differences.
1659 // VABD : Vector Absolute Difference
1660 defm VABDs : N3VInt_QHS<0, 0, 0b0111, 0, "vabd.s", int_arm_neon_vabds, 0>;
1661 defm VABDu : N3VInt_QHS<1, 0, 0b0111, 0, "vabd.u", int_arm_neon_vabdu, 0>;
1662 def VABDfd : N3VDInt<1, 0, 0b10, 0b1101, 0, "vabd.f32", v2f32, v2f32,
1663 int_arm_neon_vabds, 0>;
1664 def VABDfq : N3VQInt<1, 0, 0b10, 0b1101, 0, "vabd.f32", v4f32, v4f32,
1665 int_arm_neon_vabds, 0>;
1667 // VABDL : Vector Absolute Difference Long (Q = | D - D |)
1668 defm VABDLs : N3VLInt_QHS<0,1,0b0111,0, "vabdl.s", int_arm_neon_vabdls, 0>;
1669 defm VABDLu : N3VLInt_QHS<1,1,0b0111,0, "vabdl.u", int_arm_neon_vabdlu, 0>;
1671 // VABA : Vector Absolute Difference and Accumulate
1672 defm VABAs : N3VInt3_QHS<0,1,0b0101,0, "vaba.s", int_arm_neon_vabas>;
1673 defm VABAu : N3VInt3_QHS<1,1,0b0101,0, "vaba.u", int_arm_neon_vabau>;
1675 // VABAL : Vector Absolute Difference and Accumulate Long (Q += | D - D |)
1676 defm VABALs : N3VLInt3_QHS<0,1,0b0101,0, "vabal.s", int_arm_neon_vabals>;
1677 defm VABALu : N3VLInt3_QHS<1,1,0b0101,0, "vabal.u", int_arm_neon_vabalu>;
1679 // Vector Maximum and Minimum.
1681 // VMAX : Vector Maximum
1682 defm VMAXs : N3VInt_QHS<0, 0, 0b0110, 0, "vmax.s", int_arm_neon_vmaxs, 1>;
1683 defm VMAXu : N3VInt_QHS<1, 0, 0b0110, 0, "vmax.u", int_arm_neon_vmaxu, 1>;
1684 def VMAXfd : N3VDInt<0, 0, 0b00, 0b1111, 0, "vmax.f32", v2f32, v2f32,
1685 int_arm_neon_vmaxs, 1>;
1686 def VMAXfq : N3VQInt<0, 0, 0b00, 0b1111, 0, "vmax.f32", v4f32, v4f32,
1687 int_arm_neon_vmaxs, 1>;
1689 // VMIN : Vector Minimum
1690 defm VMINs : N3VInt_QHS<0, 0, 0b0110, 1, "vmin.s", int_arm_neon_vmins, 1>;
1691 defm VMINu : N3VInt_QHS<1, 0, 0b0110, 1, "vmin.u", int_arm_neon_vminu, 1>;
1692 def VMINfd : N3VDInt<0, 0, 0b10, 0b1111, 0, "vmin.f32", v2f32, v2f32,
1693 int_arm_neon_vmins, 1>;
1694 def VMINfq : N3VQInt<0, 0, 0b10, 0b1111, 0, "vmin.f32", v4f32, v4f32,
1695 int_arm_neon_vmins, 1>;
1697 // Vector Pairwise Operations.
1699 // VPADD : Vector Pairwise Add
1700 def VPADDi8 : N3VDInt<0, 0, 0b00, 0b1011, 1, "vpadd.i8", v8i8, v8i8,
1701 int_arm_neon_vpadd, 0>;
1702 def VPADDi16 : N3VDInt<0, 0, 0b01, 0b1011, 1, "vpadd.i16", v4i16, v4i16,
1703 int_arm_neon_vpadd, 0>;
1704 def VPADDi32 : N3VDInt<0, 0, 0b10, 0b1011, 1, "vpadd.i32", v2i32, v2i32,
1705 int_arm_neon_vpadd, 0>;
1706 def VPADDf : N3VDInt<1, 0, 0b00, 0b1101, 0, "vpadd.f32", v2f32, v2f32,
1707 int_arm_neon_vpadd, 0>;
1709 // VPADDL : Vector Pairwise Add Long
1710 defm VPADDLs : N2VPLInt_QHS<0b11, 0b11, 0b00, 0b00100, 0, "vpaddl.s",
1711 int_arm_neon_vpaddls>;
1712 defm VPADDLu : N2VPLInt_QHS<0b11, 0b11, 0b00, 0b00101, 0, "vpaddl.u",
1713 int_arm_neon_vpaddlu>;
1715 // VPADAL : Vector Pairwise Add and Accumulate Long
1716 defm VPADALs : N2VPLInt2_QHS<0b11, 0b11, 0b00, 0b00100, 0, "vpadal.s",
1717 int_arm_neon_vpadals>;
1718 defm VPADALu : N2VPLInt2_QHS<0b11, 0b11, 0b00, 0b00101, 0, "vpadal.u",
1719 int_arm_neon_vpadalu>;
1721 // VPMAX : Vector Pairwise Maximum
1722 def VPMAXs8 : N3VDInt<0, 0, 0b00, 0b1010, 0, "vpmax.s8", v8i8, v8i8,
1723 int_arm_neon_vpmaxs, 0>;
1724 def VPMAXs16 : N3VDInt<0, 0, 0b01, 0b1010, 0, "vpmax.s16", v4i16, v4i16,
1725 int_arm_neon_vpmaxs, 0>;
1726 def VPMAXs32 : N3VDInt<0, 0, 0b10, 0b1010, 0, "vpmax.s32", v2i32, v2i32,
1727 int_arm_neon_vpmaxs, 0>;
1728 def VPMAXu8 : N3VDInt<1, 0, 0b00, 0b1010, 0, "vpmax.u8", v8i8, v8i8,
1729 int_arm_neon_vpmaxu, 0>;
1730 def VPMAXu16 : N3VDInt<1, 0, 0b01, 0b1010, 0, "vpmax.u16", v4i16, v4i16,
1731 int_arm_neon_vpmaxu, 0>;
1732 def VPMAXu32 : N3VDInt<1, 0, 0b10, 0b1010, 0, "vpmax.u32", v2i32, v2i32,
1733 int_arm_neon_vpmaxu, 0>;
1734 def VPMAXf : N3VDInt<1, 0, 0b00, 0b1111, 0, "vpmax.f32", v2f32, v2f32,
1735 int_arm_neon_vpmaxs, 0>;
1737 // VPMIN : Vector Pairwise Minimum
1738 def VPMINs8 : N3VDInt<0, 0, 0b00, 0b1010, 1, "vpmin.s8", v8i8, v8i8,
1739 int_arm_neon_vpmins, 0>;
1740 def VPMINs16 : N3VDInt<0, 0, 0b01, 0b1010, 1, "vpmin.s16", v4i16, v4i16,
1741 int_arm_neon_vpmins, 0>;
1742 def VPMINs32 : N3VDInt<0, 0, 0b10, 0b1010, 1, "vpmin.s32", v2i32, v2i32,
1743 int_arm_neon_vpmins, 0>;
1744 def VPMINu8 : N3VDInt<1, 0, 0b00, 0b1010, 1, "vpmin.u8", v8i8, v8i8,
1745 int_arm_neon_vpminu, 0>;
1746 def VPMINu16 : N3VDInt<1, 0, 0b01, 0b1010, 1, "vpmin.u16", v4i16, v4i16,
1747 int_arm_neon_vpminu, 0>;
1748 def VPMINu32 : N3VDInt<1, 0, 0b10, 0b1010, 1, "vpmin.u32", v2i32, v2i32,
1749 int_arm_neon_vpminu, 0>;
1750 def VPMINf : N3VDInt<1, 0, 0b10, 0b1111, 0, "vpmin.f32", v2f32, v2f32,
1751 int_arm_neon_vpmins, 0>;
1753 // Vector Reciprocal and Reciprocal Square Root Estimate and Step.
1755 // VRECPE : Vector Reciprocal Estimate
1756 def VRECPEd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01000, 0, "vrecpe.u32",
1757 v2i32, v2i32, int_arm_neon_vrecpe>;
1758 def VRECPEq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01000, 0, "vrecpe.u32",
1759 v4i32, v4i32, int_arm_neon_vrecpe>;
1760 def VRECPEfd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01010, 0, "vrecpe.f32",
1761 v2f32, v2f32, int_arm_neon_vrecpe>;
1762 def VRECPEfq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01010, 0, "vrecpe.f32",
1763 v4f32, v4f32, int_arm_neon_vrecpe>;
1765 // VRECPS : Vector Reciprocal Step
1766 def VRECPSfd : N3VDInt<0, 0, 0b00, 0b1111, 1, "vrecps.f32", v2f32, v2f32,
1767 int_arm_neon_vrecps, 1>;
1768 def VRECPSfq : N3VQInt<0, 0, 0b00, 0b1111, 1, "vrecps.f32", v4f32, v4f32,
1769 int_arm_neon_vrecps, 1>;
1771 // VRSQRTE : Vector Reciprocal Square Root Estimate
1772 def VRSQRTEd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01001, 0, "vrsqrte.u32",
1773 v2i32, v2i32, int_arm_neon_vrsqrte>;
1774 def VRSQRTEq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01001, 0, "vrsqrte.u32",
1775 v4i32, v4i32, int_arm_neon_vrsqrte>;
1776 def VRSQRTEfd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01011, 0, "vrsqrte.f32",
1777 v2f32, v2f32, int_arm_neon_vrsqrte>;
1778 def VRSQRTEfq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01011, 0, "vrsqrte.f32",
1779 v4f32, v4f32, int_arm_neon_vrsqrte>;
1781 // VRSQRTS : Vector Reciprocal Square Root Step
1782 def VRSQRTSfd : N3VDInt<0, 0, 0b10, 0b1111, 1, "vrsqrts.f32", v2f32, v2f32,
1783 int_arm_neon_vrsqrts, 1>;
1784 def VRSQRTSfq : N3VQInt<0, 0, 0b10, 0b1111, 1, "vrsqrts.f32", v4f32, v4f32,
1785 int_arm_neon_vrsqrts, 1>;
1787 // Vector Shifts.
1789 // VSHL : Vector Shift
1790 defm VSHLs : N3VInt_QHSD<0, 0, 0b0100, 0, "vshl.s", int_arm_neon_vshifts, 0>;
1791 defm VSHLu : N3VInt_QHSD<1, 0, 0b0100, 0, "vshl.u", int_arm_neon_vshiftu, 0>;
1792 // VSHL : Vector Shift Left (Immediate)
1793 defm VSHLi : N2VSh_QHSD<0, 1, 0b0111, 1, "vshl.i", NEONvshl>;
1794 // VSHR : Vector Shift Right (Immediate)
1795 defm VSHRs : N2VSh_QHSD<0, 1, 0b0000, 1, "vshr.s", NEONvshrs>;
1796 defm VSHRu : N2VSh_QHSD<1, 1, 0b0000, 1, "vshr.u", NEONvshru>;
1798 // VSHLL : Vector Shift Left Long
1799 def VSHLLs8 : N2VLSh<0, 1, 0b001000, 0b1010, 0, 0, 1, "vshll.s8",
1800 v8i16, v8i8, NEONvshlls>;
1801 def VSHLLs16 : N2VLSh<0, 1, 0b010000, 0b1010, 0, 0, 1, "vshll.s16",
1802 v4i32, v4i16, NEONvshlls>;
1803 def VSHLLs32 : N2VLSh<0, 1, 0b100000, 0b1010, 0, 0, 1, "vshll.s32",
1804 v2i64, v2i32, NEONvshlls>;
1805 def VSHLLu8 : N2VLSh<1, 1, 0b001000, 0b1010, 0, 0, 1, "vshll.u8",
1806 v8i16, v8i8, NEONvshllu>;
1807 def VSHLLu16 : N2VLSh<1, 1, 0b010000, 0b1010, 0, 0, 1, "vshll.u16",
1808 v4i32, v4i16, NEONvshllu>;
1809 def VSHLLu32 : N2VLSh<1, 1, 0b100000, 0b1010, 0, 0, 1, "vshll.u32",
1810 v2i64, v2i32, NEONvshllu>;
1812 // VSHLL : Vector Shift Left Long (with maximum shift count)
1813 def VSHLLi8 : N2VLSh<1, 1, 0b110010, 0b0011, 0, 0, 0, "vshll.i8",
1814 v8i16, v8i8, NEONvshlli>;
1815 def VSHLLi16 : N2VLSh<1, 1, 0b110110, 0b0011, 0, 0, 0, "vshll.i16",
1816 v4i32, v4i16, NEONvshlli>;
1817 def VSHLLi32 : N2VLSh<1, 1, 0b111010, 0b0011, 0, 0, 0, "vshll.i32",
1818 v2i64, v2i32, NEONvshlli>;
1820 // VSHRN : Vector Shift Right and Narrow
1821 def VSHRN16 : N2VNSh<0, 1, 0b001000, 0b1000, 0, 0, 1, "vshrn.i16",
1822 v8i8, v8i16, NEONvshrn>;
1823 def VSHRN32 : N2VNSh<0, 1, 0b010000, 0b1000, 0, 0, 1, "vshrn.i32",
1824 v4i16, v4i32, NEONvshrn>;
1825 def VSHRN64 : N2VNSh<0, 1, 0b100000, 0b1000, 0, 0, 1, "vshrn.i64",
1826 v2i32, v2i64, NEONvshrn>;
1828 // VRSHL : Vector Rounding Shift
1829 defm VRSHLs : N3VInt_QHSD<0,0,0b0101,0, "vrshl.s", int_arm_neon_vrshifts, 0>;
1830 defm VRSHLu : N3VInt_QHSD<1,0,0b0101,0, "vrshl.u", int_arm_neon_vrshiftu, 0>;
1831 // VRSHR : Vector Rounding Shift Right
1832 defm VRSHRs : N2VSh_QHSD<0, 1, 0b0010, 1, "vrshr.s", NEONvrshrs>;
1833 defm VRSHRu : N2VSh_QHSD<1, 1, 0b0010, 1, "vrshr.u", NEONvrshru>;
1835 // VRSHRN : Vector Rounding Shift Right and Narrow
1836 def VRSHRN16 : N2VNSh<0, 1, 0b001000, 0b1000, 0, 1, 1, "vrshrn.i16",
1837 v8i8, v8i16, NEONvrshrn>;
1838 def VRSHRN32 : N2VNSh<0, 1, 0b010000, 0b1000, 0, 1, 1, "vrshrn.i32",
1839 v4i16, v4i32, NEONvrshrn>;
1840 def VRSHRN64 : N2VNSh<0, 1, 0b100000, 0b1000, 0, 1, 1, "vrshrn.i64",
1841 v2i32, v2i64, NEONvrshrn>;
1843 // VQSHL : Vector Saturating Shift
1844 defm VQSHLs : N3VInt_QHSD<0,0,0b0100,1, "vqshl.s", int_arm_neon_vqshifts, 0>;
1845 defm VQSHLu : N3VInt_QHSD<1,0,0b0100,1, "vqshl.u", int_arm_neon_vqshiftu, 0>;
1846 // VQSHL : Vector Saturating Shift Left (Immediate)
1847 defm VQSHLsi : N2VSh_QHSD<0, 1, 0b0111, 1, "vqshl.s", NEONvqshls>;
1848 defm VQSHLui : N2VSh_QHSD<1, 1, 0b0111, 1, "vqshl.u", NEONvqshlu>;
1849 // VQSHLU : Vector Saturating Shift Left (Immediate, Unsigned)
1850 defm VQSHLsu : N2VSh_QHSD<1, 1, 0b0110, 1, "vqshlu.s", NEONvqshlsu>;
1852 // VQSHRN : Vector Saturating Shift Right and Narrow
1853 def VQSHRNs16 : N2VNSh<0, 1, 0b001000, 0b1001, 0, 0, 1, "vqshrn.s16",
1854 v8i8, v8i16, NEONvqshrns>;
1855 def VQSHRNs32 : N2VNSh<0, 1, 0b010000, 0b1001, 0, 0, 1, "vqshrn.s32",
1856 v4i16, v4i32, NEONvqshrns>;
1857 def VQSHRNs64 : N2VNSh<0, 1, 0b100000, 0b1001, 0, 0, 1, "vqshrn.s64",
1858 v2i32, v2i64, NEONvqshrns>;
1859 def VQSHRNu16 : N2VNSh<1, 1, 0b001000, 0b1001, 0, 0, 1, "vqshrn.u16",
1860 v8i8, v8i16, NEONvqshrnu>;
1861 def VQSHRNu32 : N2VNSh<1, 1, 0b010000, 0b1001, 0, 0, 1, "vqshrn.u32",
1862 v4i16, v4i32, NEONvqshrnu>;
1863 def VQSHRNu64 : N2VNSh<1, 1, 0b100000, 0b1001, 0, 0, 1, "vqshrn.u64",
1864 v2i32, v2i64, NEONvqshrnu>;
1866 // VQSHRUN : Vector Saturating Shift Right and Narrow (Unsigned)
1867 def VQSHRUN16 : N2VNSh<1, 1, 0b001000, 0b1000, 0, 0, 1, "vqshrun.s16",
1868 v8i8, v8i16, NEONvqshrnsu>;
1869 def VQSHRUN32 : N2VNSh<1, 1, 0b010000, 0b1000, 0, 0, 1, "vqshrun.s32",
1870 v4i16, v4i32, NEONvqshrnsu>;
1871 def VQSHRUN64 : N2VNSh<1, 1, 0b100000, 0b1000, 0, 0, 1, "vqshrun.s64",
1872 v2i32, v2i64, NEONvqshrnsu>;
1874 // VQRSHL : Vector Saturating Rounding Shift
1875 defm VQRSHLs : N3VInt_QHSD<0, 0, 0b0101, 1, "vqrshl.s",
1876 int_arm_neon_vqrshifts, 0>;
1877 defm VQRSHLu : N3VInt_QHSD<1, 0, 0b0101, 1, "vqrshl.u",
1878 int_arm_neon_vqrshiftu, 0>;
1880 // VQRSHRN : Vector Saturating Rounding Shift Right and Narrow
1881 def VQRSHRNs16: N2VNSh<0, 1, 0b001000, 0b1001, 0, 1, 1, "vqrshrn.s16",
1882 v8i8, v8i16, NEONvqrshrns>;
1883 def VQRSHRNs32: N2VNSh<0, 1, 0b010000, 0b1001, 0, 1, 1, "vqrshrn.s32",
1884 v4i16, v4i32, NEONvqrshrns>;
1885 def VQRSHRNs64: N2VNSh<0, 1, 0b100000, 0b1001, 0, 1, 1, "vqrshrn.s64",
1886 v2i32, v2i64, NEONvqrshrns>;
1887 def VQRSHRNu16: N2VNSh<1, 1, 0b001000, 0b1001, 0, 1, 1, "vqrshrn.u16",
1888 v8i8, v8i16, NEONvqrshrnu>;
1889 def VQRSHRNu32: N2VNSh<1, 1, 0b010000, 0b1001, 0, 1, 1, "vqrshrn.u32",
1890 v4i16, v4i32, NEONvqrshrnu>;
1891 def VQRSHRNu64: N2VNSh<1, 1, 0b100000, 0b1001, 0, 1, 1, "vqrshrn.u64",
1892 v2i32, v2i64, NEONvqrshrnu>;
1894 // VQRSHRUN : Vector Saturating Rounding Shift Right and Narrow (Unsigned)
1895 def VQRSHRUN16: N2VNSh<1, 1, 0b001000, 0b1000, 0, 1, 1, "vqrshrun.s16",
1896 v8i8, v8i16, NEONvqrshrnsu>;
1897 def VQRSHRUN32: N2VNSh<1, 1, 0b010000, 0b1000, 0, 1, 1, "vqrshrun.s32",
1898 v4i16, v4i32, NEONvqrshrnsu>;
1899 def VQRSHRUN64: N2VNSh<1, 1, 0b100000, 0b1000, 0, 1, 1, "vqrshrun.s64",
1900 v2i32, v2i64, NEONvqrshrnsu>;
1902 // VSRA : Vector Shift Right and Accumulate
1903 defm VSRAs : N2VShAdd_QHSD<0, 1, 0b0001, 1, "vsra.s", NEONvshrs>;
1904 defm VSRAu : N2VShAdd_QHSD<1, 1, 0b0001, 1, "vsra.u", NEONvshru>;
1905 // VRSRA : Vector Rounding Shift Right and Accumulate
1906 defm VRSRAs : N2VShAdd_QHSD<0, 1, 0b0011, 1, "vrsra.s", NEONvrshrs>;
1907 defm VRSRAu : N2VShAdd_QHSD<1, 1, 0b0011, 1, "vrsra.u", NEONvrshru>;
1909 // VSLI : Vector Shift Left and Insert
1910 defm VSLI : N2VShIns_QHSD<1, 1, 0b0101, 1, "vsli.", NEONvsli>;
1911 // VSRI : Vector Shift Right and Insert
1912 defm VSRI : N2VShIns_QHSD<1, 1, 0b0100, 1, "vsri.", NEONvsri>;
1914 // Vector Absolute and Saturating Absolute.
1916 // VABS : Vector Absolute Value
1917 defm VABS : N2VInt_QHS<0b11, 0b11, 0b01, 0b00110, 0, "vabs.s",
1918 int_arm_neon_vabs>;
1919 def VABSfd : N2VDInt<0b11, 0b11, 0b10, 0b01, 0b01110, 0, "vabs.f32",
1920 v2f32, v2f32, int_arm_neon_vabs>;
1921 def VABSfq : N2VQInt<0b11, 0b11, 0b10, 0b01, 0b01110, 0, "vabs.f32",
1922 v4f32, v4f32, int_arm_neon_vabs>;
1924 // VQABS : Vector Saturating Absolute Value
1925 defm VQABS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01110, 0, "vqabs.s",
1926 int_arm_neon_vqabs>;
1928 // Vector Negate.
1930 def vneg : PatFrag<(ops node:$in), (sub immAllZerosV, node:$in)>;
1931 def vneg_conv : PatFrag<(ops node:$in), (sub immAllZerosV_bc, node:$in)>;
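// Editorial note: these fragments recognize integer negation written as a
// subtraction from the all-zeros vector (0 - x); vneg_conv matches the same
// operation when the zero vector has been bit-converted, mirroring the
// vnot/vnot_conv pair used for VMVN above.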
1933 class VNEGD<bits<2> size, string OpcodeStr, ValueType Ty>
1934 : N2V<0b11, 0b11, size, 0b01, 0b00111, 0, 0, (outs DPR:$dst), (ins DPR:$src),
1935 NoItinerary,
1936 !strconcat(OpcodeStr, "\t$dst, $src"), "",
1937 [(set DPR:$dst, (Ty (vneg DPR:$src)))]>;
1938 class VNEGQ<bits<2> size, string OpcodeStr, ValueType Ty>
1939 : N2V<0b11, 0b11, size, 0b01, 0b00111, 1, 0, (outs QPR:$dst), (ins QPR:$src),
1940 NoItinerary,
1941 !strconcat(OpcodeStr, "\t$dst, $src"), "",
1942 [(set QPR:$dst, (Ty (vneg QPR:$src)))]>;
1944 // VNEG : Vector Negate
1945 def VNEGs8d : VNEGD<0b00, "vneg.s8", v8i8>;
1946 def VNEGs16d : VNEGD<0b01, "vneg.s16", v4i16>;
1947 def VNEGs32d : VNEGD<0b10, "vneg.s32", v2i32>;
1948 def VNEGs8q : VNEGQ<0b00, "vneg.s8", v16i8>;
1949 def VNEGs16q : VNEGQ<0b01, "vneg.s16", v8i16>;
1950 def VNEGs32q : VNEGQ<0b10, "vneg.s32", v4i32>;
1952 // VNEG : Vector Negate (floating-point)
1953 def VNEGf32d : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 0, 0,
1954 (outs DPR:$dst), (ins DPR:$src), NoItinerary,
1955 "vneg.f32\t$dst, $src", "",
1956 [(set DPR:$dst, (v2f32 (fneg DPR:$src)))]>;
1957 def VNEGf32q : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 1, 0,
1958 (outs QPR:$dst), (ins QPR:$src), NoItinerary,
1959 "vneg.f32\t$dst, $src", "",
1960 [(set QPR:$dst, (v4f32 (fneg QPR:$src)))]>;
1962 def : Pat<(v8i8 (vneg_conv DPR:$src)), (VNEGs8d DPR:$src)>;
1963 def : Pat<(v4i16 (vneg_conv DPR:$src)), (VNEGs16d DPR:$src)>;
1964 def : Pat<(v2i32 (vneg_conv DPR:$src)), (VNEGs32d DPR:$src)>;
1965 def : Pat<(v16i8 (vneg_conv QPR:$src)), (VNEGs8q QPR:$src)>;
1966 def : Pat<(v8i16 (vneg_conv QPR:$src)), (VNEGs16q QPR:$src)>;
1967 def : Pat<(v4i32 (vneg_conv QPR:$src)), (VNEGs32q QPR:$src)>;
1969 // VQNEG : Vector Saturating Negate
1970 defm VQNEG : N2VInt_QHS<0b11, 0b11, 0b00, 0b01111, 0, "vqneg.s",
1971 int_arm_neon_vqneg>;
1973 // Vector Bit Counting Operations.
1975 // VCLS : Vector Count Leading Sign Bits
1976 defm VCLS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01000, 0, "vcls.s",
1977 int_arm_neon_vcls>;
1978 // VCLZ : Vector Count Leading Zeros
1979 defm VCLZ : N2VInt_QHS<0b11, 0b11, 0b00, 0b01001, 0, "vclz.i",
1980 int_arm_neon_vclz>;
1981 // VCNT : Vector Count One Bits
1982 def VCNTd : N2VDInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0, "vcnt.8",
1983 v8i8, v8i8, int_arm_neon_vcnt>;
1984 def VCNTq : N2VQInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0, "vcnt.8",
1985 v16i8, v16i8, int_arm_neon_vcnt>;
1987 // Vector Move Operations.
1989 // VMOV : Vector Move (Register)
1991 def VMOVD : N3V<0, 0, 0b10, 0b0001, 0, 1, (outs DPR:$dst), (ins DPR:$src),
1992 NoItinerary, "vmov\t$dst, $src", "", []>;
1993 def VMOVQ : N3V<0, 0, 0b10, 0b0001, 1, 1, (outs QPR:$dst), (ins QPR:$src),
1994 NoItinerary, "vmov\t$dst, $src", "", []>;
1996 // VMOV : Vector Move (Immediate)
1998 // VMOV_get_imm8 xform function: convert build_vector to VMOV.i8 imm.
1999 def VMOV_get_imm8 : SDNodeXForm<build_vector, [{
2000 return ARM::getVMOVImm(N, 1, *CurDAG);
2001 }]>;
2002 def vmovImm8 : PatLeaf<(build_vector), [{
2003 return ARM::getVMOVImm(N, 1, *CurDAG).getNode() != 0;
2004 }], VMOV_get_imm8>;
2006 // VMOV_get_imm16 xform function: convert build_vector to VMOV.i16 imm.
2007 def VMOV_get_imm16 : SDNodeXForm<build_vector, [{
2008 return ARM::getVMOVImm(N, 2, *CurDAG);
2009 }]>;
2010 def vmovImm16 : PatLeaf<(build_vector), [{
2011 return ARM::getVMOVImm(N, 2, *CurDAG).getNode() != 0;
2012 }], VMOV_get_imm16>;
2014 // VMOV_get_imm32 xform function: convert build_vector to VMOV.i32 imm.
2015 def VMOV_get_imm32 : SDNodeXForm<build_vector, [{
2016 return ARM::getVMOVImm(N, 4, *CurDAG);
2017 }]>;
2018 def vmovImm32 : PatLeaf<(build_vector), [{
2019 return ARM::getVMOVImm(N, 4, *CurDAG).getNode() != 0;
2020 }], VMOV_get_imm32>;
2022 // VMOV_get_imm64 xform function: convert build_vector to VMOV.i64 imm.
2023 def VMOV_get_imm64 : SDNodeXForm<build_vector, [{
2024 return ARM::getVMOVImm(N, 8, *CurDAG);
2025 }]>;
2026 def vmovImm64 : PatLeaf<(build_vector), [{
2027 return ARM::getVMOVImm(N, 8, *CurDAG).getNode() != 0;
2028 }], VMOV_get_imm64>;
2030 // Note: Some of the cmode bits in the following VMOV instructions need to
2031 // be encoded based on the immed values.
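// Illustrative example (an editorial sketch, not from the original file): a
// splat such as (v2i32 (build_vector 1, 1)) should satisfy vmovImm32 above,
// since ARM::getVMOVImm can presumably encode the value 1 as an 8-bit
// immediate in the low byte, and the pattern below would then select
//   vmov.i32 d0, #0x1
// Splats that have no such modified-immediate encoding are not matched here
// and must be materialized some other way.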
2033 def VMOVv8i8 : N1ModImm<1, 0b000, 0b1110, 0, 0, 0, 1, (outs DPR:$dst),
2034 (ins i8imm:$SIMM), NoItinerary,
2035 "vmov.i8\t$dst, $SIMM", "",
2036 [(set DPR:$dst, (v8i8 vmovImm8:$SIMM))]>;
2037 def VMOVv16i8 : N1ModImm<1, 0b000, 0b1110, 0, 1, 0, 1, (outs QPR:$dst),
2038 (ins i8imm:$SIMM), NoItinerary,
2039 "vmov.i8\t$dst, $SIMM", "",
2040 [(set QPR:$dst, (v16i8 vmovImm8:$SIMM))]>;
2042 def VMOVv4i16 : N1ModImm<1, 0b000, 0b1000, 0, 0, 0, 1, (outs DPR:$dst),
2043 (ins i16imm:$SIMM), NoItinerary,
2044 "vmov.i16\t$dst, $SIMM", "",
2045 [(set DPR:$dst, (v4i16 vmovImm16:$SIMM))]>;
2046 def VMOVv8i16 : N1ModImm<1, 0b000, 0b1000, 0, 1, 0, 1, (outs QPR:$dst),
2047 (ins i16imm:$SIMM), NoItinerary,
2048 "vmov.i16\t$dst, $SIMM", "",
2049 [(set QPR:$dst, (v8i16 vmovImm16:$SIMM))]>;
2051 def VMOVv2i32 : N1ModImm<1, 0b000, 0b0000, 0, 0, 0, 1, (outs DPR:$dst),
2052 (ins i32imm:$SIMM), NoItinerary,
2053 "vmov.i32\t$dst, $SIMM", "",
2054 [(set DPR:$dst, (v2i32 vmovImm32:$SIMM))]>;
2055 def VMOVv4i32 : N1ModImm<1, 0b000, 0b0000, 0, 1, 0, 1, (outs QPR:$dst),
2056 (ins i32imm:$SIMM), NoItinerary,
2057 "vmov.i32\t$dst, $SIMM", "",
2058 [(set QPR:$dst, (v4i32 vmovImm32:$SIMM))]>;
2060 def VMOVv1i64 : N1ModImm<1, 0b000, 0b1110, 0, 0, 1, 1, (outs DPR:$dst),
2061 (ins i64imm:$SIMM), NoItinerary,
2062 "vmov.i64\t$dst, $SIMM", "",
2063 [(set DPR:$dst, (v1i64 vmovImm64:$SIMM))]>;
2064 def VMOVv2i64 : N1ModImm<1, 0b000, 0b1110, 0, 1, 1, 1, (outs QPR:$dst),
2065 (ins i64imm:$SIMM), NoItinerary,
2066 "vmov.i64\t$dst, $SIMM", "",
2067 [(set QPR:$dst, (v2i64 vmovImm64:$SIMM))]>;
2069 // VMOV : Vector Get Lane (move scalar to ARM core register)
2071 def VGETLNs8 : NVGetLane<0b11100101, 0b1011, 0b00,
2072 (outs GPR:$dst), (ins DPR:$src, nohash_imm:$lane),
2073 NoItinerary, "vmov", ".s8\t$dst, $src[$lane]",
2074 [(set GPR:$dst, (NEONvgetlanes (v8i8 DPR:$src),
2075 imm:$lane))]>;
2076 def VGETLNs16 : NVGetLane<0b11100001, 0b1011, 0b01,
2077 (outs GPR:$dst), (ins DPR:$src, nohash_imm:$lane),
2078 NoItinerary, "vmov", ".s16\t$dst, $src[$lane]",
2079 [(set GPR:$dst, (NEONvgetlanes (v4i16 DPR:$src),
2080 imm:$lane))]>;
2081 def VGETLNu8 : NVGetLane<0b11101101, 0b1011, 0b00,
2082 (outs GPR:$dst), (ins DPR:$src, nohash_imm:$lane),
2083 NoItinerary, "vmov", ".u8\t$dst, $src[$lane]",
2084 [(set GPR:$dst, (NEONvgetlaneu (v8i8 DPR:$src),
2085 imm:$lane))]>;
2086 def VGETLNu16 : NVGetLane<0b11101001, 0b1011, 0b01,
2087 (outs GPR:$dst), (ins DPR:$src, nohash_imm:$lane),
2088 NoItinerary, "vmov", ".u16\t$dst, $src[$lane]",
2089 [(set GPR:$dst, (NEONvgetlaneu (v4i16 DPR:$src),
2090 imm:$lane))]>;
2091 def VGETLNi32 : NVGetLane<0b11100001, 0b1011, 0b00,
2092 (outs GPR:$dst), (ins DPR:$src, nohash_imm:$lane),
2093 NoItinerary, "vmov", ".32\t$dst, $src[$lane]",
2094 [(set GPR:$dst, (extractelt (v2i32 DPR:$src),
2095 imm:$lane))]>;
2096 // def VGETLNf32: see FMRDH and FMRDL in ARMInstrVFP.td
2097 def : Pat<(NEONvgetlanes (v16i8 QPR:$src), imm:$lane),
2098 (VGETLNs8 (v8i8 (EXTRACT_SUBREG QPR:$src,
2099 (DSubReg_i8_reg imm:$lane))),
2100 (SubReg_i8_lane imm:$lane))>;
2101 def : Pat<(NEONvgetlanes (v8i16 QPR:$src), imm:$lane),
2102 (VGETLNs16 (v4i16 (EXTRACT_SUBREG QPR:$src,
2103 (DSubReg_i16_reg imm:$lane))),
2104 (SubReg_i16_lane imm:$lane))>;
2105 def : Pat<(NEONvgetlaneu (v16i8 QPR:$src), imm:$lane),
2106 (VGETLNu8 (v8i8 (EXTRACT_SUBREG QPR:$src,
2107 (DSubReg_i8_reg imm:$lane))),
2108 (SubReg_i8_lane imm:$lane))>;
2109 def : Pat<(NEONvgetlaneu (v8i16 QPR:$src), imm:$lane),
2110 (VGETLNu16 (v4i16 (EXTRACT_SUBREG QPR:$src,
2111 (DSubReg_i16_reg imm:$lane))),
2112 (SubReg_i16_lane imm:$lane))>;
2113 def : Pat<(extractelt (v4i32 QPR:$src), imm:$lane),
2114 (VGETLNi32 (v2i32 (EXTRACT_SUBREG QPR:$src,
2115 (DSubReg_i32_reg imm:$lane))),
2116 (SubReg_i32_lane imm:$lane))>;
2117 def : Pat<(extractelt (v2f32 DPR:$src1), imm:$src2),
2118 (EXTRACT_SUBREG (COPY_TO_REGCLASS DPR:$src1, DPR_VFP2),
2119 (SSubReg_f32_reg imm:$src2))>;
2120 def : Pat<(extractelt (v4f32 QPR:$src1), imm:$src2),
2121 (EXTRACT_SUBREG (COPY_TO_REGCLASS QPR:$src1, QPR_VFP2),
2122 (SSubReg_f32_reg imm:$src2))>;
2123 //def : Pat<(extractelt (v2i64 QPR:$src1), imm:$src2),
2124 // (EXTRACT_SUBREG QPR:$src1, (DSubReg_f64_reg imm:$src2))>;
2125 def : Pat<(extractelt (v2f64 QPR:$src1), imm:$src2),
2126 (EXTRACT_SUBREG QPR:$src1, (DSubReg_f64_reg imm:$src2))>;
2129 // VMOV : Vector Set Lane (move ARM core register to scalar)
2131 let Constraints = "$src1 = $dst" in {
2132 def VSETLNi8 : NVSetLane<0b11100100, 0b1011, 0b00, (outs DPR:$dst),
2133 (ins DPR:$src1, GPR:$src2, nohash_imm:$lane),
2134 NoItinerary, "vmov", ".8\t$dst[$lane], $src2",
2135 [(set DPR:$dst, (vector_insert (v8i8 DPR:$src1),
2136 GPR:$src2, imm:$lane))]>;
2137 def VSETLNi16 : NVSetLane<0b11100000, 0b1011, 0b01, (outs DPR:$dst),
2138 (ins DPR:$src1, GPR:$src2, nohash_imm:$lane),
2139 NoItinerary, "vmov", ".16\t$dst[$lane], $src2",
2140 [(set DPR:$dst, (vector_insert (v4i16 DPR:$src1),
2141 GPR:$src2, imm:$lane))]>;
2142 def VSETLNi32 : NVSetLane<0b11100000, 0b1011, 0b00, (outs DPR:$dst),
2143 (ins DPR:$src1, GPR:$src2, nohash_imm:$lane),
2144 NoItinerary, "vmov", ".32\t$dst[$lane], $src2",
2145 [(set DPR:$dst, (insertelt (v2i32 DPR:$src1),
2146 GPR:$src2, imm:$lane))]>;
2147 }
2148 def : Pat<(vector_insert (v16i8 QPR:$src1), GPR:$src2, imm:$lane),
2149 (v16i8 (INSERT_SUBREG QPR:$src1,
2150 (VSETLNi8 (v8i8 (EXTRACT_SUBREG QPR:$src1,
2151 (DSubReg_i8_reg imm:$lane))),
2152 GPR:$src2, (SubReg_i8_lane imm:$lane)),
2153 (DSubReg_i8_reg imm:$lane)))>;
2154 def : Pat<(vector_insert (v8i16 QPR:$src1), GPR:$src2, imm:$lane),
2155 (v8i16 (INSERT_SUBREG QPR:$src1,
2156 (VSETLNi16 (v4i16 (EXTRACT_SUBREG QPR:$src1,
2157 (DSubReg_i16_reg imm:$lane))),
2158 GPR:$src2, (SubReg_i16_lane imm:$lane)),
2159 (DSubReg_i16_reg imm:$lane)))>;
2160 def : Pat<(insertelt (v4i32 QPR:$src1), GPR:$src2, imm:$lane),
2161 (v4i32 (INSERT_SUBREG QPR:$src1,
2162 (VSETLNi32 (v2i32 (EXTRACT_SUBREG QPR:$src1,
2163 (DSubReg_i32_reg imm:$lane))),
2164 GPR:$src2, (SubReg_i32_lane imm:$lane)),
2165 (DSubReg_i32_reg imm:$lane)))>;
2167 def : Pat<(v2f32 (insertelt DPR:$src1, SPR:$src2, imm:$src3)),
2168 (INSERT_SUBREG (COPY_TO_REGCLASS DPR:$src1, DPR_VFP2),
2169 SPR:$src2, (SSubReg_f32_reg imm:$src3))>;
2170 def : Pat<(v4f32 (insertelt QPR:$src1, SPR:$src2, imm:$src3)),
2171 (INSERT_SUBREG (COPY_TO_REGCLASS QPR:$src1, QPR_VFP2),
2172 SPR:$src2, (SSubReg_f32_reg imm:$src3))>;
2174 //def : Pat<(v2i64 (insertelt QPR:$src1, DPR:$src2, imm:$src3)),
2175 // (INSERT_SUBREG QPR:$src1, DPR:$src2, (DSubReg_f64_reg imm:$src3))>;
2176 def : Pat<(v2f64 (insertelt QPR:$src1, DPR:$src2, imm:$src3)),
2177 (INSERT_SUBREG QPR:$src1, DPR:$src2, (DSubReg_f64_reg imm:$src3))>;
2179 def : Pat<(v2f32 (scalar_to_vector SPR:$src)),
2180 (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), SPR:$src, arm_ssubreg_0)>;
2181 def : Pat<(v2f64 (scalar_to_vector DPR:$src)),
2182 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), DPR:$src, arm_dsubreg_0)>;
2183 def : Pat<(v4f32 (scalar_to_vector SPR:$src)),
2184 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), SPR:$src, arm_ssubreg_0)>;
2186 def : Pat<(v8i8 (scalar_to_vector GPR:$src)),
2187 (VSETLNi8 (v8i8 (IMPLICIT_DEF)), GPR:$src, (i32 0))>;
2188 def : Pat<(v4i16 (scalar_to_vector GPR:$src)),
2189 (VSETLNi16 (v4i16 (IMPLICIT_DEF)), GPR:$src, (i32 0))>;
2190 def : Pat<(v2i32 (scalar_to_vector GPR:$src)),
2191 (VSETLNi32 (v2i32 (IMPLICIT_DEF)), GPR:$src, (i32 0))>;
2193 def : Pat<(v16i8 (scalar_to_vector GPR:$src)),
2194 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
2195 (VSETLNi8 (v8i8 (IMPLICIT_DEF)), GPR:$src, (i32 0)),
2196 arm_dsubreg_0)>;
2197 def : Pat<(v8i16 (scalar_to_vector GPR:$src)),
2198 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
2199 (VSETLNi16 (v4i16 (IMPLICIT_DEF)), GPR:$src, (i32 0)),
2200 arm_dsubreg_0)>;
2201 def : Pat<(v4i32 (scalar_to_vector GPR:$src)),
2202 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
2203 (VSETLNi32 (v2i32 (IMPLICIT_DEF)), GPR:$src, (i32 0)),
2204 arm_dsubreg_0)>;
2206 // VDUP : Vector Duplicate (from ARM core register to all elements)
2208 class VDUPD<bits<8> opcod1, bits<2> opcod3, string asmSize, ValueType Ty>
2209 : NVDup<opcod1, 0b1011, opcod3, (outs DPR:$dst), (ins GPR:$src),
2210 NoItinerary, "vdup", !strconcat(asmSize, "\t$dst, $src"),
2211 [(set DPR:$dst, (Ty (NEONvdup (i32 GPR:$src))))]>;
2212 class VDUPQ<bits<8> opcod1, bits<2> opcod3, string asmSize, ValueType Ty>
2213 : NVDup<opcod1, 0b1011, opcod3, (outs QPR:$dst), (ins GPR:$src),
2214 NoItinerary, "vdup", !strconcat(asmSize, "\t$dst, $src"),
2215 [(set QPR:$dst, (Ty (NEONvdup (i32 GPR:$src))))]>;
2217 def VDUP8d : VDUPD<0b11101100, 0b00, ".8", v8i8>;
2218 def VDUP16d : VDUPD<0b11101000, 0b01, ".16", v4i16>;
2219 def VDUP32d : VDUPD<0b11101000, 0b00, ".32", v2i32>;
2220 def VDUP8q : VDUPQ<0b11101110, 0b00, ".8", v16i8>;
2221 def VDUP16q : VDUPQ<0b11101010, 0b01, ".16", v8i16>;
2222 def VDUP32q : VDUPQ<0b11101010, 0b00, ".32", v4i32>;
2224 def VDUPfd : NVDup<0b11101000, 0b1011, 0b00, (outs DPR:$dst), (ins GPR:$src),
2225 NoItinerary, "vdup", ".32\t$dst, $src",
2226 [(set DPR:$dst, (v2f32 (NEONvdup
2227 (f32 (bitconvert GPR:$src)))))]>;
2228 def VDUPfq : NVDup<0b11101010, 0b1011, 0b00, (outs QPR:$dst), (ins GPR:$src),
2229 NoItinerary, "vdup", ".32\t$dst, $src",
2230 [(set QPR:$dst, (v4f32 (NEONvdup
2231 (f32 (bitconvert GPR:$src)))))]>;
2233 // VDUP : Vector Duplicate Lane (from scalar to all elements)
2235 class VDUPLND<bits<2> op19_18, bits<2> op17_16, string OpcodeStr, ValueType Ty>
2236 : N2V<0b11, 0b11, op19_18, op17_16, 0b11000, 0, 0,
2237 (outs DPR:$dst), (ins DPR:$src, nohash_imm:$lane), NoItinerary,
2238 !strconcat(OpcodeStr, "\t$dst, $src[$lane]"), "",
2239 [(set DPR:$dst, (Ty (NEONvduplane (Ty DPR:$src), imm:$lane)))]>;
2241 class VDUPLNQ<bits<2> op19_18, bits<2> op17_16, string OpcodeStr,
2242 ValueType ResTy, ValueType OpTy>
2243 : N2V<0b11, 0b11, op19_18, op17_16, 0b11000, 1, 0,
2244 (outs QPR:$dst), (ins DPR:$src, nohash_imm:$lane), NoItinerary,
2245 !strconcat(OpcodeStr, "\t$dst, $src[$lane]"), "",
2246 [(set QPR:$dst, (ResTy (NEONvduplane (OpTy DPR:$src), imm:$lane)))]>;
2248 def VDUPLN8d : VDUPLND<0b00, 0b01, "vdup.8", v8i8>;
2249 def VDUPLN16d : VDUPLND<0b00, 0b10, "vdup.16", v4i16>;
2250 def VDUPLN32d : VDUPLND<0b01, 0b00, "vdup.32", v2i32>;
2251 def VDUPLNfd : VDUPLND<0b01, 0b00, "vdup.32", v2f32>;
2252 def VDUPLN8q : VDUPLNQ<0b00, 0b01, "vdup.8", v16i8, v8i8>;
2253 def VDUPLN16q : VDUPLNQ<0b00, 0b10, "vdup.16", v8i16, v4i16>;
2254 def VDUPLN32q : VDUPLNQ<0b01, 0b00, "vdup.32", v4i32, v2i32>;
2255 def VDUPLNfq : VDUPLNQ<0b01, 0b00, "vdup.32", v4f32, v2f32>;
2257 def : Pat<(v16i8 (NEONvduplane (v16i8 QPR:$src), imm:$lane)),
2258 (v16i8 (VDUPLN8q (v8i8 (EXTRACT_SUBREG QPR:$src,
2259 (DSubReg_i8_reg imm:$lane))),
2260 (SubReg_i8_lane imm:$lane)))>;
2261 def : Pat<(v8i16 (NEONvduplane (v8i16 QPR:$src), imm:$lane)),
2262 (v8i16 (VDUPLN16q (v4i16 (EXTRACT_SUBREG QPR:$src,
2263 (DSubReg_i16_reg imm:$lane))),
2264 (SubReg_i16_lane imm:$lane)))>;
2265 def : Pat<(v4i32 (NEONvduplane (v4i32 QPR:$src), imm:$lane)),
2266 (v4i32 (VDUPLN32q (v2i32 (EXTRACT_SUBREG QPR:$src,
2267 (DSubReg_i32_reg imm:$lane))),
2268 (SubReg_i32_lane imm:$lane)))>;
2269 def : Pat<(v4f32 (NEONvduplane (v4f32 QPR:$src), imm:$lane)),
2270 (v4f32 (VDUPLNfq (v2f32 (EXTRACT_SUBREG QPR:$src,
2271 (DSubReg_i32_reg imm:$lane))),
2272 (SubReg_i32_lane imm:$lane)))>;
2274 def VDUPfdf : N2V<0b11, 0b11, 0b01, 0b00, 0b11000, 0, 0,
2275 (outs DPR:$dst), (ins SPR:$src),
2276 NoItinerary, "vdup.32\t$dst, ${src:lane}", "",
2277 [(set DPR:$dst, (v2f32 (NEONvdup (f32 SPR:$src))))]>;
2279 def VDUPfqf : N2V<0b11, 0b11, 0b01, 0b00, 0b11000, 1, 0,
2280 (outs QPR:$dst), (ins SPR:$src),
2281 NoItinerary, "vdup.32\t$dst, ${src:lane}", "",
2282 [(set QPR:$dst, (v4f32 (NEONvdup (f32 SPR:$src))))]>;
2284 def : Pat<(v2i64 (NEONvduplane (v2i64 QPR:$src), imm:$lane)),
2285 (INSERT_SUBREG QPR:$src,
2286 (i64 (EXTRACT_SUBREG QPR:$src, (DSubReg_f64_reg imm:$lane))),
2287 (DSubReg_f64_other_reg imm:$lane))>;
2288 def : Pat<(v2f64 (NEONvduplane (v2f64 QPR:$src), imm:$lane)),
2289 (INSERT_SUBREG QPR:$src,
2290 (f64 (EXTRACT_SUBREG QPR:$src, (DSubReg_f64_reg imm:$lane))),
2291 (DSubReg_f64_other_reg imm:$lane))>;
2293 // VMOVN : Vector Narrowing Move
2294 defm VMOVN : N2VNInt_HSD<0b11,0b11,0b10,0b00100,0,0, "vmovn.i",
2295 int_arm_neon_vmovn>;
2296 // VQMOVN : Vector Saturating Narrowing Move
2297 defm VQMOVNs : N2VNInt_HSD<0b11,0b11,0b10,0b00101,0,0, "vqmovn.s",
2298 int_arm_neon_vqmovns>;
2299 defm VQMOVNu : N2VNInt_HSD<0b11,0b11,0b10,0b00101,1,0, "vqmovn.u",
2300 int_arm_neon_vqmovnu>;
2301 defm VQMOVNsu : N2VNInt_HSD<0b11,0b11,0b10,0b00100,1,0, "vqmovun.s",
2302 int_arm_neon_vqmovnsu>;
2303 // VMOVL : Vector Lengthening Move
2304 defm VMOVLs : N2VLInt_QHS<0,1,0b1010,0,0,1, "vmovl.s", int_arm_neon_vmovls>;
2305 defm VMOVLu : N2VLInt_QHS<1,1,0b1010,0,0,1, "vmovl.u", int_arm_neon_vmovlu>;
2307 // Vector Conversions.
2309 // VCVT : Vector Convert Between Floating-Point and Integers
2310 def VCVTf2sd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt.s32.f32",
2311 v2i32, v2f32, fp_to_sint>;
2312 def VCVTf2ud : N2VD<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt.u32.f32",
2313 v2i32, v2f32, fp_to_uint>;
2314 def VCVTs2fd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt.f32.s32",
2315 v2f32, v2i32, sint_to_fp>;
2316 def VCVTu2fd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt.f32.u32",
2317 v2f32, v2i32, uint_to_fp>;
2319 def VCVTf2sq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt.s32.f32",
2320 v4i32, v4f32, fp_to_sint>;
2321 def VCVTf2uq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt.u32.f32",
2322 v4i32, v4f32, fp_to_uint>;
2323 def VCVTs2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt.f32.s32",
2324 v4f32, v4i32, sint_to_fp>;
2325 def VCVTu2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt.f32.u32",
2326 v4f32, v4i32, uint_to_fp>;
2328 // VCVT : Vector Convert Between Floating-Point and Fixed-Point.
2329 // Note: Some of the opcode bits in the following VCVT instructions need to
2330 // be encoded based on the immed values.
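// Editorial gloss (an assumption about the encoding, not stated explicitly
// here): the number of fraction bits requested by the intrinsic's immediate
// operand has to be folded into the 6-bit field passed as 0b000000 above,
// which is why these opcode bits cannot be fixed once and for all.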
2331 def VCVTf2xsd : N2VCvtD<0, 1, 0b000000, 0b1111, 0, 1, "vcvt.s32.f32",
2332 v2i32, v2f32, int_arm_neon_vcvtfp2fxs>;
2333 def VCVTf2xud : N2VCvtD<1, 1, 0b000000, 0b1111, 0, 1, "vcvt.u32.f32",
2334 v2i32, v2f32, int_arm_neon_vcvtfp2fxu>;
2335 def VCVTxs2fd : N2VCvtD<0, 1, 0b000000, 0b1110, 0, 1, "vcvt.f32.s32",
2336 v2f32, v2i32, int_arm_neon_vcvtfxs2fp>;
2337 def VCVTxu2fd : N2VCvtD<1, 1, 0b000000, 0b1110, 0, 1, "vcvt.f32.u32",
2338 v2f32, v2i32, int_arm_neon_vcvtfxu2fp>;
2340 def VCVTf2xsq : N2VCvtQ<0, 1, 0b000000, 0b1111, 0, 1, "vcvt.s32.f32",
2341 v4i32, v4f32, int_arm_neon_vcvtfp2fxs>;
2342 def VCVTf2xuq : N2VCvtQ<1, 1, 0b000000, 0b1111, 0, 1, "vcvt.u32.f32",
2343 v4i32, v4f32, int_arm_neon_vcvtfp2fxu>;
2344 def VCVTxs2fq : N2VCvtQ<0, 1, 0b000000, 0b1110, 0, 1, "vcvt.f32.s32",
2345 v4f32, v4i32, int_arm_neon_vcvtfxs2fp>;
2346 def VCVTxu2fq : N2VCvtQ<1, 1, 0b000000, 0b1110, 0, 1, "vcvt.f32.u32",
2347 v4f32, v4i32, int_arm_neon_vcvtfxu2fp>;
2349 // Vector Reverse.
2351 // VREV64 : Vector Reverse elements within 64-bit doublewords
2353 class VREV64D<bits<2> op19_18, string OpcodeStr, ValueType Ty>
2354 : N2V<0b11, 0b11, op19_18, 0b00, 0b00000, 0, 0, (outs DPR:$dst),
2355 (ins DPR:$src), NoItinerary,
2356 !strconcat(OpcodeStr, "\t$dst, $src"), "",
2357 [(set DPR:$dst, (Ty (NEONvrev64 (Ty DPR:$src))))]>;
2358 class VREV64Q<bits<2> op19_18, string OpcodeStr, ValueType Ty>
2359 : N2V<0b11, 0b11, op19_18, 0b00, 0b00000, 1, 0, (outs QPR:$dst),
2360 (ins QPR:$src), NoItinerary,
2361 !strconcat(OpcodeStr, "\t$dst, $src"), "",
2362 [(set QPR:$dst, (Ty (NEONvrev64 (Ty QPR:$src))))]>;
2364 def VREV64d8 : VREV64D<0b00, "vrev64.8", v8i8>;
2365 def VREV64d16 : VREV64D<0b01, "vrev64.16", v4i16>;
2366 def VREV64d32 : VREV64D<0b10, "vrev64.32", v2i32>;
2367 def VREV64df : VREV64D<0b10, "vrev64.32", v2f32>;
2369 def VREV64q8 : VREV64Q<0b00, "vrev64.8", v16i8>;
2370 def VREV64q16 : VREV64Q<0b01, "vrev64.16", v8i16>;
2371 def VREV64q32 : VREV64Q<0b10, "vrev64.32", v4i32>;
2372 def VREV64qf : VREV64Q<0b10, "vrev64.32", v4f32>;
2374 // VREV32 : Vector Reverse elements within 32-bit words
2376 class VREV32D<bits<2> op19_18, string OpcodeStr, ValueType Ty>
2377 : N2V<0b11, 0b11, op19_18, 0b00, 0b00001, 0, 0, (outs DPR:$dst),
2378 (ins DPR:$src), NoItinerary,
2379 !strconcat(OpcodeStr, "\t$dst, $src"), "",
2380 [(set DPR:$dst, (Ty (NEONvrev32 (Ty DPR:$src))))]>;
2381 class VREV32Q<bits<2> op19_18, string OpcodeStr, ValueType Ty>
2382 : N2V<0b11, 0b11, op19_18, 0b00, 0b00001, 1, 0, (outs QPR:$dst),
2383 (ins QPR:$src), NoItinerary,
2384 !strconcat(OpcodeStr, "\t$dst, $src"), "",
2385 [(set QPR:$dst, (Ty (NEONvrev32 (Ty QPR:$src))))]>;
2387 def VREV32d8 : VREV32D<0b00, "vrev32.8", v8i8>;
2388 def VREV32d16 : VREV32D<0b01, "vrev32.16", v4i16>;
2390 def VREV32q8 : VREV32Q<0b00, "vrev32.8", v16i8>;
2391 def VREV32q16 : VREV32Q<0b01, "vrev32.16", v8i16>;
2393 // VREV16 : Vector Reverse elements within 16-bit halfwords
2395 class VREV16D<bits<2> op19_18, string OpcodeStr, ValueType Ty>
2396 : N2V<0b11, 0b11, op19_18, 0b00, 0b00010, 0, 0, (outs DPR:$dst),
2397 (ins DPR:$src), NoItinerary,
2398 !strconcat(OpcodeStr, "\t$dst, $src"), "",
2399 [(set DPR:$dst, (Ty (NEONvrev16 (Ty DPR:$src))))]>;
2400 class VREV16Q<bits<2> op19_18, string OpcodeStr, ValueType Ty>
2401 : N2V<0b11, 0b11, op19_18, 0b00, 0b00010, 1, 0, (outs QPR:$dst),
2402 (ins QPR:$src), NoItinerary,
2403 !strconcat(OpcodeStr, "\t$dst, $src"), "",
2404 [(set QPR:$dst, (Ty (NEONvrev16 (Ty QPR:$src))))]>;
2406 def VREV16d8 : VREV16D<0b00, "vrev16.8", v8i8>;
2407 def VREV16q8 : VREV16Q<0b00, "vrev16.8", v16i8>;
2409 // Other Vector Shuffles.
2411 // VEXT : Vector Extract
2413 class VEXTd<string OpcodeStr, ValueType Ty>
2414 : N3V<0,1,0b11,0b0000,0,0, (outs DPR:$dst),
2415 (ins DPR:$lhs, DPR:$rhs, i32imm:$index), NoItinerary,
2416 !strconcat(OpcodeStr, "\t$dst, $lhs, $rhs, $index"), "",
2417 [(set DPR:$dst, (Ty (NEONvext (Ty DPR:$lhs),
2418 (Ty DPR:$rhs), imm:$index)))]>;
2420 class VEXTq<string OpcodeStr, ValueType Ty>
2421 : N3V<0,1,0b11,0b0000,1,0, (outs QPR:$dst),
2422 (ins QPR:$lhs, QPR:$rhs, i32imm:$index), NoItinerary,
2423 !strconcat(OpcodeStr, "\t$dst, $lhs, $rhs, $index"), "",
2424 [(set QPR:$dst, (Ty (NEONvext (Ty QPR:$lhs),
2425 (Ty QPR:$rhs), imm:$index)))]>;
2427 def VEXTd8 : VEXTd<"vext.8", v8i8>;
2428 def VEXTd16 : VEXTd<"vext.16", v4i16>;
2429 def VEXTd32 : VEXTd<"vext.32", v2i32>;
2430 def VEXTdf : VEXTd<"vext.32", v2f32>;
2432 def VEXTq8 : VEXTq<"vext.8", v16i8>;
2433 def VEXTq16 : VEXTq<"vext.16", v8i16>;
2434 def VEXTq32 : VEXTq<"vext.32", v4i32>;
2435 def VEXTqf : VEXTq<"vext.32", v4f32>;
2437 // VTRN : Vector Transpose
2439 def VTRNd8 : N2VDShuffle<0b00, 0b00001, "vtrn.8">;
2440 def VTRNd16 : N2VDShuffle<0b01, 0b00001, "vtrn.16">;
2441 def VTRNd32 : N2VDShuffle<0b10, 0b00001, "vtrn.32">;
2443 def VTRNq8 : N2VQShuffle<0b00, 0b00001, "vtrn.8">;
2444 def VTRNq16 : N2VQShuffle<0b01, 0b00001, "vtrn.16">;
2445 def VTRNq32 : N2VQShuffle<0b10, 0b00001, "vtrn.32">;
2447 // VUZP : Vector Unzip (Deinterleave)
2449 def VUZPd8 : N2VDShuffle<0b00, 0b00010, "vuzp.8">;
2450 def VUZPd16 : N2VDShuffle<0b01, 0b00010, "vuzp.16">;
2451 def VUZPd32 : N2VDShuffle<0b10, 0b00010, "vuzp.32">;
2453 def VUZPq8 : N2VQShuffle<0b00, 0b00010, "vuzp.8">;
2454 def VUZPq16 : N2VQShuffle<0b01, 0b00010, "vuzp.16">;
2455 def VUZPq32 : N2VQShuffle<0b10, 0b00010, "vuzp.32">;
2457 // VZIP : Vector Zip (Interleave)
2459 def VZIPd8 : N2VDShuffle<0b00, 0b00011, "vzip.8">;
2460 def VZIPd16 : N2VDShuffle<0b01, 0b00011, "vzip.16">;
2461 def VZIPd32 : N2VDShuffle<0b10, 0b00011, "vzip.32">;
2463 def VZIPq8 : N2VQShuffle<0b00, 0b00011, "vzip.8">;
2464 def VZIPq16 : N2VQShuffle<0b01, 0b00011, "vzip.16">;
2465 def VZIPq32 : N2VQShuffle<0b10, 0b00011, "vzip.32">;
2467 // Vector Table Lookup and Table Extension.
2469 // VTBL : Vector Table Lookup
2470 def VTBL1
2471 : N3V<1,1,0b11,0b1000,0,0, (outs DPR:$dst),
2472 (ins DPR:$tbl1, DPR:$src), NoItinerary,
2473 "vtbl.8\t$dst, \\{$tbl1\\}, $src", "",
2474 [(set DPR:$dst, (v8i8 (int_arm_neon_vtbl1 DPR:$tbl1, DPR:$src)))]>;
2475 def VTBL2
2476 : N3V<1,1,0b11,0b1001,0,0, (outs DPR:$dst),
2477 (ins DPR:$tbl1, DPR:$tbl2, DPR:$src), NoItinerary,
2478 "vtbl.8\t$dst, \\{$tbl1,$tbl2\\}, $src", "",
2479 [(set DPR:$dst, (v8i8 (int_arm_neon_vtbl2
2480 DPR:$tbl1, DPR:$tbl2, DPR:$src)))]>;
2481 def VTBL3
2482 : N3V<1,1,0b11,0b1010,0,0, (outs DPR:$dst),
2483 (ins DPR:$tbl1, DPR:$tbl2, DPR:$tbl3, DPR:$src), NoItinerary,
2484 "vtbl.8\t$dst, \\{$tbl1,$tbl2,$tbl3\\}, $src", "",
2485 [(set DPR:$dst, (v8i8 (int_arm_neon_vtbl3
2486 DPR:$tbl1, DPR:$tbl2, DPR:$tbl3, DPR:$src)))]>;
2487 def VTBL4
2488 : N3V<1,1,0b11,0b1011,0,0, (outs DPR:$dst),
2489 (ins DPR:$tbl1, DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$src), NoItinerary,
2490 "vtbl.8\t$dst, \\{$tbl1,$tbl2,$tbl3,$tbl4\\}, $src", "",
2491 [(set DPR:$dst, (v8i8 (int_arm_neon_vtbl4 DPR:$tbl1, DPR:$tbl2,
2492 DPR:$tbl3, DPR:$tbl4, DPR:$src)))]>;
2494 // VTBX : Vector Table Extension
2495 def VTBX1
2496 : N3V<1,1,0b11,0b1000,1,0, (outs DPR:$dst),
2497 (ins DPR:$orig, DPR:$tbl1, DPR:$src), NoItinerary,
2498 "vtbx.8\t$dst, \\{$tbl1\\}, $src", "$orig = $dst",
2499 [(set DPR:$dst, (v8i8 (int_arm_neon_vtbx1
2500 DPR:$orig, DPR:$tbl1, DPR:$src)))]>;
2501 def VTBX2
2502 : N3V<1,1,0b11,0b1001,1,0, (outs DPR:$dst),
2503 (ins DPR:$orig, DPR:$tbl1, DPR:$tbl2, DPR:$src), NoItinerary,
2504 "vtbx.8\t$dst, \\{$tbl1,$tbl2\\}, $src", "$orig = $dst",
2505 [(set DPR:$dst, (v8i8 (int_arm_neon_vtbx2
2506 DPR:$orig, DPR:$tbl1, DPR:$tbl2, DPR:$src)))]>;
2507 def VTBX3
2508 : N3V<1,1,0b11,0b1010,1,0, (outs DPR:$dst),
2509 (ins DPR:$orig, DPR:$tbl1, DPR:$tbl2, DPR:$tbl3, DPR:$src), NoItinerary,
2510 "vtbx.8\t$dst, \\{$tbl1,$tbl2,$tbl3\\}, $src", "$orig = $dst",
2511 [(set DPR:$dst, (v8i8 (int_arm_neon_vtbx3 DPR:$orig, DPR:$tbl1,
2512 DPR:$tbl2, DPR:$tbl3, DPR:$src)))]>;
2513 def VTBX4
2514 : N3V<1,1,0b11,0b1011,1,0, (outs DPR:$dst), (ins DPR:$orig, DPR:$tbl1,
2515 DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$src), NoItinerary,
2516 "vtbx.8\t$dst, \\{$tbl1,$tbl2,$tbl3,$tbl4\\}, $src", "$orig = $dst",
2517 [(set DPR:$dst, (v8i8 (int_arm_neon_vtbx4 DPR:$orig, DPR:$tbl1,
2518 DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$src)))]>;
2520 //===----------------------------------------------------------------------===//
2521 // NEON instructions for single-precision FP math
2522 //===----------------------------------------------------------------------===//
2524 // These need separate instructions because they must use the DPR_VFP2 register
2525 // class, which has SPR sub-registers.
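// Editorial gloss: the scalar f32 value lives in an S register while these
// instructions operate on the containing D register, so only the D registers
// that actually have SPR sub-registers (the DPR_VFP2 class) are usable; each
// *_sfp instruction below is paired with a separate pattern (for example
// N3VDsPat<fadd, VADDfd_sfp>) that maps the scalar operation onto it.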
2527 // Vector Add Operations used for single-precision FP
2528 let neverHasSideEffects = 1 in
2529 def VADDfd_sfp : N3VDs<0, 0, 0b00, 0b1101, 0, "vadd.f32", v2f32, v2f32, fadd,1>;
2530 def : N3VDsPat<fadd, VADDfd_sfp>;

// Vector Sub Operations used for single-precision FP
let neverHasSideEffects = 1 in
def VSUBfd_sfp : N3VDs<0, 0, 0b10, 0b1101, 0, "vsub.f32", v2f32, v2f32, fsub,0>;
def : N3VDsPat<fsub, VSUBfd_sfp>;

// Vector Multiply Operations used for single-precision FP
let neverHasSideEffects = 1 in
def VMULfd_sfp : N3VDs<1, 0, 0b00, 0b1101, 1, "vmul.f32", v2f32, v2f32, fmul,1>;
def : N3VDsPat<fmul, VMULfd_sfp>;

// Vector Multiply-Accumulate/Subtract used for single-precision FP
let neverHasSideEffects = 1 in
def VMLAfd_sfp : N3VDMulOps<0, 0, 0b00, 0b1101, 1, "vmla.f32", v2f32,fmul,fadd>;
def : N3VDMulOpsPat<fmul, fadd, VMLAfd_sfp>;

let neverHasSideEffects = 1 in
def VMLSfd_sfp : N3VDMulOps<0, 0, 0b10, 0b1101, 1, "vmls.f32", v2f32,fmul,fsub>;
def : N3VDMulOpsPat<fmul, fsub, VMLSfd_sfp>;

// Vector Absolute used for single-precision FP
let neverHasSideEffects = 1 in
def VABSfd_sfp : N2VDInts<0b11, 0b11, 0b10, 0b01, 0b01110, 0, "vabs.f32",
                          v2f32, v2f32, int_arm_neon_vabs>;
def : N2VDIntsPat<fabs, VABSfd_sfp>;

// Vector Negate used for single-precision FP
let neverHasSideEffects = 1 in
def VNEGf32d_sfp : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 0, 0,
                       (outs DPR_VFP2:$dst), (ins DPR_VFP2:$src), NoItinerary,
                       "vneg.f32\t$dst, $src", "", []>;
def : N2VDIntsPat<fneg, VNEGf32d_sfp>;

// Vector Convert between single-precision FP and integer
let neverHasSideEffects = 1 in
def VCVTf2sd_sfp : N2VDs<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt.s32.f32",
                         v2i32, v2f32, fp_to_sint>;
def : N2VDsPat<arm_ftosi, f32, v2f32, VCVTf2sd_sfp>;

let neverHasSideEffects = 1 in
def VCVTf2ud_sfp : N2VDs<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt.u32.f32",
                         v2i32, v2f32, fp_to_uint>;
def : N2VDsPat<arm_ftoui, f32, v2f32, VCVTf2ud_sfp>;

let neverHasSideEffects = 1 in
def VCVTs2fd_sfp : N2VDs<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt.f32.s32",
                         v2f32, v2i32, sint_to_fp>;
def : N2VDsPat<arm_sitof, f32, v2i32, VCVTs2fd_sfp>;

let neverHasSideEffects = 1 in
def VCVTu2fd_sfp : N2VDs<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt.f32.u32",
                         v2f32, v2i32, uint_to_fp>;
def : N2VDsPat<arm_uitof, f32, v2i32, VCVTu2fd_sfp>;
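
// For example (illustrative only): a scalar f32-to-i32 conversion matched
// through the arm_ftosi node is selected to VCVTf2sd_sfp above, operating on
// the D register that holds the source S register, and prints as
// "vcvt.s32.f32 dD, dM".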

//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//
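
// bit_convert: the patterns below reinterpret the bits already held in a
// D or Q register, so a bitconvert between same-sized vector types (or f64)
// selects to no instructions at all.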
def : Pat<(v1i64 (bitconvert (v2i32 DPR:$src))), (v1i64 DPR:$src)>;
def : Pat<(v1i64 (bitconvert (v4i16 DPR:$src))), (v1i64 DPR:$src)>;
def : Pat<(v1i64 (bitconvert (v8i8  DPR:$src))), (v1i64 DPR:$src)>;
def : Pat<(v1i64 (bitconvert (f64   DPR:$src))), (v1i64 DPR:$src)>;
def : Pat<(v1i64 (bitconvert (v2f32 DPR:$src))), (v1i64 DPR:$src)>;
def : Pat<(v2i32 (bitconvert (v1i64 DPR:$src))), (v2i32 DPR:$src)>;
def : Pat<(v2i32 (bitconvert (v4i16 DPR:$src))), (v2i32 DPR:$src)>;
def : Pat<(v2i32 (bitconvert (v8i8  DPR:$src))), (v2i32 DPR:$src)>;
def : Pat<(v2i32 (bitconvert (f64   DPR:$src))), (v2i32 DPR:$src)>;
def : Pat<(v2i32 (bitconvert (v2f32 DPR:$src))), (v2i32 DPR:$src)>;
def : Pat<(v4i16 (bitconvert (v1i64 DPR:$src))), (v4i16 DPR:$src)>;
def : Pat<(v4i16 (bitconvert (v2i32 DPR:$src))), (v4i16 DPR:$src)>;
def : Pat<(v4i16 (bitconvert (v8i8  DPR:$src))), (v4i16 DPR:$src)>;
def : Pat<(v4i16 (bitconvert (f64   DPR:$src))), (v4i16 DPR:$src)>;
def : Pat<(v4i16 (bitconvert (v2f32 DPR:$src))), (v4i16 DPR:$src)>;
def : Pat<(v8i8  (bitconvert (v1i64 DPR:$src))), (v8i8  DPR:$src)>;
def : Pat<(v8i8  (bitconvert (v2i32 DPR:$src))), (v8i8  DPR:$src)>;
def : Pat<(v8i8  (bitconvert (v4i16 DPR:$src))), (v8i8  DPR:$src)>;
def : Pat<(v8i8  (bitconvert (f64   DPR:$src))), (v8i8  DPR:$src)>;
def : Pat<(v8i8  (bitconvert (v2f32 DPR:$src))), (v8i8  DPR:$src)>;
def : Pat<(f64   (bitconvert (v1i64 DPR:$src))), (f64   DPR:$src)>;
def : Pat<(f64   (bitconvert (v2i32 DPR:$src))), (f64   DPR:$src)>;
def : Pat<(f64   (bitconvert (v4i16 DPR:$src))), (f64   DPR:$src)>;
def : Pat<(f64   (bitconvert (v8i8  DPR:$src))), (f64   DPR:$src)>;
def : Pat<(f64   (bitconvert (v2f32 DPR:$src))), (f64   DPR:$src)>;
def : Pat<(v2f32 (bitconvert (f64   DPR:$src))), (v2f32 DPR:$src)>;
def : Pat<(v2f32 (bitconvert (v1i64 DPR:$src))), (v2f32 DPR:$src)>;
def : Pat<(v2f32 (bitconvert (v2i32 DPR:$src))), (v2f32 DPR:$src)>;
def : Pat<(v2f32 (bitconvert (v4i16 DPR:$src))), (v2f32 DPR:$src)>;
def : Pat<(v2f32 (bitconvert (v8i8  DPR:$src))), (v2f32 DPR:$src)>;

def : Pat<(v2i64 (bitconvert (v4i32 QPR:$src))), (v2i64 QPR:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 QPR:$src))), (v2i64 QPR:$src)>;
def : Pat<(v2i64 (bitconvert (v16i8 QPR:$src))), (v2i64 QPR:$src)>;
def : Pat<(v2i64 (bitconvert (v2f64 QPR:$src))), (v2i64 QPR:$src)>;
def : Pat<(v2i64 (bitconvert (v4f32 QPR:$src))), (v2i64 QPR:$src)>;
def : Pat<(v4i32 (bitconvert (v2i64 QPR:$src))), (v4i32 QPR:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 QPR:$src))), (v4i32 QPR:$src)>;
def : Pat<(v4i32 (bitconvert (v16i8 QPR:$src))), (v4i32 QPR:$src)>;
def : Pat<(v4i32 (bitconvert (v2f64 QPR:$src))), (v4i32 QPR:$src)>;
def : Pat<(v4i32 (bitconvert (v4f32 QPR:$src))), (v4i32 QPR:$src)>;
def : Pat<(v8i16 (bitconvert (v2i64 QPR:$src))), (v8i16 QPR:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 QPR:$src))), (v8i16 QPR:$src)>;
def : Pat<(v8i16 (bitconvert (v16i8 QPR:$src))), (v8i16 QPR:$src)>;
def : Pat<(v8i16 (bitconvert (v2f64 QPR:$src))), (v8i16 QPR:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 QPR:$src))), (v8i16 QPR:$src)>;
def : Pat<(v16i8 (bitconvert (v2i64 QPR:$src))), (v16i8 QPR:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 QPR:$src))), (v16i8 QPR:$src)>;
def : Pat<(v16i8 (bitconvert (v8i16 QPR:$src))), (v16i8 QPR:$src)>;
def : Pat<(v16i8 (bitconvert (v2f64 QPR:$src))), (v16i8 QPR:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 QPR:$src))), (v16i8 QPR:$src)>;
def : Pat<(v4f32 (bitconvert (v2i64 QPR:$src))), (v4f32 QPR:$src)>;
def : Pat<(v4f32 (bitconvert (v4i32 QPR:$src))), (v4f32 QPR:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 QPR:$src))), (v4f32 QPR:$src)>;
def : Pat<(v4f32 (bitconvert (v16i8 QPR:$src))), (v4f32 QPR:$src)>;
def : Pat<(v4f32 (bitconvert (v2f64 QPR:$src))), (v4f32 QPR:$src)>;
def : Pat<(v2f64 (bitconvert (v2i64 QPR:$src))), (v2f64 QPR:$src)>;
def : Pat<(v2f64 (bitconvert (v4i32 QPR:$src))), (v2f64 QPR:$src)>;
def : Pat<(v2f64 (bitconvert (v8i16 QPR:$src))), (v2f64 QPR:$src)>;
def : Pat<(v2f64 (bitconvert (v16i8 QPR:$src))), (v2f64 QPR:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 QPR:$src))), (v2f64 QPR:$src)>;