//===- IntrinsicsAARCH64.td - Defines AARCH64 intrinsics ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the AARCH64-specific intrinsics.
//
//===----------------------------------------------------------------------===//

let TargetPrefix = "aarch64" in {

def int_aarch64_ldxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
def int_aarch64_ldaxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
def int_aarch64_stxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty]>;
def int_aarch64_stlxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty]>;

def int_aarch64_ldxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty]>;
def int_aarch64_ldaxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty]>;
def int_aarch64_stxp : Intrinsic<[llvm_i32_ty],
                                 [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty]>;
def int_aarch64_stlxp : Intrinsic<[llvm_i32_ty],
                                  [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty]>;

def int_aarch64_clrex : Intrinsic<[]>;

def int_aarch64_sdiv : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
                                 LLVMMatchType<0>], [IntrNoMem]>;
def int_aarch64_udiv : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
                                 LLVMMatchType<0>], [IntrNoMem]>;

def int_aarch64_fjcvtzs : Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;

//===----------------------------------------------------------------------===//
// HINT

def int_aarch64_hint : Intrinsic<[], [llvm_i32_ty]>;

//===----------------------------------------------------------------------===//
// Data Barrier Instructions

def int_aarch64_dmb : GCCBuiltin<"__builtin_arm_dmb">, MSBuiltin<"__dmb">, Intrinsic<[], [llvm_i32_ty]>;
def int_aarch64_dsb : GCCBuiltin<"__builtin_arm_dsb">, MSBuiltin<"__dsb">, Intrinsic<[], [llvm_i32_ty]>;
def int_aarch64_isb : GCCBuiltin<"__builtin_arm_isb">, MSBuiltin<"__isb">, Intrinsic<[], [llvm_i32_ty]>;

// A space-consuming intrinsic primarily for testing block and jump table
// placements. The first argument is the number of bytes this "instruction"
// takes up; the second argument and the return value are essentially chains,
// used to force ordering during ISel.
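// For illustration (example not in the original file; operand names are
// hypothetical), a use reserving 32 bytes of space looks like:
//   %chain.out = call i64 @llvm.aarch64.space(i32 32, i64 %chain.in)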
def int_aarch64_space : Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty], []>;

}

//===----------------------------------------------------------------------===//
// Advanced SIMD (NEON)

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_2Scalar_Float_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;

  class AdvSIMD_FPToIntRounding_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem]>;

  class AdvSIMD_1IntArg_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1FloatArg_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Expand_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Long_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>], [IntrNoMem]>;
  class AdvSIMD_1IntArg_Narrow_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyint_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Narrow_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [LLVMExtendedType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Int_Across_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyvector_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Float_Across_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;

  class AdvSIMD_2IntArg_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2FloatArg_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Compare_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>],
                [IntrNoMem]>;
  class AdvSIMD_2Arg_FloatCompare_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Long_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMTruncatedType<0>, LLVMTruncatedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Wide_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, LLVMTruncatedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Narrow_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMExtendedType<0>, LLVMExtendedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2Arg_Scalar_Narrow_Intrinsic
    : Intrinsic<[llvm_anyint_ty],
                [LLVMExtendedType<0>, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Expand_BySize_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_anyvector_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMTruncatedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMTruncatedType<0>, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Tied_Narrow_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty],
                [IntrNoMem]>;

  class AdvSIMD_3VectorArg_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Scalar_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Tied_Narrow_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty,
                 LLVMMatchType<1>], [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Scalar_Tied_Narrow_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_CvtFxToFP_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_CvtFPToFx_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty],
                [IntrNoMem]>;

  class AdvSIMD_1Arg_Intrinsic
    : Intrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrNoMem]>;

  class AdvSIMD_Dot_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
                [IntrNoMem]>;

  class AdvSIMD_FP16FML_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
                [IntrNoMem]>;
}

// Arithmetic ops

let TargetPrefix = "aarch64", IntrProperties = [IntrNoMem] in {
  // Vector Add Across Lanes
  def int_aarch64_neon_saddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uaddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_faddv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Vector Long Add Across Lanes
  def int_aarch64_neon_saddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uaddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;

  // Vector Halving Add
  def int_aarch64_neon_shadd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uhadd : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Rounding Halving Add
  def int_aarch64_neon_srhadd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_urhadd : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Saturating Add
  def int_aarch64_neon_sqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_suqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_usqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqadd : AdvSIMD_2IntArg_Intrinsic;

  // Vector Add High-Half
  // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
  // header is no longer supported.
  def int_aarch64_neon_addhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Rounding Add High-Half
  def int_aarch64_neon_raddhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Saturating Doubling Multiply High
  def int_aarch64_neon_sqdmulh : AdvSIMD_2IntArg_Intrinsic;

  // Vector Saturating Rounding Doubling Multiply High
  def int_aarch64_neon_sqrdmulh : AdvSIMD_2IntArg_Intrinsic;

  // Vector Polynomial Multiply
  def int_aarch64_neon_pmul : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Long Multiply
  def int_aarch64_neon_smull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_umull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_pmull : AdvSIMD_2VectorArg_Long_Intrinsic;

  // 64-bit polynomial multiply really returns an i128, which is not legal. Fake
  // it with a v16i8.
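  // Illustrative IR (example not in the original file; operand names are
  // hypothetical):
  //   %prod = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %lhs, i64 %rhs)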
  def int_aarch64_neon_pmull64 :
        Intrinsic<[llvm_v16i8_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;

  // Vector Extending Multiply
  def int_aarch64_neon_fmulx : AdvSIMD_2FloatArg_Intrinsic {
    let IntrProperties = [IntrNoMem, Commutative];
  }

  // Vector Saturating Doubling Long Multiply
  def int_aarch64_neon_sqdmull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_sqdmulls_scalar
    : Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;

  // Vector Halving Subtract
  def int_aarch64_neon_shsub : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uhsub : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Saturating Subtract
  def int_aarch64_neon_sqsub : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqsub : AdvSIMD_2IntArg_Intrinsic;

  // Vector Subtract High-Half
  // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
  // header is no longer supported.
  def int_aarch64_neon_subhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Rounding Subtract High-Half
  def int_aarch64_neon_rsubhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Compare Absolute Greater-than-or-equal
  def int_aarch64_neon_facge : AdvSIMD_2Arg_FloatCompare_Intrinsic;

  // Vector Compare Absolute Greater-than
  def int_aarch64_neon_facgt : AdvSIMD_2Arg_FloatCompare_Intrinsic;

  // Vector Absolute Difference
  def int_aarch64_neon_sabd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uabd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fabd : AdvSIMD_2VectorArg_Intrinsic;

  // Scalar Absolute Difference
  def int_aarch64_sisd_fabd : AdvSIMD_2Scalar_Float_Intrinsic;

  // Vector Max
  def int_aarch64_neon_smax : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umax : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmax : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_fmaxnmp : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Max Across Lanes
  def int_aarch64_neon_smaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_umaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_fmaxv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
  def int_aarch64_neon_fmaxnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Vector Min
  def int_aarch64_neon_smin : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umin : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmin : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_fminnmp : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Min/Max Number
  def int_aarch64_neon_fminnm : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_fmaxnm : AdvSIMD_2FloatArg_Intrinsic;

  // Vector Min Across Lanes
  def int_aarch64_neon_sminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_fminv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
  def int_aarch64_neon_fminnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Pairwise Add
  def int_aarch64_neon_addp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_faddp : AdvSIMD_2VectorArg_Intrinsic;

  // Long Pairwise Add
  // FIXME: In theory, we shouldn't need intrinsics for saddlp or
  // uaddlp, but tblgen's type inference currently can't handle the
  // pattern fragments this ends up generating.
  def int_aarch64_neon_saddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;
  def int_aarch64_neon_uaddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;

  // Folding Maximum
  def int_aarch64_neon_smaxp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umaxp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmaxp : AdvSIMD_2VectorArg_Intrinsic;

  // Folding Minimum
  def int_aarch64_neon_sminp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uminp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fminp : AdvSIMD_2VectorArg_Intrinsic;

  // Reciprocal Estimate/Step
  def int_aarch64_neon_frecps : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_frsqrts : AdvSIMD_2FloatArg_Intrinsic;

  // Reciprocal Exponent
  def int_aarch64_neon_frecpx : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Saturating Shift Left
  def int_aarch64_neon_sqshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Rounding Shift Left
  def int_aarch64_neon_srshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_urshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Saturating Rounding Shift Left
  def int_aarch64_neon_sqrshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqrshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Signed->Unsigned Shift Left by Constant
  def int_aarch64_neon_sqshlu : AdvSIMD_2IntArg_Intrinsic;

  // Vector Signed->Unsigned Narrowing Saturating Shift Right by Constant
  def int_aarch64_neon_sqshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Signed->Unsigned Rounding Narrowing Saturating Shift Right by Const
  def int_aarch64_neon_sqrshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Narrowing Shift Right by Constant
  def int_aarch64_neon_sqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
  def int_aarch64_neon_uqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Rounding Narrowing Shift Right by Constant
  def int_aarch64_neon_rshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Rounding Narrowing Saturating Shift Right by Constant
  def int_aarch64_neon_sqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
  def int_aarch64_neon_uqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Shift Left
  def int_aarch64_neon_sshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_ushl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Widening Shift Left by Constant
  def int_aarch64_neon_shll : AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic;
  def int_aarch64_neon_sshll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;
  def int_aarch64_neon_ushll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;

  // Vector Shift Right by Constant and Insert
  def int_aarch64_neon_vsri : AdvSIMD_3VectorArg_Scalar_Intrinsic;

  // Vector Shift Left by Constant and Insert
  def int_aarch64_neon_vsli : AdvSIMD_3VectorArg_Scalar_Intrinsic;

  // Vector Saturating Narrow
  def int_aarch64_neon_scalar_sqxtn : AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_scalar_uqxtn : AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_sqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;
  def int_aarch64_neon_uqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;

  // Vector Saturating Extract and Unsigned Narrow
  def int_aarch64_neon_scalar_sqxtun : AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_sqxtun : AdvSIMD_1VectorArg_Narrow_Intrinsic;

  // Vector Absolute Value
  def int_aarch64_neon_abs : AdvSIMD_1Arg_Intrinsic;

  // Vector Saturating Absolute Value
  def int_aarch64_neon_sqabs : AdvSIMD_1IntArg_Intrinsic;

  // Vector Saturating Negation
  def int_aarch64_neon_sqneg : AdvSIMD_1IntArg_Intrinsic;

  // Vector Count Leading Sign Bits
  def int_aarch64_neon_cls : AdvSIMD_1VectorArg_Intrinsic;

  // Vector Reciprocal Estimate
  def int_aarch64_neon_urecpe : AdvSIMD_1VectorArg_Intrinsic;
  def int_aarch64_neon_frecpe : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Square Root Estimate
  def int_aarch64_neon_ursqrte : AdvSIMD_1VectorArg_Intrinsic;
  def int_aarch64_neon_frsqrte : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Bitwise Reverse
  def int_aarch64_neon_rbit : AdvSIMD_1VectorArg_Intrinsic;

  // Vector Conversions Between Half-Precision and Single-Precision.
  def int_aarch64_neon_vcvtfp2hf
    : Intrinsic<[llvm_v4i16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_aarch64_neon_vcvthf2fp
    : Intrinsic<[llvm_v4f32_ty], [llvm_v4i16_ty], [IntrNoMem]>;

  // Vector Conversions Between Floating-point and Fixed-point.
  def int_aarch64_neon_vcvtfp2fxs : AdvSIMD_CvtFPToFx_Intrinsic;
  def int_aarch64_neon_vcvtfp2fxu : AdvSIMD_CvtFPToFx_Intrinsic;
  def int_aarch64_neon_vcvtfxs2fp : AdvSIMD_CvtFxToFP_Intrinsic;
  def int_aarch64_neon_vcvtfxu2fp : AdvSIMD_CvtFxToFP_Intrinsic;

  // Vector FP->Int Conversions
  def int_aarch64_neon_fcvtas : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtau : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtms : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtmu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtns : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtnu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtps : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtpu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtzs : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtzu : AdvSIMD_FPToIntRounding_Intrinsic;

  // Vector FP Rounding: only the round-to-nearest (ties-to-even) mode is not
  // representable by a normal intrinsic.
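  // Illustrative IR (example not in the original file; the ".v2f64" suffix
  // follows the usual overloaded-intrinsic name mangling):
  //   %r = call <2 x double> @llvm.aarch64.neon.frintn.v2f64(<2 x double> %v)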
  def int_aarch64_neon_frintn : AdvSIMD_1FloatArg_Intrinsic;

  // Scalar FP->Int conversions

  // Vector FP Inexact Narrowing
  def int_aarch64_neon_fcvtxn : AdvSIMD_1VectorArg_Expand_Intrinsic;

  // Scalar FP Inexact Narrowing
  def int_aarch64_sisd_fcvtxn : Intrinsic<[llvm_float_ty], [llvm_double_ty],
                                          [IntrNoMem]>;

  // v8.2-A Dot Product
  def int_aarch64_neon_udot : AdvSIMD_Dot_Intrinsic;
  def int_aarch64_neon_sdot : AdvSIMD_Dot_Intrinsic;

  // v8.2-A FP16 Fused Multiply-Add Long
  def int_aarch64_neon_fmlal : AdvSIMD_FP16FML_Intrinsic;
  def int_aarch64_neon_fmlsl : AdvSIMD_FP16FML_Intrinsic;
  def int_aarch64_neon_fmlal2 : AdvSIMD_FP16FML_Intrinsic;
  def int_aarch64_neon_fmlsl2 : AdvSIMD_FP16FML_Intrinsic;
}

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_2Vector2Index_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_anyvector_ty, llvm_i64_ty, LLVMMatchType<0>, llvm_i64_ty],
                [IntrNoMem]>;
}

// Vector element to element moves
def int_aarch64_neon_vcopy_lane : AdvSIMD_2Vector2Index_Intrinsic;

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_1Vec_Load_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_1Vec_Store_Lane_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, llvm_i64_ty, llvm_anyptr_ty],
                [IntrArgMemOnly, NoCapture<2>]>;

  class AdvSIMD_2Vec_Load_Intrinsic
    : Intrinsic<[LLVMMatchType<0>, llvm_anyvector_ty],
                [LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_2Vec_Load_Lane_Intrinsic
    : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMMatchType<0>, llvm_anyvector_ty,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_2Vec_Store_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrArgMemOnly, NoCapture<2>]>;
  class AdvSIMD_2Vec_Store_Lane_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_i64_ty, llvm_anyptr_ty],
                [IntrArgMemOnly, NoCapture<3>]>;

  class AdvSIMD_3Vec_Load_Intrinsic
    : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty],
                [LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_3Vec_Load_Lane_Intrinsic
    : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_3Vec_Store_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMMatchType<0>, LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrArgMemOnly, NoCapture<3>]>;
  class AdvSIMD_3Vec_Store_Lane_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty,
                     LLVMMatchType<0>, LLVMMatchType<0>,
                     llvm_i64_ty, llvm_anyptr_ty],
                [IntrArgMemOnly, NoCapture<4>]>;

  class AdvSIMD_4Vec_Load_Intrinsic
    : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, llvm_anyvector_ty],
                [LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_4Vec_Load_Lane_Intrinsic
    : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, llvm_anyvector_ty,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_4Vec_Store_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrArgMemOnly, NoCapture<4>]>;
  class AdvSIMD_4Vec_Store_Lane_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMMatchType<0>, LLVMMatchType<0>,
                     llvm_i64_ty, llvm_anyptr_ty],
                [IntrArgMemOnly, NoCapture<5>]>;
}

// Memory ops

def int_aarch64_neon_ld1x2 : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld1x3 : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld1x4 : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_st1x2 : AdvSIMD_2Vec_Store_Intrinsic;
def int_aarch64_neon_st1x3 : AdvSIMD_3Vec_Store_Intrinsic;
def int_aarch64_neon_st1x4 : AdvSIMD_4Vec_Store_Intrinsic;

def int_aarch64_neon_ld2 : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld3 : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld4 : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_ld2lane : AdvSIMD_2Vec_Load_Lane_Intrinsic;
def int_aarch64_neon_ld3lane : AdvSIMD_3Vec_Load_Lane_Intrinsic;
def int_aarch64_neon_ld4lane : AdvSIMD_4Vec_Load_Lane_Intrinsic;

def int_aarch64_neon_ld2r : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld3r : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld4r : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_st2 : AdvSIMD_2Vec_Store_Intrinsic;
def int_aarch64_neon_st3 : AdvSIMD_3Vec_Store_Intrinsic;
def int_aarch64_neon_st4 : AdvSIMD_4Vec_Store_Intrinsic;

def int_aarch64_neon_st2lane : AdvSIMD_2Vec_Store_Lane_Intrinsic;
def int_aarch64_neon_st3lane : AdvSIMD_3Vec_Store_Lane_Intrinsic;
def int_aarch64_neon_st4lane : AdvSIMD_4Vec_Store_Lane_Intrinsic;

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_Tbl1_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbl2_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_Tbl3_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbl4_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;

  class AdvSIMD_Tbx1_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbx2_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbx3_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                 llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbx4_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                 llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
}
def int_aarch64_neon_tbl1 : AdvSIMD_Tbl1_Intrinsic;
def int_aarch64_neon_tbl2 : AdvSIMD_Tbl2_Intrinsic;
def int_aarch64_neon_tbl3 : AdvSIMD_Tbl3_Intrinsic;
def int_aarch64_neon_tbl4 : AdvSIMD_Tbl4_Intrinsic;

def int_aarch64_neon_tbx1 : AdvSIMD_Tbx1_Intrinsic;
def int_aarch64_neon_tbx2 : AdvSIMD_Tbx2_Intrinsic;
def int_aarch64_neon_tbx3 : AdvSIMD_Tbx3_Intrinsic;
def int_aarch64_neon_tbx4 : AdvSIMD_Tbx4_Intrinsic;

let TargetPrefix = "aarch64" in {
  class FPCR_Get_Intrinsic
    : Intrinsic<[llvm_i64_ty], [], [IntrNoMem]>;
}

// FPCR
def int_aarch64_get_fpcr : FPCR_Get_Intrinsic;

let TargetPrefix = "aarch64" in {
  class Crypto_AES_DataKey_Intrinsic
    : Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;

  class Crypto_AES_Data_Intrinsic
    : Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;

  // SHA intrinsic taking 5 words of the hash (v4i32, i32) and 4 of the schedule
  // (v4i32).
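  // For example (illustrative only; operand names are hypothetical), sha1c
  // below has this shape in IR:
  //   %h = call <4 x i32> @llvm.aarch64.crypto.sha1c(<4 x i32> %abcd, i32 %e,
  //                                                  <4 x i32> %wk)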
  class Crypto_SHA_5Hash4Schedule_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;

  // SHA intrinsic taking 1 word of the hash (i32).
  class Crypto_SHA_1Hash_Intrinsic
    : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;

  // SHA intrinsic taking 8 words of the schedule
  class Crypto_SHA_8Schedule_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;

  // SHA intrinsic taking 12 words of the schedule
  class Crypto_SHA_12Schedule_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;

  // SHA intrinsic taking 8 words of the hash and 4 of the schedule.
  class Crypto_SHA_8Hash4Schedule_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;
}

// AES
def int_aarch64_crypto_aese   : Crypto_AES_DataKey_Intrinsic;
def int_aarch64_crypto_aesd   : Crypto_AES_DataKey_Intrinsic;
def int_aarch64_crypto_aesmc  : Crypto_AES_Data_Intrinsic;
def int_aarch64_crypto_aesimc : Crypto_AES_Data_Intrinsic;

// SHA1
def int_aarch64_crypto_sha1c  : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1p  : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1m  : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1h  : Crypto_SHA_1Hash_Intrinsic;

def int_aarch64_crypto_sha1su0 : Crypto_SHA_12Schedule_Intrinsic;
def int_aarch64_crypto_sha1su1 : Crypto_SHA_8Schedule_Intrinsic;

// SHA256
def int_aarch64_crypto_sha256h   : Crypto_SHA_8Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha256h2  : Crypto_SHA_8Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha256su0 : Crypto_SHA_8Schedule_Intrinsic;
def int_aarch64_crypto_sha256su1 : Crypto_SHA_12Schedule_Intrinsic;

//===----------------------------------------------------------------------===//
// CRC32

let TargetPrefix = "aarch64" in {

def int_aarch64_crc32b  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32cb : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32h  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32ch : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32w  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32cw : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32x  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
    [IntrNoMem]>;
def int_aarch64_crc32cx : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
    [IntrNoMem]>;
}

//===----------------------------------------------------------------------===//
// Memory Tagging Extensions (MTE) Intrinsics
let TargetPrefix = "aarch64" in {
def int_aarch64_irg  : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty],
    [IntrNoMem, IntrHasSideEffects]>;
def int_aarch64_addg : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty],
    [IntrNoMem]>;
def int_aarch64_gmi  : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_i64_ty],
    [IntrNoMem]>;
def int_aarch64_ldg  : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty],
    [IntrReadMem]>;
def int_aarch64_stg  : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty],
    [IntrWriteMem]>;
def int_aarch64_subp : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_ptr_ty],
    [IntrNoMem]>;

// The following are codegen-only intrinsics for stack instrumentation.

// Generate a randomly tagged stack base pointer.
def int_aarch64_irg_sp : Intrinsic<[llvm_ptr_ty], [llvm_i64_ty],
    [IntrNoMem, IntrHasSideEffects]>;

// Transfer pointer tag with offset.
// ptr1 = tagp(ptr0, baseptr, tag_offset) returns a pointer where
// * address is the address in ptr0
// * tag is a function of (tag in baseptr, tag_offset).
// Address bits in baseptr and tag bits in ptr0 are ignored.
// When the offset between ptr0 and baseptr is a compile-time constant, this
// can be emitted as
//   ADDG ptr1, baseptr, (ptr0 - baseptr), tag_offset
// It is intended that ptr0 is an alloca address, and baseptr is the direct
// output of llvm.aarch64.irg.sp.
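// Illustrative IR (example not in the original file; value names are
// hypothetical):
//   %base = call i8* @llvm.aarch64.irg.sp(i64 0)
//   %a = alloca i8, i64 16, align 16
//   %a.tagged = call i8* @llvm.aarch64.tagp.p0i8(i8* %a, i8* %base, i64 1)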
def int_aarch64_tagp : Intrinsic<[llvm_anyptr_ty], [LLVMMatchType<0>, llvm_ptr_ty, llvm_i64_ty],
    [IntrNoMem, ImmArg<2>]>;

// Update allocation tags for the memory range to match the tag in the pointer
// argument.
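// For example (illustrative only; %p is hypothetical):
//   call void @llvm.aarch64.settag(i8* %p, i64 64)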
def int_aarch64_settag : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty],
    [IntrWriteMem, IntrArgMemOnly, NoCapture<0>, WriteOnly<0>]>;

// Update allocation tags for the memory range to match the tag in the pointer
// argument, and set memory contents to zero.
def int_aarch64_settag_zero : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty],
    [IntrWriteMem, IntrArgMemOnly, NoCapture<0>, WriteOnly<0>]>;

// Update allocation tags for a 16-byte aligned, 16-byte sized memory region,
// and store a pair of 8-byte values.
def int_aarch64_stgp : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty, llvm_i64_ty],
    [IntrWriteMem, IntrArgMemOnly, NoCapture<0>, WriteOnly<0>]>;
}

// Transactional Memory Extension (TME) Intrinsics
let TargetPrefix = "aarch64" in {
def int_aarch64_tstart  : GCCBuiltin<"__builtin_arm_tstart">,
                          Intrinsic<[llvm_i64_ty]>;

def int_aarch64_tcommit : GCCBuiltin<"__builtin_arm_tcommit">, Intrinsic<[]>;

def int_aarch64_tcancel : GCCBuiltin<"__builtin_arm_tcancel">,
                          Intrinsic<[], [llvm_i64_ty], [ImmArg<0>]>;

def int_aarch64_ttest   : GCCBuiltin<"__builtin_arm_ttest">,
                          Intrinsic<[llvm_i64_ty], [],
                                    [IntrNoMem, IntrHasSideEffects]>;
}

//===----------------------------------------------------------------------===//
// SVE

def llvm_nxv2i1_ty  : LLVMType<nxv2i1>;
def llvm_nxv4i1_ty  : LLVMType<nxv4i1>;
def llvm_nxv8i1_ty  : LLVMType<nxv8i1>;
def llvm_nxv16i1_ty : LLVMType<nxv16i1>;
def llvm_nxv16i8_ty : LLVMType<nxv16i8>;
def llvm_nxv4i32_ty : LLVMType<nxv4i32>;
def llvm_nxv2i64_ty : LLVMType<nxv2i64>;
def llvm_nxv8f16_ty : LLVMType<nxv8f16>;
def llvm_nxv4f32_ty : LLVMType<nxv4f32>;
def llvm_nxv2f64_ty : LLVMType<nxv2f64>;

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  // This class of intrinsics is not intended to be useful within LLVM IR but
  // is instead here to support some of the more rigid parts of the ACLE.
  class Builtin_SVCVT<string name, LLVMType OUT, LLVMType IN>
  : GCCBuiltin<"__builtin_sve_" # name>,
    Intrinsic<[OUT], [OUT, llvm_nxv16i1_ty, IN], [IntrNoMem]>;
}

//===----------------------------------------------------------------------===//
// Floating-point comparisons

def int_aarch64_sve_fcvtzs_i32f16 : Builtin_SVCVT<"svcvt_s32_f16_m", llvm_nxv4i32_ty, llvm_nxv8f16_ty>;
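
// Illustratively (this note is not in the original file), the def above yields
// an intrinsic with this signature:
//   <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f16(<vscale x 4 x i32>,
//                                                      <vscale x 16 x i1>,
//                                                      <vscale x 8 x half>)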