/* ACLE support for AArch64 SVE (__ARM_FEATURE_SVE2 intrinsics)
   Copyright (C) 2020-2025 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "insn-codes.h"
#include "basic-block.h"
#include "fold-const.h"
#include "gimple-iterator.h"
#include "tree-vector-builder.h"
#include "rtx-vector-builder.h"
#include "vec-perm-indices.h"
#include "aarch64-sve-builtins.h"
#include "aarch64-sve-builtins-shapes.h"
#include "aarch64-sve-builtins-base.h"
#include "aarch64-sve-builtins-sve2.h"
#include "aarch64-sve-builtins-functions.h"

using namespace aarch64_sve;

namespace {

/* Return the UNSPEC_CDOT* unspec for rotation amount ROT.  */
static int
unspec_cdot (int rot)
{
  switch (rot)
    {
    case 0: return UNSPEC_CDOT;
    case 90: return UNSPEC_CDOT90;
    case 180: return UNSPEC_CDOT180;
    case 270: return UNSPEC_CDOT270;
    default: gcc_unreachable ();
    }
}

/* Return the UNSPEC_SQRDCMLAH* unspec for rotation amount ROT.  */
static int
unspec_sqrdcmlah (int rot)
{
  switch (rot)
    {
    case 0: return UNSPEC_SQRDCMLAH;
    case 90: return UNSPEC_SQRDCMLAH90;
    case 180: return UNSPEC_SQRDCMLAH180;
    case 270: return UNSPEC_SQRDCMLAH270;
    default: gcc_unreachable ();
    }
}

class ld1uxq_st1xq_base : public function_base
{
public:
  CONSTEXPR ld1uxq_st1xq_base (machine_mode memory_mode)
    : m_memory_mode (memory_mode) {}

  tree
  memory_scalar_type (const function_instance &fi) const override
  {
    return fi.scalar_type (0);
  }

  machine_mode
  memory_vector_mode (const function_instance &) const override
  {
    return m_memory_mode;
  }

protected:
  machine_mode m_memory_mode;
};

class ld234q_st234q_base : public full_width_access
{
public:
  CONSTEXPR ld234q_st234q_base (unsigned int vector_count, machine_mode mode)
    : full_width_access (vector_count), m_mode (mode)
  {}

  machine_mode
  memory_vector_mode (const function_instance &) const override
  {
    return m_mode;
  }

protected:
  machine_mode m_mode;
};

class svaba_impl : public function_base
{
public:
  gimple *
  fold (gimple_folder &f) const override
  {
    /* Fold to svabd if op1 is all zeros.  */
    tree op1 = gimple_call_arg (f.call, 0);
    if (!integer_zerop (op1))
      return NULL;
    function_instance instance ("svabd", functions::svabd,
                                shapes::binary_opt_n, f.mode_suffix_id,
                                f.type_suffix_ids, GROUP_none, PRED_x,
                                FPM_unused);
    gcall *call = f.redirect_call (instance);
    /* Add a ptrue as predicate, because unlike svaba, svabd is
       a predicated intrinsic.  */
    gimple_call_set_arg (call, 0, build_all_ones_cst (f.gp_type ()));
    return call;
  }

  rtx
  expand (function_expander &e) const override
  {
    rtx_code max_code = e.type_suffix (0).unsigned_p ? UMAX : SMAX;
    machine_mode mode = e.vector_mode (0);
    return e.use_exact_insn (code_for_aarch64_sve2_aba (max_code, mode));
  }
};

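/* Illustrative example of the fold above (assuming the standard ACLE
   spellings; the intrinsic names below are not defined in this file):

     svaba_s32 (svdup_s32 (0), x, y)
       -->  svabd_s32_x (svptrue_b32 (), x, y)

   The ptrue is added because svabd is a predicated intrinsic while
   svaba is not.  */
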
class svxar_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const override
  {
    /* aarch64_sve2_xar represents this operation with a left-rotate RTX.
       Convert the right-rotate amount from the intrinsic to fit this.  */
    machine_mode mode = e.vector_mode (0);
    HOST_WIDE_INT rot = GET_MODE_UNIT_BITSIZE (mode)
                        - INTVAL (e.args[2]);
    e.args[2] = aarch64_simd_gen_const_vector_dup (mode, rot);
    return e.use_exact_insn (code_for_aarch64_sve2_xar (mode));
  }
};

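/* Illustrative example of the conversion above: for 32-bit elements, a
   right-rotate amount of 8 becomes a left-rotate amount of 32 - 8 = 24,
   which is then broadcast as the immediate operand of the left-rotating
   aarch64_sve2_xar pattern.  */
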
class svcdot_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const override
  {
    /* Convert the rotation amount into a specific unspec.  */
    int rot = INTVAL (e.args.pop ());
    return e.use_exact_insn (code_for_aarch64_sve (unspec_cdot (rot),
                                                   e.vector_mode (0)));
  }
};

class svcdot_lane_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const override
  {
    /* Convert the rotation amount into a specific unspec.  */
    int rot = INTVAL (e.args.pop ());
    return e.use_exact_insn (code_for_aarch64_lane (unspec_cdot (rot),
                                                    e.vector_mode (0)));
  }
};

class svclamp_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const override
  {
    auto mode = e.tuple_mode (0);
    insn_code icode;
    if (e.type_suffix (0).float_p)
      icode = (e.vectors_per_tuple () > 1
               ? code_for_aarch64_sve_fclamp_single (mode)
               : code_for_aarch64_sve_fclamp (mode));
    else
      {
        auto max = e.type_suffix (0).unsigned_p ? UMAX : SMAX;
        icode = (e.vectors_per_tuple () > 1
                 ? code_for_aarch64_sve_clamp_single (max, mode)
                 : code_for_aarch64_sve_clamp (max, mode));
      }
    return e.use_exact_insn (icode);
  }
};

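/* Illustrative mapping performed by the expander above (a sketch, not an
   exhaustive list): float type suffixes select the fclamp patterns,
   integer suffixes the clamp patterns with SMAX or UMAX, and multi-vector
   tuples (vectors_per_tuple () > 1) the corresponding *_single patterns.  */
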
class svcvtl_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_aarch64_sve_cvtl (e.result_mode ()));
  }
};

class svcvt_fp8_impl : public function_base
{
public:
  CONSTEXPR svcvt_fp8_impl (int unspec) : m_unspec (unspec) {}

  rtx
  expand (function_expander &e) const override
  {
    auto icode = code_for_aarch64_sve2_fp8_cvt (m_unspec, e.result_mode ());
    return e.use_exact_insn (icode);
  }

  int m_unspec;
};

class svcvtn_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const override
  {
    insn_code icode;
    if (e.fpm_mode == FPM_set)
      icode = code_for_aarch64_sve2_fp8_cvtn (GET_MODE (e.args[0]));
    else
      icode = code_for_aarch64_sve_cvtn (e.result_mode ());
    return e.use_exact_insn (icode);
  }
};

class svcvtxnt_impl : public CODE_FOR_MODE1 (aarch64_sve2_cvtxnt)
{
public:
  gimple *
  fold (gimple_folder &f) const override
  {
    if (f.pred == PRED_x && is_pfalse (gimple_call_arg (f.call, 1)))
      return f.fold_call_to (build_zero_cst (TREE_TYPE (f.lhs)));
    return NULL;
  }
};

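/* Illustrative example of the fold above: an _x call whose predicate
   argument is provably svpfalse_b () has no active lanes, so the result
   can be folded to a zero vector of the return type.  */
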
class svdup_laneq_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_aarch64_sve_dupq (e.result_mode ()));
  }
};

class svextq_impl : public permute
{
public:
  gimple *
  fold (gimple_folder &f) const override
  {
    unsigned int index = tree_to_uhwi (gimple_call_arg (f.call, 2));
    machine_mode mode = f.vector_mode (0);
    unsigned int subelts = 128U / GET_MODE_UNIT_BITSIZE (mode);
    poly_uint64 nelts = GET_MODE_NUNITS (mode);
    vec_perm_builder builder (nelts, subelts, 3);
    for (unsigned int i = 0; i < 3; ++i)
      for (unsigned int j = 0; j < subelts; ++j)
        {
          if (index + j < subelts)
            builder.quick_push (i * subelts + index + j);
          else
            builder.quick_push (i * subelts + index + j - subelts + nelts);
        }
    return fold_permute (f, builder);
  }

  rtx
  expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_aarch64_sve_extq (e.vector_mode (0)));
  }
};

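/* Illustrative example of the index pattern built above: for 32-bit
   elements each 128-bit quadword holds subelts = 4 elements, so svextq
   with index 1 selects, within every quadword, elements 1..3 of the
   first operand followed by element 0 of the same quadword of the
   second operand (hence the "+ nelts" adjustment).  */
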
class svld1q_gather_impl : public full_width_access
{
public:
  unsigned int
  call_properties (const function_instance &) const override
  {
    return CP_READ_MEMORY;
  }

  rtx
  expand (function_expander &e) const override
  {
    e.prepare_gather_address_operands (1, false);
    return e.use_exact_insn (CODE_FOR_aarch64_gather_ld1q);
  }
};

class svld1uxq_impl : public ld1uxq_st1xq_base
{
public:
  using ld1uxq_st1xq_base::ld1uxq_st1xq_base;

  unsigned int
  call_properties (const function_instance &) const override
  {
    return CP_READ_MEMORY;
  }

  rtx
  expand (function_expander &e) const override
  {
    insn_code icode = code_for_aarch64_sve_ld1_extendq (e.vector_mode (0));
    return e.use_contiguous_load_insn (icode);
  }
};

class svld234q_impl : public ld234q_st234q_base
{
public:
  using ld234q_st234q_base::ld234q_st234q_base;

  unsigned int
  call_properties (const function_instance &) const override
  {
    return CP_READ_MEMORY;
  }

  rtx
  expand (function_expander &e) const override
  {
    insn_code icode = code_for_aarch64_sve_ldnq (e.result_mode ());
    return e.use_contiguous_load_insn (icode);
  }
};

class svldnt1_gather_impl : public full_width_access
{
public:
  unsigned int
  call_properties (const function_instance &) const override
  {
    return CP_READ_MEMORY;
  }

  rtx
  expand (function_expander &e) const override
  {
    e.prepare_gather_address_operands (1, false);
    machine_mode mem_mode = e.memory_vector_mode ();
    return e.use_exact_insn (code_for_aarch64_gather_ldnt (mem_mode));
  }
};

/* Implements extending forms of svldnt1_gather.  */
class svldnt1_gather_extend_impl : public extending_load
{
public:
  using extending_load::extending_load;

  rtx
  expand (function_expander &e) const override
  {
    e.prepare_gather_address_operands (1, false);
    /* Add a constant predicate for the extension rtx.  */
    e.args.quick_push (CONSTM1_RTX (VNx16BImode));
    insn_code icode = code_for_aarch64_gather_ldnt (extend_rtx_code (),
                                                    e.vector_mode (0),
                                                    e.memory_vector_mode ());
    return e.use_exact_insn (icode);
  }
};

/* Implements both svmatch and svnmatch; the unspec parameter decides
   between them.  */
class svmatch_svnmatch_impl : public function_base
{
public:
  CONSTEXPR svmatch_svnmatch_impl (int unspec) : m_unspec (unspec) {}

  gimple *
  fold (gimple_folder &f) const override
  {
    tree pg = gimple_call_arg (f.call, 0);
    if (is_pfalse (pg))
      return f.fold_call_to (pg);
    return NULL;
  }

  rtx
  expand (function_expander &e) const override
  {
    /* These are UNSPEC_PRED_Z operations and so need a hint operand.  */
    e.add_ptrue_hint (0, e.gp_mode (0));
    return e.use_exact_insn (code_for_aarch64_pred (m_unspec,
                                                    e.vector_mode (0)));
  }

  int m_unspec;
};

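/* Illustrative example of the fold above (assuming the standard ACLE
   spellings): svmatch_u8 (svpfalse_b (), x, y) can never set a lane, so
   it folds to the all-false predicate itself.  */
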
/* Implements both svmovlb and svmovlt; the unspec parameters decide
   between them.  */
class svmovl_lb_impl : public unspec_based_function_base
{
public:
  using unspec_based_function_base::unspec_based_function_base;

  rtx
  expand (function_expander &e) const override
  {
    e.args.quick_push (const0_rtx);
    return e.map_to_unspecs (m_unspec_for_sint, m_unspec_for_uint,
                             m_unspec_for_fp);
  }
};

class svpext_lane_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const override
  {
    unsigned int bits = e.type_suffix (0).element_bits;
    return e.use_exact_insn (e.vectors_per_tuple () == 2
                             ? code_for_aarch64_sve_pextx2 (bits)
                             : code_for_aarch64_sve_pext (bits));
  }
};

class svpmov_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const override
  {
    insn_code icode;
    if (e.pred == PRED_z)
      icode = code_for_aarch64_pmov_to (e.vector_mode (0));
    else
      icode = code_for_aarch64_pmov_from (e.vector_mode (0));
    return e.use_exact_insn (icode);
  }
};

class svpmov_lane_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const override
  {
    insn_code icode;
    if (e.pred == PRED_m)
      icode = code_for_aarch64_pmov_lane_to (e.vector_mode (0));
    else if (e.args[1] == const0_rtx)
      icode = code_for_aarch64_pmov_from (e.vector_mode (0));
    else
      icode = code_for_aarch64_pmov_lane_from (e.vector_mode (0));
    return e.use_exact_insn (icode);
  }
};

class svpsel_lane_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const override
  {
    unsigned int bits = e.type_suffix (0).element_bits;
    return e.use_exact_insn (code_for_aarch64_sve_psel (bits));
  }
};

class svqcadd_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const override
  {
    /* Convert the rotation amount into a specific unspec.  */
    int rot = INTVAL (e.args.pop ());
    if (rot == 90)
      return e.map_to_unspecs (UNSPEC_SQCADD90, -1, -1);
    if (rot == 270)
      return e.map_to_unspecs (UNSPEC_SQCADD270, -1, -1);
    gcc_unreachable ();
  }
};

class svqrdcmlah_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const override
  {
    /* Convert the rotation amount into a specific unspec.  */
    int rot = INTVAL (e.args.pop ());
    return e.use_exact_insn (code_for_aarch64_sve (unspec_sqrdcmlah (rot),
                                                   e.vector_mode (0)));
  }
};

class svqrdcmlah_lane_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const override
  {
    /* Convert the rotation amount into a specific unspec.  */
    int rot = INTVAL (e.args.pop ());
    return e.use_exact_insn (code_for_aarch64_lane (unspec_sqrdcmlah (rot),
                                                    e.vector_mode (0)));
  }
};

class svqrshl_impl : public unspec_based_function
{
public:
  CONSTEXPR svqrshl_impl ()
    : unspec_based_function (UNSPEC_SQRSHL, UNSPEC_UQRSHL, -1) {}

  gimple *
  fold (gimple_folder &f) const override
  {
    if (tree amount = uniform_integer_cst_p (gimple_call_arg (f.call, 2)))
      {
        if (wi::to_widest (amount) >= 0)
          {
            /* The rounding has no effect, and [SU]QSHL has immediate forms
               that we can use for sensible shift amounts.  */
            function_instance instance ("svqshl", functions::svqshl,
                                        shapes::binary_int_opt_n, MODE_n,
                                        f.type_suffix_ids, GROUP_none, f.pred,
                                        FPM_unused);
            return f.redirect_call (instance);
          }
        else
          {
            /* The saturation has no effect, and [SU]RSHL has immediate forms
               that we can use for sensible shift amounts.  */
            function_instance instance ("svrshl", functions::svrshl,
                                        shapes::binary_int_opt_single_n,
                                        MODE_n, f.type_suffix_ids, GROUP_none,
                                        f.pred, FPM_unused);
            return f.redirect_call (instance);
          }
      }
    return NULL;
  }
};

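/* Illustrative examples of the fold above (assuming the standard ACLE
   spellings):

     svqrshl_n_s16_x (pg, x, 3)   -->  svqshl_n_s16_x (pg, x, 3)
     svqrshl_n_s16_x (pg, x, -3)  -->  svrshl_n_s16_x (pg, x, -3)

   Rounding is irrelevant for non-negative amounts and saturation is
   irrelevant for negative amounts, so each case can use the simpler
   intrinsic and its immediate form.  */
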
class svqshl_impl : public unspec_based_function
{
public:
  CONSTEXPR svqshl_impl ()
    : unspec_based_function (UNSPEC_SQSHL, UNSPEC_UQSHL, -1) {}

  gimple *
  fold (gimple_folder &f) const override
  {
    if (tree amount = uniform_integer_cst_p (gimple_call_arg (f.call, 2)))
      {
        int element_bits = f.type_suffix (0).element_bits;
        if (wi::to_widest (amount) >= -element_bits
            && wi::to_widest (amount) < 0)
          {
            /* The saturation has no effect for right shifts, so we can
               use the immediate form of ASR or LSR.  */
            amount = wide_int_to_tree (TREE_TYPE (amount),
                                       -wi::to_wide (amount));
            function_instance instance ("svasr", functions::svasr,
                                        shapes::binary_uint_opt_n, MODE_n,
                                        f.type_suffix_ids, GROUP_none, f.pred,
                                        FPM_unused);
            if (f.type_suffix (0).unsigned_p)
              {
                instance.base_name = "svlsr";
                instance.base = functions::svlsr;
              }
            gcall *call = f.redirect_call (instance);
            gimple_call_set_arg (call, 2, amount);
            return call;
          }
      }
    return NULL;
  }
};

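/* Illustrative examples of the fold above (assuming the standard ACLE
   spellings):

     svqshl_n_s32_x (pg, x, -5)  -->  svasr_n_s32_x (pg, x, 5)
     svqshl_n_u32_x (pg, x, -5)  -->  svlsr_n_u32_x (pg, x, 5)

   Saturation cannot occur for right shifts, so the amount is negated
   and the call is redirected to the plain shift.  */
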
class svrshl_impl : public unspec_based_function
{
public:
  CONSTEXPR svrshl_impl ()
    : unspec_based_function (UNSPEC_SRSHL, UNSPEC_URSHL, -1) {}

  gimple *
  fold (gimple_folder &f) const override
  {
    if (f.vectors_per_tuple () > 1)
      return NULL;

    if (tree amount = uniform_integer_cst_p (gimple_call_arg (f.call, 2)))
      {
        if (wi::to_widest (amount) >= 0)
          {
            /* The rounding has no effect, and LSL has immediate forms
               that we can use for sensible shift amounts.  */
            function_instance instance ("svlsl", functions::svlsl,
                                        shapes::binary_uint_opt_n, MODE_n,
                                        f.type_suffix_ids, GROUP_none, f.pred,
                                        FPM_unused);
            gcall *call = f.redirect_call (instance);
            gimple_call_set_arg (call, 2, amount);
            return call;
          }
        int element_bits = f.type_suffix (0).element_bits;
        if (wi::to_widest (amount) >= -element_bits)
          {
            /* The shift amount is in range of [SU]RSHR.  */
            amount = wide_int_to_tree (TREE_TYPE (amount),
                                       -wi::to_wide (amount));
            function_instance instance ("svrshr", functions::svrshr,
                                        shapes::shift_right_imm, MODE_n,
                                        f.type_suffix_ids, GROUP_none, f.pred,
                                        FPM_unused);
            gcall *call = f.redirect_call (instance);
            gimple_call_set_arg (call, 2, amount);
            return call;
          }
      }
    return NULL;
  }
};

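/* Illustrative examples of the fold above (assuming the standard ACLE
   spellings):

     svrshl_n_s8_x (pg, x, 2)   -->  svlsl_n_s8_x (pg, x, 2)
     svrshl_n_s8_x (pg, x, -2)  -->  svrshr_n_s8_x (pg, x, 2)

   Left shifts never round, and in-range negative amounts map directly
   onto the rounding right-shift immediate form.  */
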
class svsqadd_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const override
  {
    machine_mode mode = e.vector_mode (0);
    if (e.pred == PRED_x
        && aarch64_sve_sqadd_sqsub_immediate_p (mode, e.args[2], false))
      return e.map_to_rtx_codes (UNKNOWN, US_PLUS, -1, -1);
    return e.map_to_unspecs (-1, UNSPEC_USQADD, -1);
  }
};

class svsra_impl : public function_base
{
public:
  gimple *
  fold (gimple_folder &f) const override
  {
    /* Fold to svlsr/svasr if op1 is all zeros.  */
    tree op1 = gimple_call_arg (f.call, 0);
    if (!integer_zerop (op1))
      return NULL;
    function_instance instance ("svlsr", functions::svlsr,
                                shapes::binary_uint_opt_n, MODE_n,
                                f.type_suffix_ids, GROUP_none, PRED_x,
                                FPM_unused);
    if (!f.type_suffix (0).unsigned_p)
      {
        instance.base_name = "svasr";
        instance.base = functions::svasr;
      }
    gcall *call = f.redirect_call (instance);
    /* Add a ptrue as predicate, because unlike svsra, svlsr/svasr are
       predicated intrinsics.  */
    gimple_call_set_arg (call, 0, build_all_ones_cst (f.gp_type ()));
    /* For svsra, the shift amount (imm3) is uint64_t for all function types,
       but for svlsr/svasr, imm3 has the same width as the function type.  */
    tree imm3 = gimple_call_arg (f.call, 2);
    tree imm3_prec = wide_int_to_tree (f.scalar_type (0),
                                       wi::to_widest (imm3));
    gimple_call_set_arg (call, 2, imm3_prec);
    return call;
  }

  rtx
  expand (function_expander &e) const override
  {
    rtx_code shift_code = e.type_suffix (0).unsigned_p ? LSHIFTRT : ASHIFTRT;
    machine_mode mode = e.vector_mode (0);
    return e.use_exact_insn (code_for_aarch64_sve_add (shift_code, mode));
  }
};

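/* Illustrative example of the fold above (assuming the standard ACLE
   spellings): with a zero accumulator,

     svsra_n_s32 (svdup_s32 (0), x, 3)

   behaves like svasr_n_s32_x (svptrue_b32 (), x, 3) (svlsr for unsigned
   types), with the uint64_t shift amount re-narrowed to the element
   type expected by svasr/svlsr.  */
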
class svst1q_scatter_impl : public full_width_access
{
public:
  unsigned int
  call_properties (const function_instance &) const override
  {
    return CP_WRITE_MEMORY;
  }

  rtx
  expand (function_expander &e) const override
  {
    rtx data = e.args.last ();
    e.args.last () = force_lowpart_subreg (VNx2DImode, data, GET_MODE (data));
    e.prepare_gather_address_operands (1, false);
    return e.use_exact_insn (CODE_FOR_aarch64_scatter_st1q);
  }
};

class svst1xq_impl : public ld1uxq_st1xq_base
{
public:
  using ld1uxq_st1xq_base::ld1uxq_st1xq_base;

  unsigned int
  call_properties (const function_instance &) const override
  {
    return CP_WRITE_MEMORY;
  }

  rtx
  expand (function_expander &e) const override
  {
    insn_code icode = code_for_aarch64_sve_st1_truncq (e.vector_mode (0));
    return e.use_contiguous_store_insn (icode);
  }
};

class svst234q_impl : public ld234q_st234q_base
{
public:
  using ld234q_st234q_base::ld234q_st234q_base;

  unsigned int
  call_properties (const function_instance &) const override
  {
    return CP_WRITE_MEMORY;
  }

  rtx
  expand (function_expander &e) const override
  {
    machine_mode tuple_mode = GET_MODE (e.args.last ());
    insn_code icode = code_for_aarch64_sve_stnq (tuple_mode);
    return e.use_contiguous_store_insn (icode);
  }
};

class svstnt1_scatter_impl : public full_width_access
{
public:
  unsigned int
  call_properties (const function_instance &) const override
  {
    return CP_WRITE_MEMORY;
  }

  rtx
  expand (function_expander &e) const override
  {
    e.prepare_gather_address_operands (1, false);
    machine_mode mem_mode = e.memory_vector_mode ();
    return e.use_exact_insn (code_for_aarch64_scatter_stnt (mem_mode));
  }
};

/* Implements truncating forms of svstnt1_scatter.  */
class svstnt1_scatter_truncate_impl : public truncating_store
{
public:
  using truncating_store::truncating_store;

  rtx
  expand (function_expander &e) const override
  {
    e.prepare_gather_address_operands (1, false);
    insn_code icode = code_for_aarch64_scatter_stnt (e.vector_mode (0),
                                                     e.memory_vector_mode ());
    return e.use_exact_insn (icode);
  }
};

class svtbl2_impl : public quiet<multi_vector_function>
{
public:
  CONSTEXPR svtbl2_impl () : quiet<multi_vector_function> (2) {}

  rtx
  expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_aarch64_sve2_tbl2 (e.vector_mode (0)));
  }
};

class svunpk_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const override
  {
    optab op = (e.type_suffix (0).unsigned_p ? zext_optab : sext_optab);
    insn_code icode = convert_optab_handler (op, e.result_mode (),
                                             GET_MODE (e.args[0]));
    return e.use_exact_insn (icode);
  }
};

class svuqadd_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const override
  {
    machine_mode mode = e.vector_mode (0);
    if (e.pred == PRED_x
        && aarch64_sve_arith_immediate_p (mode, e.args[2], false))
      return e.use_unpred_insn (code_for_aarch64_sve_suqadd_const (mode));
    return e.map_to_unspecs (UNSPEC_SUQADD, -1, -1);
  }
};

/* Implements svuzpq1 and svuzpq2.  */
class svuzpq_impl : public binary_permute
{
public:
  CONSTEXPR svuzpq_impl (unsigned int base)
    : binary_permute (base ? UNSPEC_UZPQ2 : UNSPEC_UZPQ1), m_base (base) {}

  gimple *
  fold (gimple_folder &f) const override
  {
    machine_mode mode = f.vector_mode (0);
    unsigned int subelts = 128U / GET_MODE_UNIT_BITSIZE (mode);
    poly_uint64 nelts = GET_MODE_NUNITS (mode);
    vec_perm_builder builder (nelts, subelts, 3);
    for (unsigned int i = 0; i < 3; ++i)
      {
        for (unsigned int j = 0; j < subelts / 2; ++j)
          builder.quick_push (m_base + j * 2 + i * subelts);
        for (unsigned int j = 0; j < subelts / 2; ++j)
          builder.quick_push (m_base + j * 2 + i * subelts + nelts);
      }
    return fold_permute (f, builder);
  }

  /* 0 for svuzpq1, 1 for svuzpq2.  */
  unsigned int m_base;
};

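/* Illustrative example of the index pattern built above: for 32-bit
   elements (subelts = 4), svuzpq1 selects the even-numbered elements of
   each 128-bit quadword and svuzpq2 the odd-numbered ones, giving
   per-quadword indices { 0, 2, n, n + 2 } and { 1, 3, n + 1, n + 3 }
   respectively, where n is the number of elements in one input.  */
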
/* Implements both svwhilerw and svwhilewr; the unspec parameter decides
   between them.  */
class svwhilerw_svwhilewr_impl : public full_width_access
{
public:
  CONSTEXPR svwhilerw_svwhilewr_impl (int unspec) : m_unspec (unspec) {}

  rtx
  expand (function_expander &e) const override
  {
    for (unsigned int i = 0; i < 2; ++i)
      e.args[i] = e.convert_to_pmode (e.args[i]);
    return e.use_exact_insn (code_for_while (m_unspec, Pmode, e.gp_mode (0)));
  }

  int m_unspec;
};

/* Implements svzipq1 and svzipq2.  */
class svzipq_impl : public binary_permute
{
public:
  CONSTEXPR svzipq_impl (unsigned int base)
    : binary_permute (base ? UNSPEC_ZIPQ2 : UNSPEC_ZIPQ1), m_base (base) {}

  gimple *
  fold (gimple_folder &f) const override
  {
    machine_mode mode = f.vector_mode (0);
    unsigned int pairs = 64U / GET_MODE_UNIT_BITSIZE (mode);
    poly_uint64 nelts = GET_MODE_NUNITS (mode);
    auto base = m_base * pairs;
    vec_perm_builder builder (nelts, pairs * 2, 3);
    for (unsigned int i = 0; i < 3; ++i)
      for (unsigned int j = 0; j < pairs; ++j)
        {
          builder.quick_push (base + j + i * pairs * 2);
          builder.quick_push (base + j + i * pairs * 2 + nelts);
        }
    return fold_permute (f, builder);
  }

  /* 0 for svzipq1, 1 for svzipq2.  */
  unsigned int m_base;
};

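/* Illustrative example of the index pattern built above: for 32-bit
   elements (pairs = 2), svzipq1 interleaves the low halves of each
   128-bit quadword of the two inputs and svzipq2 the high halves,
   giving per-quadword indices { 0, n, 1, n + 1 } and
   { 2, n + 2, 3, n + 3 } respectively, where n is the number of
   elements in one input.  */
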
class svluti_lane_impl : public function_base
{
public:
  CONSTEXPR svluti_lane_impl (unsigned int bits) : m_bits (bits)
  {}

  rtx
  expand (function_expander &e) const override
  {
    auto mode = e.tuple_mode (0);
    return e.use_exact_insn (code_for_aarch64_sve_luti (m_bits, mode));
  }

  unsigned int m_bits;
};

} /* end anonymous namespace */

namespace aarch64_sve {

FUNCTION (svaba, svaba_impl,)
FUNCTION (svabalb, unspec_based_add_function, (UNSPEC_SABDLB,
                                               UNSPEC_UABDLB, -1))
FUNCTION (svabalt, unspec_based_add_function, (UNSPEC_SABDLT,
                                               UNSPEC_UABDLT, -1))
FUNCTION (svabdlb, unspec_based_function, (UNSPEC_SABDLB, UNSPEC_UABDLB, -1))
FUNCTION (svabdlt, unspec_based_function, (UNSPEC_SABDLT, UNSPEC_UABDLT, -1))
FUNCTION (svadalp, unspec_based_function, (UNSPEC_SADALP, UNSPEC_UADALP, -1))
FUNCTION (svadclb, unspec_based_function, (-1, UNSPEC_ADCLB, -1))
FUNCTION (svadclt, unspec_based_function, (-1, UNSPEC_ADCLT, -1))
FUNCTION (svaddhnb, unspec_based_function, (UNSPEC_ADDHNB, UNSPEC_ADDHNB, -1))
FUNCTION (svaddhnt, unspec_based_function, (UNSPEC_ADDHNT, UNSPEC_ADDHNT, -1))
FUNCTION (svaddlb, unspec_based_function, (UNSPEC_SADDLB, UNSPEC_UADDLB, -1))
FUNCTION (svaddlbt, unspec_based_function, (UNSPEC_SADDLBT, -1, -1))
FUNCTION (svaddlt, unspec_based_function, (UNSPEC_SADDLT, UNSPEC_UADDLT, -1))
FUNCTION (svaddp, unspec_based_pred_function, (UNSPEC_ADDP, UNSPEC_ADDP,
                                               UNSPEC_FADDP))
FUNCTION (svaddqv, reduction, (UNSPEC_ADDQV, UNSPEC_ADDQV, UNSPEC_FADDQV))
FUNCTION (svaddwb, unspec_based_function, (UNSPEC_SADDWB, UNSPEC_UADDWB, -1))
FUNCTION (svaddwt, unspec_based_function, (UNSPEC_SADDWT, UNSPEC_UADDWT, -1))
FUNCTION (svaesd, fixed_insn_function, (CODE_FOR_aarch64_sve2_aesd))
FUNCTION (svaese, fixed_insn_function, (CODE_FOR_aarch64_sve2_aese))
FUNCTION (svaesimc, fixed_insn_function, (CODE_FOR_aarch64_sve2_aesimc))
FUNCTION (svaesmc, fixed_insn_function, (CODE_FOR_aarch64_sve2_aesmc))
FUNCTION (svamax, cond_or_uncond_unspec_function,
          (UNSPEC_COND_FAMAX, UNSPEC_FAMAX))
FUNCTION (svamin, cond_or_uncond_unspec_function,
          (UNSPEC_COND_FAMIN, UNSPEC_FAMIN))
FUNCTION (svandqv, reduction, (UNSPEC_ANDQV, UNSPEC_ANDQV, -1))
FUNCTION (svbcax, CODE_FOR_MODE0 (aarch64_sve2_bcax),)
FUNCTION (svbdep, unspec_based_function, (UNSPEC_BDEP, UNSPEC_BDEP, -1))
FUNCTION (svbext, unspec_based_function, (UNSPEC_BEXT, UNSPEC_BEXT, -1))
FUNCTION (svbfmlslb, fixed_insn_function, (CODE_FOR_aarch64_sve_bfmlslbvnx4sf))
FUNCTION (svbfmlslb_lane, fixed_insn_function,
          (CODE_FOR_aarch64_sve_bfmlslb_lanevnx4sf))
FUNCTION (svbfmlslt, fixed_insn_function, (CODE_FOR_aarch64_sve_bfmlsltvnx4sf))
FUNCTION (svbfmlslt_lane, fixed_insn_function,
          (CODE_FOR_aarch64_sve_bfmlslt_lanevnx4sf))
FUNCTION (svbgrp, unspec_based_function, (UNSPEC_BGRP, UNSPEC_BGRP, -1))
FUNCTION (svbsl, CODE_FOR_MODE0 (aarch64_sve2_bsl),)
FUNCTION (svbsl1n, CODE_FOR_MODE0 (aarch64_sve2_bsl1n),)
FUNCTION (svbsl2n, CODE_FOR_MODE0 (aarch64_sve2_bsl2n),)
FUNCTION (svcdot, svcdot_impl,)
FUNCTION (svcdot_lane, svcdot_lane_impl,)
FUNCTION (svclamp, svclamp_impl,)
FUNCTION (svcvt1, svcvt_fp8_impl, (UNSPEC_F1CVT))
FUNCTION (svcvt2, svcvt_fp8_impl, (UNSPEC_F2CVT))
FUNCTION (svcvtl, svcvtl_impl,)
FUNCTION (svcvtlt1, svcvt_fp8_impl, (UNSPEC_F1CVTLT))
FUNCTION (svcvtlt2, svcvt_fp8_impl, (UNSPEC_F2CVTLT))
FUNCTION (svcvtlt, unspec_based_function, (-1, -1, UNSPEC_COND_FCVTLT))
FUNCTION (svcvtn, svcvtn_impl,)
FUNCTION (svcvtnb, fixed_insn_function, (CODE_FOR_aarch64_sve2_fp8_cvtnbvnx16qi))
FUNCTION (svcvtx, unspec_based_function, (-1, -1, UNSPEC_COND_FCVTX))
FUNCTION (svcvtxnt, svcvtxnt_impl,)
FUNCTION (svdup_laneq, svdup_laneq_impl,)
FUNCTION (sveor3, CODE_FOR_MODE0 (aarch64_sve2_eor3),)
FUNCTION (sveorbt, unspec_based_function, (UNSPEC_EORBT, UNSPEC_EORBT, -1))
FUNCTION (sveorqv, reduction, (UNSPEC_EORQV, UNSPEC_EORQV, -1))
FUNCTION (sveortb, unspec_based_function, (UNSPEC_EORTB, UNSPEC_EORTB, -1))
FUNCTION (svextq, svextq_impl,)
FUNCTION (svhadd, unspec_based_function, (UNSPEC_SHADD, UNSPEC_UHADD, -1))
FUNCTION (svhsub, unspec_based_function, (UNSPEC_SHSUB, UNSPEC_UHSUB, -1))
FUNCTION (svhistcnt, CODE_FOR_MODE0 (aarch64_sve2_histcnt),)
FUNCTION (svhistseg, CODE_FOR_MODE0 (aarch64_sve2_histseg),)
FUNCTION (svhsubr, unspec_based_function_rotated, (UNSPEC_SHSUB,
                                                   UNSPEC_UHSUB, -1))
FUNCTION (svld1q_gather, svld1q_gather_impl,)
FUNCTION (svld1udq, svld1uxq_impl, (VNx1DImode))
FUNCTION (svld1uwq, svld1uxq_impl, (VNx1SImode))
FUNCTION (svld2q, svld234q_impl, (2, VNx2TImode))
FUNCTION (svld3q, svld234q_impl, (3, VNx3TImode))
FUNCTION (svld4q, svld234q_impl, (4, VNx4TImode))
FUNCTION (svldnt1_gather, svldnt1_gather_impl,)
FUNCTION (svldnt1sb_gather, svldnt1_gather_extend_impl, (TYPE_SUFFIX_s8))
FUNCTION (svldnt1sh_gather, svldnt1_gather_extend_impl, (TYPE_SUFFIX_s16))
FUNCTION (svldnt1sw_gather, svldnt1_gather_extend_impl, (TYPE_SUFFIX_s32))
FUNCTION (svldnt1ub_gather, svldnt1_gather_extend_impl, (TYPE_SUFFIX_u8))
FUNCTION (svldnt1uh_gather, svldnt1_gather_extend_impl, (TYPE_SUFFIX_u16))
FUNCTION (svldnt1uw_gather, svldnt1_gather_extend_impl, (TYPE_SUFFIX_u32))
FUNCTION (svlogb, unspec_based_function, (-1, -1, UNSPEC_COND_FLOGB))
FUNCTION (svmatch, svmatch_svnmatch_impl, (UNSPEC_MATCH))
FUNCTION (svmaxnmp, unspec_based_pred_function, (-1, -1, UNSPEC_FMAXNMP))
FUNCTION (svmaxnmqv, reduction, (-1, -1, UNSPEC_FMAXNMQV))
FUNCTION (svmaxp, unspec_based_pred_function, (UNSPEC_SMAXP, UNSPEC_UMAXP,
                                               UNSPEC_FMAXP))
FUNCTION (svmaxqv, reduction, (UNSPEC_SMAXQV, UNSPEC_UMAXQV, UNSPEC_FMAXQV))
FUNCTION (svminnmp, unspec_based_pred_function, (-1, -1, UNSPEC_FMINNMP))
FUNCTION (svminnmqv, reduction, (-1, -1, UNSPEC_FMINNMQV))
FUNCTION (svminp, unspec_based_pred_function, (UNSPEC_SMINP, UNSPEC_UMINP,
                                               UNSPEC_FMINP))
FUNCTION (svminqv, reduction, (UNSPEC_SMINQV, UNSPEC_UMINQV, UNSPEC_FMINQV))
FUNCTION (svmlalb_lane, unspec_based_mla_lane_function,
          (UNSPEC_SMULLB, UNSPEC_UMULLB, UNSPEC_FMLALB, UNSPEC_FMLALB_FP8))
FUNCTION (svmlalb, unspec_based_mla_function,
          (UNSPEC_SMULLB, UNSPEC_UMULLB, UNSPEC_FMLALB, UNSPEC_FMLALB_FP8))
FUNCTION (svmlallbb_lane, unspec_based_mla_lane_function,
          (-1, -1, -1, UNSPEC_FMLALLBB_FP8))
FUNCTION (svmlallbb, unspec_based_mla_function,
          (-1, -1, -1, UNSPEC_FMLALLBB_FP8))
FUNCTION (svmlallbt_lane, unspec_based_mla_lane_function,
          (-1, -1, -1, UNSPEC_FMLALLBT_FP8))
FUNCTION (svmlallbt, unspec_based_mla_function,
          (-1, -1, -1, UNSPEC_FMLALLBT_FP8))
FUNCTION (svmlalltb_lane, unspec_based_mla_lane_function,
          (-1, -1, -1, UNSPEC_FMLALLTB_FP8))
FUNCTION (svmlalltb, unspec_based_mla_function,
          (-1, -1, -1, UNSPEC_FMLALLTB_FP8))
FUNCTION (svmlalltt_lane, unspec_based_mla_lane_function,
          (-1, -1, -1, UNSPEC_FMLALLTT_FP8))
FUNCTION (svmlalltt, unspec_based_mla_function,
          (-1, -1, -1, UNSPEC_FMLALLTT_FP8))
FUNCTION (svmlalt_lane, unspec_based_mla_lane_function,
          (UNSPEC_SMULLT, UNSPEC_UMULLT, UNSPEC_FMLALT, UNSPEC_FMLALT_FP8))
FUNCTION (svmlalt, unspec_based_mla_function,
          (UNSPEC_SMULLT, UNSPEC_UMULLT, UNSPEC_FMLALT, UNSPEC_FMLALT_FP8))
FUNCTION (svmlslb, unspec_based_mls_function, (UNSPEC_SMULLB,
                                               UNSPEC_UMULLB, UNSPEC_FMLSLB))
FUNCTION (svmlslb_lane, unspec_based_mls_lane_function, (UNSPEC_SMULLB,
                                                         UNSPEC_UMULLB,
                                                         UNSPEC_FMLSLB))
FUNCTION (svmlslt, unspec_based_mls_function, (UNSPEC_SMULLT,
                                               UNSPEC_UMULLT, UNSPEC_FMLSLT))
FUNCTION (svmlslt_lane, unspec_based_mls_lane_function, (UNSPEC_SMULLT,
                                                         UNSPEC_UMULLT,
                                                         UNSPEC_FMLSLT))
FUNCTION (svmovlb, svmovl_lb_impl, (UNSPEC_SSHLLB, UNSPEC_USHLLB, -1))
FUNCTION (svmovlt, svmovl_lb_impl, (UNSPEC_SSHLLT, UNSPEC_USHLLT, -1))
FUNCTION (svmullb, unspec_based_function, (UNSPEC_SMULLB, UNSPEC_UMULLB, -1))
FUNCTION (svmullb_lane, unspec_based_lane_function, (UNSPEC_SMULLB,
                                                     UNSPEC_UMULLB, -1))
FUNCTION (svmullt, unspec_based_function, (UNSPEC_SMULLT, UNSPEC_UMULLT, -1))
FUNCTION (svmullt_lane, unspec_based_lane_function, (UNSPEC_SMULLT,
                                                     UNSPEC_UMULLT, -1))
FUNCTION (svnbsl, CODE_FOR_MODE0 (aarch64_sve2_nbsl),)
FUNCTION (svnmatch, svmatch_svnmatch_impl, (UNSPEC_NMATCH))
FUNCTION (svorqv, reduction, (UNSPEC_ORQV, UNSPEC_ORQV, -1))
FUNCTION (svpext_lane, svpext_lane_impl,)
FUNCTION (svpmov, svpmov_impl,)
FUNCTION (svpmov_lane, svpmov_lane_impl,)
FUNCTION (svpmul, CODE_FOR_MODE0 (aarch64_sve2_pmul),)
FUNCTION (svpmullb, unspec_based_function, (-1, UNSPEC_PMULLB, -1))
FUNCTION (svpmullb_pair, unspec_based_function, (-1, UNSPEC_PMULLB_PAIR, -1))
FUNCTION (svpmullt, unspec_based_function, (-1, UNSPEC_PMULLT, -1))
FUNCTION (svpmullt_pair, unspec_based_function, (-1, UNSPEC_PMULLT_PAIR, -1))
FUNCTION (svpsel_lane, svpsel_lane_impl,)
FUNCTION (svqabs, rtx_code_function, (SS_ABS, UNKNOWN, UNKNOWN))
FUNCTION (svqcadd, svqcadd_impl,)
FUNCTION (svqcvt, integer_conversion, (UNSPEC_SQCVT, UNSPEC_SQCVTU,
                                       UNSPEC_UQCVT, -1))
FUNCTION (svqcvtn, integer_conversion, (UNSPEC_SQCVTN, UNSPEC_SQCVTUN,
                                        UNSPEC_UQCVTN, -1))
FUNCTION (svqdmlalb, unspec_based_qadd_function, (UNSPEC_SQDMULLB, -1, -1))
FUNCTION (svqdmlalb_lane, unspec_based_qadd_lane_function, (UNSPEC_SQDMULLB,
                                                            -1, -1))
FUNCTION (svqdmlalbt, unspec_based_qadd_function, (UNSPEC_SQDMULLBT, -1, -1))
FUNCTION (svqdmlalt, unspec_based_qadd_function, (UNSPEC_SQDMULLT, -1, -1))
FUNCTION (svqdmlalt_lane, unspec_based_qadd_lane_function, (UNSPEC_SQDMULLT,
                                                            -1, -1))
FUNCTION (svqdmlslb, unspec_based_qsub_function, (UNSPEC_SQDMULLB, -1, -1))
FUNCTION (svqdmlslb_lane, unspec_based_qsub_lane_function, (UNSPEC_SQDMULLB,
                                                            -1, -1))
FUNCTION (svqdmlslbt, unspec_based_qsub_function, (UNSPEC_SQDMULLBT, -1, -1))
FUNCTION (svqdmlslt, unspec_based_qsub_function, (UNSPEC_SQDMULLT, -1, -1))
FUNCTION (svqdmlslt_lane, unspec_based_qsub_lane_function, (UNSPEC_SQDMULLT,
                                                            -1, -1))
FUNCTION (svqdmulh, unspec_based_function, (UNSPEC_SQDMULH, -1, -1))
FUNCTION (svqdmulh_lane, unspec_based_lane_function, (UNSPEC_SQDMULH, -1, -1))
FUNCTION (svqdmullb, unspec_based_function, (UNSPEC_SQDMULLB, -1, -1))
FUNCTION (svqdmullb_lane, unspec_based_lane_function, (UNSPEC_SQDMULLB,
                                                       -1, -1))
FUNCTION (svqdmullt, unspec_based_function, (UNSPEC_SQDMULLT, -1, -1))
FUNCTION (svqdmullt_lane, unspec_based_lane_function, (UNSPEC_SQDMULLT,
                                                       -1, -1))
FUNCTION (svqneg, rtx_code_function, (SS_NEG, UNKNOWN, UNKNOWN))
FUNCTION (svqrdcmlah, svqrdcmlah_impl,)
FUNCTION (svqrdcmlah_lane, svqrdcmlah_lane_impl,)
FUNCTION (svqrdmlah, unspec_based_function, (UNSPEC_SQRDMLAH, -1, -1))
FUNCTION (svqrdmlah_lane, unspec_based_lane_function, (UNSPEC_SQRDMLAH,
                                                       -1, -1))
FUNCTION (svqrdmlsh, unspec_based_function, (UNSPEC_SQRDMLSH, -1, -1))
FUNCTION (svqrdmlsh_lane, unspec_based_lane_function, (UNSPEC_SQRDMLSH,
                                                       -1, -1))
FUNCTION (svqrdmulh, unspec_based_function, (UNSPEC_SQRDMULH, -1, -1))
FUNCTION (svqrdmulh_lane, unspec_based_lane_function, (UNSPEC_SQRDMULH,
                                                       -1, -1))
FUNCTION (svqrshl, svqrshl_impl,)
FUNCTION (svqrshr, unspec_based_uncond_function, (UNSPEC_SQRSHR,
                                                  UNSPEC_UQRSHR, -1, -1, 1))
FUNCTION (svqrshrn, unspec_based_uncond_function, (UNSPEC_SQRSHRN,
                                                   UNSPEC_UQRSHRN, -1, -1, 1))
FUNCTION (svqrshrnb, unspec_based_function, (UNSPEC_SQRSHRNB,
                                             UNSPEC_UQRSHRNB, -1))
FUNCTION (svqrshrnt, unspec_based_function, (UNSPEC_SQRSHRNT,
                                             UNSPEC_UQRSHRNT, -1))
FUNCTION (svqrshru, unspec_based_uncond_function, (UNSPEC_SQRSHRU, -1, -1, -1, 1))
FUNCTION (svqrshrun, unspec_based_uncond_function, (UNSPEC_SQRSHRUN, -1, -1, -1, 1))
FUNCTION (svqrshrunb, unspec_based_function, (UNSPEC_SQRSHRUNB, -1, -1))
FUNCTION (svqrshrunt, unspec_based_function, (UNSPEC_SQRSHRUNT, -1, -1))
FUNCTION (svqshl, svqshl_impl,)
FUNCTION (svqshlu, unspec_based_function, (UNSPEC_SQSHLU, -1, -1))
FUNCTION (svqshrnb, unspec_based_function, (UNSPEC_SQSHRNB,
                                            UNSPEC_UQSHRNB, -1))
FUNCTION (svqshrnt, unspec_based_function, (UNSPEC_SQSHRNT,
                                            UNSPEC_UQSHRNT, -1))
FUNCTION (svqshrunb, unspec_based_function, (UNSPEC_SQSHRUNB, -1, -1))
FUNCTION (svqshrunt, unspec_based_function, (UNSPEC_SQSHRUNT, -1, -1))
FUNCTION (svqsubr, rtx_code_function_rotated, (SS_MINUS, US_MINUS, -1))
FUNCTION (svqxtnb, unspec_based_function, (UNSPEC_SQXTNB, UNSPEC_UQXTNB, -1))
FUNCTION (svqxtnt, unspec_based_function, (UNSPEC_SQXTNT, UNSPEC_UQXTNT, -1))
FUNCTION (svqxtunb, unspec_based_function, (UNSPEC_SQXTUNB, -1, -1))
FUNCTION (svqxtunt, unspec_based_function, (UNSPEC_SQXTUNT, -1, -1))
FUNCTION (svraddhnb, unspec_based_function, (UNSPEC_RADDHNB,
                                             UNSPEC_RADDHNB, -1))
FUNCTION (svraddhnt, unspec_based_function, (UNSPEC_RADDHNT,
                                             UNSPEC_RADDHNT, -1))
FUNCTION (svrax1, fixed_insn_function, (CODE_FOR_aarch64_sve2_rax1))
FUNCTION (svrevd, unspec_based_function, (UNSPEC_REVD, UNSPEC_REVD,
                                          UNSPEC_REVD))
FUNCTION (svrhadd, unspec_based_function, (UNSPEC_SRHADD, UNSPEC_URHADD, -1))
FUNCTION (svrshl, svrshl_impl,)
FUNCTION (svrshr, unspec_based_function, (UNSPEC_SRSHR, UNSPEC_URSHR, -1))
FUNCTION (svrshrnb, unspec_based_function, (UNSPEC_RSHRNB, UNSPEC_RSHRNB, -1))
FUNCTION (svrshrnt, unspec_based_function, (UNSPEC_RSHRNT, UNSPEC_RSHRNT, -1))
FUNCTION (svrsra, unspec_based_add_function, (UNSPEC_SRSHR, UNSPEC_URSHR, -1))
FUNCTION (svrsubhnb, unspec_based_function, (UNSPEC_RSUBHNB,
                                             UNSPEC_RSUBHNB, -1))
FUNCTION (svrsubhnt, unspec_based_function, (UNSPEC_RSUBHNT,
                                             UNSPEC_RSUBHNT, -1))
FUNCTION (svsbclb, unspec_based_function, (-1, UNSPEC_SBCLB, -1))
FUNCTION (svsbclt, unspec_based_function, (-1, UNSPEC_SBCLT, -1))
FUNCTION (svshllb, unspec_based_function, (UNSPEC_SSHLLB, UNSPEC_USHLLB, -1))
FUNCTION (svshllt, unspec_based_function, (UNSPEC_SSHLLT, UNSPEC_USHLLT, -1))
FUNCTION (svshrnb, unspec_based_function, (UNSPEC_SHRNB, UNSPEC_SHRNB, -1))
FUNCTION (svshrnt, unspec_based_function, (UNSPEC_SHRNT, UNSPEC_SHRNT, -1))
FUNCTION (svsli, unspec_based_function, (UNSPEC_SLI, UNSPEC_SLI, -1))
FUNCTION (svsm4e, fixed_insn_function, (CODE_FOR_aarch64_sve2_sm4e))
FUNCTION (svsm4ekey, fixed_insn_function, (CODE_FOR_aarch64_sve2_sm4ekey))
FUNCTION (svsqadd, svsqadd_impl,)
FUNCTION (svsra, svsra_impl,)
FUNCTION (svsri, unspec_based_function, (UNSPEC_SRI, UNSPEC_SRI, -1))
FUNCTION (svst1dq, svst1xq_impl, (VNx1DImode))
FUNCTION (svst1q_scatter, svst1q_scatter_impl,)
FUNCTION (svst1wq, svst1xq_impl, (VNx1SImode))
FUNCTION (svst2q, svst234q_impl, (2, VNx2TImode))
FUNCTION (svst3q, svst234q_impl, (3, VNx3TImode))
FUNCTION (svst4q, svst234q_impl, (4, VNx4TImode))
FUNCTION (svstnt1_scatter, svstnt1_scatter_impl,)
FUNCTION (svstnt1b_scatter, svstnt1_scatter_truncate_impl, (QImode))
FUNCTION (svstnt1h_scatter, svstnt1_scatter_truncate_impl, (HImode))
FUNCTION (svstnt1w_scatter, svstnt1_scatter_truncate_impl, (SImode))
FUNCTION (svsubhnb, unspec_based_function, (UNSPEC_SUBHNB, UNSPEC_SUBHNB, -1))
FUNCTION (svsubhnt, unspec_based_function, (UNSPEC_SUBHNT, UNSPEC_SUBHNT, -1))
FUNCTION (svsublb, unspec_based_function, (UNSPEC_SSUBLB, UNSPEC_USUBLB, -1))
FUNCTION (svsublbt, unspec_based_function, (UNSPEC_SSUBLBT, -1, -1))
FUNCTION (svsublt, unspec_based_function, (UNSPEC_SSUBLT, UNSPEC_USUBLT, -1))
FUNCTION (svsubltb, unspec_based_function, (UNSPEC_SSUBLTB, -1, -1))
FUNCTION (svsubwb, unspec_based_function, (UNSPEC_SSUBWB, UNSPEC_USUBWB, -1))
FUNCTION (svsubwt, unspec_based_function, (UNSPEC_SSUBWT, UNSPEC_USUBWT, -1))
FUNCTION (svtbl2, svtbl2_impl,)
FUNCTION (svtblq, quiet<unspec_based_uncond_function>, (UNSPEC_TBLQ,
                                                        UNSPEC_TBLQ,
                                                        UNSPEC_TBLQ, UNSPEC_TBLQ))
FUNCTION (svtbx, quiet<unspec_based_uncond_function>, (UNSPEC_TBX, UNSPEC_TBX,
                                                       UNSPEC_TBX, UNSPEC_TBX))
FUNCTION (svtbxq, quiet<unspec_based_uncond_function>, (UNSPEC_TBXQ,
                                                        UNSPEC_TBXQ,
                                                        UNSPEC_TBXQ, UNSPEC_TBXQ))
FUNCTION (svunpk, svunpk_impl,)
FUNCTION (svuqadd, svuqadd_impl,)
FUNCTION (svuzp, multireg_permute, (UNSPEC_UZP))
FUNCTION (svuzpq, multireg_permute, (UNSPEC_UZPQ))
FUNCTION (svuzpq1, svuzpq_impl, (0))
FUNCTION (svuzpq2, svuzpq_impl, (1))
FUNCTION (svwhilege, while_comparison, (UNSPEC_WHILEGE, UNSPEC_WHILEHS))
FUNCTION (svwhilegt, while_comparison, (UNSPEC_WHILEGT, UNSPEC_WHILEHI))
FUNCTION (svwhilerw, svwhilerw_svwhilewr_impl, (UNSPEC_WHILERW))
FUNCTION (svwhilewr, svwhilerw_svwhilewr_impl, (UNSPEC_WHILEWR))
FUNCTION (svxar, svxar_impl,)
FUNCTION (svzip, multireg_permute, (UNSPEC_ZIP))
FUNCTION (svzipq, multireg_permute, (UNSPEC_ZIPQ))
FUNCTION (svzipq1, svzipq_impl, (0))
FUNCTION (svzipq2, svzipq_impl, (1))
FUNCTION (svluti2_lane, svluti_lane_impl, (2))
FUNCTION (svluti4_lane, svluti_lane_impl, (4))

} /* end namespace aarch64_sve */