/*
 *  ARM translation: AArch32 Neon instructions
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *  Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "translate.h"
#include "translate-a32.h"

/* Include the generated Neon decoder */
#include "decode-neon-dp.c.inc"
#include "decode-neon-ls.c.inc"
#include "decode-neon-shared.c.inc"
static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, tcg_env, vfp_reg_offset(dp, reg));
    return ret;
}
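
/*
 * Load or store a single element of a Neon register, selected by
 * element index 'ele' and element size; loads are zero-extended to
 * the width of the TCG destination.
 */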
static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i32(var, tcg_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i32(var, tcg_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld_i32(var, tcg_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
static void neon_load_element64(TCGv_i64 var, int reg, int ele, MemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i64(var, tcg_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i64(var, tcg_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld32u_i64(var, tcg_env, offset);
        break;
    case MO_UQ:
        tcg_gen_ld_i64(var, tcg_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
static void neon_store_element(int reg, int ele, MemOp size, TCGv_i32 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i32(var, tcg_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i32(var, tcg_env, offset);
        break;
    case MO_32:
        tcg_gen_st_i32(var, tcg_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
static void neon_store_element64(int reg, int ele, MemOp size, TCGv_i64 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i64(var, tcg_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i64(var, tcg_env, offset);
        break;
    case MO_32:
        tcg_gen_st32_i64(var, tcg_env, offset);
        break;
    case MO_64:
        tcg_gen_st_i64(var, tcg_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
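
/*
 * Shared checks for the do_neon_ddda* helpers below. The 'q' argument
 * carries one bit per operand (bit 2 = vd, bit 1 = vn, bit 0 = vm) that
 * must be an even register number when the operation is Q-sized:
 * callers pass a->q * 7 when all operands are Q registers, or a->q * 6
 * when vm is an indexed (scalar) D register. For example, with q == 7
 * an odd vd yields (1 * 4) & 7 != 0, so the access UNDEFs.
 */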
static bool do_neon_ddda(DisasContext *s, int q, int vd, int vn, int vm,
                         int data, gen_helper_gvec_4 *fn_gvec)
{
    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (((vd | vn | vm) & 0x10) && !dc_isar_feature(aa32_simd_r32, s)) {
        return false;
    }

    /*
     * UNDEF accesses to odd registers for each bit of Q.
     * Q will be 0b111 for all Q-reg instructions, otherwise
     * when we have mixed Q- and D-reg inputs.
     */
    if (((vd & 1) * 4 | (vn & 1) * 2 | (vm & 1)) & q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    int opr_sz = q ? 16 : 8;
    tcg_gen_gvec_4_ool(vfp_reg_offset(1, vd),
                       vfp_reg_offset(1, vn),
                       vfp_reg_offset(1, vm),
                       vfp_reg_offset(1, vd),
                       opr_sz, opr_sz, data, fn_gvec);
    return true;
}
static bool do_neon_ddda_env(DisasContext *s, int q, int vd, int vn, int vm,
                             int data, gen_helper_gvec_4_ptr *fn_gvec)
{
    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (((vd | vn | vm) & 0x10) && !dc_isar_feature(aa32_simd_r32, s)) {
        return false;
    }

    /*
     * UNDEF accesses to odd registers for each bit of Q.
     * Q will be 0b111 for all Q-reg instructions, otherwise
     * when we have mixed Q- and D-reg inputs.
     */
    if (((vd & 1) * 4 | (vn & 1) * 2 | (vm & 1)) & q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    int opr_sz = q ? 16 : 8;
    tcg_gen_gvec_4_ptr(vfp_reg_offset(1, vd),
                       vfp_reg_offset(1, vn),
                       vfp_reg_offset(1, vm),
                       vfp_reg_offset(1, vd),
                       tcg_env,
                       opr_sz, opr_sz, data, fn_gvec);
    return true;
}
static bool do_neon_ddda_fpst(DisasContext *s, int q, int vd, int vn, int vm,
                              int data, ARMFPStatusFlavour fp_flavour,
                              gen_helper_gvec_4_ptr *fn_gvec_ptr)
{
    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (((vd | vn | vm) & 0x10) && !dc_isar_feature(aa32_simd_r32, s)) {
        return false;
    }

    /*
     * UNDEF accesses to odd registers for each bit of Q.
     * Q will be 0b111 for all Q-reg instructions, otherwise
     * when we have mixed Q- and D-reg inputs.
     */
    if (((vd & 1) * 4 | (vn & 1) * 2 | (vm & 1)) & q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    int opr_sz = q ? 16 : 8;
    TCGv_ptr fpst = fpstatus_ptr(fp_flavour);

    tcg_gen_gvec_4_ptr(vfp_reg_offset(1, vd),
                       vfp_reg_offset(1, vn),
                       vfp_reg_offset(1, vm),
                       vfp_reg_offset(1, vd),
                       fpst, opr_sz, opr_sz, data, fn_gvec_ptr);
    return true;
}
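
/*
 * Note: for VCMLA the 'rot' field selects the rotation in 90-degree
 * steps (0..3 => 0/90/180/270 degrees) and is passed through unchanged
 * as the gvec 'data' argument to the fcmla helpers.
 */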
static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
{
    if (!dc_isar_feature(aa32_vcma, s)) {
        return false;
    }
    if (a->size == MO_16) {
        if (!dc_isar_feature(aa32_fp16_arith, s)) {
            return false;
        }
        return do_neon_ddda_fpst(s, a->q * 7, a->vd, a->vn, a->vm, a->rot,
                                 FPST_STD_F16, gen_helper_gvec_fcmlah);
    }
    return do_neon_ddda_fpst(s, a->q * 7, a->vd, a->vn, a->vm, a->rot,
                             FPST_STD, gen_helper_gvec_fcmlas);
}
static bool trans_VCADD(DisasContext *s, arg_VCADD *a)
{
    int opr_sz;
    TCGv_ptr fpst;
    gen_helper_gvec_3_ptr *fn_gvec_ptr;

    if (!dc_isar_feature(aa32_vcma, s)
        || (a->size == MO_16 && !dc_isar_feature(aa32_fp16_arith, s))) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vn | a->vm | a->vd) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    opr_sz = (1 + a->q) * 8;
    fpst = fpstatus_ptr(a->size == MO_16 ? FPST_STD_F16 : FPST_STD);
    fn_gvec_ptr = (a->size == MO_16) ?
        gen_helper_gvec_fcaddh : gen_helper_gvec_fcadds;
    tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
                       vfp_reg_offset(1, a->vn),
                       vfp_reg_offset(1, a->vm),
                       fpst, opr_sz, opr_sz, a->rot,
                       fn_gvec_ptr);
    return true;
}
static bool trans_VSDOT(DisasContext *s, arg_VSDOT *a)
{
    if (!dc_isar_feature(aa32_dp, s)) {
        return false;
    }
    return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0,
                        gen_helper_gvec_sdot_b);
}
static bool trans_VUDOT(DisasContext *s, arg_VUDOT *a)
{
    if (!dc_isar_feature(aa32_dp, s)) {
        return false;
    }
    return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0,
                        gen_helper_gvec_udot_b);
}
static bool trans_VUSDOT(DisasContext *s, arg_VUSDOT *a)
{
    if (!dc_isar_feature(aa32_i8mm, s)) {
        return false;
    }
    return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0,
                        gen_helper_gvec_usdot_b);
}
static bool trans_VDOT_b16(DisasContext *s, arg_VDOT_b16 *a)
{
    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }
    return do_neon_ddda_env(s, a->q * 7, a->vd, a->vn, a->vm, 0,
                            gen_helper_gvec_bfdot);
}
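
/*
 * VFMAL/VFMSL: widening half-precision to single-precision fused
 * multiply-accumulate. The sources hold half-precision elements and are
 * half the width of the destination (D registers for the Q form, S
 * registers for the D form), hence vfp_reg_offset(a->q, ...) for the
 * sources while vd is always addressed as a double-width register.
 */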
static bool trans_VFML(DisasContext *s, arg_VFML *a)
{
    int opr_sz;

    if (!dc_isar_feature(aa32_fhm, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        (a->vd & 0x10)) {
        return false;
    }

    if (a->vd & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    opr_sz = (1 + a->q) * 8;
    tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
                       vfp_reg_offset(a->q, a->vn),
                       vfp_reg_offset(a->q, a->vm),
                       tcg_env, opr_sz, opr_sz, a->s, /* is_2 == 0 */
                       gen_helper_gvec_fmlal_a32);
    return true;
}
static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a)
{
    int data = (a->index << 2) | a->rot;

    if (!dc_isar_feature(aa32_vcma, s)) {
        return false;
    }
    if (a->size == MO_16) {
        if (!dc_isar_feature(aa32_fp16_arith, s)) {
            return false;
        }
        return do_neon_ddda_fpst(s, a->q * 6, a->vd, a->vn, a->vm, data,
                                 FPST_STD_F16, gen_helper_gvec_fcmlah_idx);
    }
    return do_neon_ddda_fpst(s, a->q * 6, a->vd, a->vn, a->vm, data,
                             FPST_STD, gen_helper_gvec_fcmlas_idx);
}
static bool trans_VSDOT_scalar(DisasContext *s, arg_VSDOT_scalar *a)
{
    if (!dc_isar_feature(aa32_dp, s)) {
        return false;
    }
    return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
                        gen_helper_gvec_sdot_idx_b);
}
static bool trans_VUDOT_scalar(DisasContext *s, arg_VUDOT_scalar *a)
{
    if (!dc_isar_feature(aa32_dp, s)) {
        return false;
    }
    return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
                        gen_helper_gvec_udot_idx_b);
}
static bool trans_VUSDOT_scalar(DisasContext *s, arg_VUSDOT_scalar *a)
{
    if (!dc_isar_feature(aa32_i8mm, s)) {
        return false;
    }
    return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
                        gen_helper_gvec_usdot_idx_b);
}
static bool trans_VSUDOT_scalar(DisasContext *s, arg_VSUDOT_scalar *a)
{
    if (!dc_isar_feature(aa32_i8mm, s)) {
        return false;
    }
    return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
                        gen_helper_gvec_sudot_idx_b);
}
static bool trans_VDOT_b16_scal(DisasContext *s, arg_VDOT_b16_scal *a)
{
    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }
    return do_neon_ddda_env(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
                            gen_helper_gvec_bfdot_idx);
}
static bool trans_VFML_scalar(DisasContext *s, arg_VFML_scalar *a)
{
    int opr_sz;

    if (!dc_isar_feature(aa32_fhm, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd & 0x10) || (a->q && (a->vn & 0x10)))) {
        return false;
    }

    if (a->vd & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    opr_sz = (1 + a->q) * 8;
    tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
                       vfp_reg_offset(a->q, a->vn),
                       vfp_reg_offset(a->q, a->rm),
                       tcg_env, opr_sz, opr_sz,
                       (a->index << 2) | a->s, /* is_2 == 0 */
                       gen_helper_gvec_fmlal_idx_a32);
    return true;
}
} const neon_ls_element_type[11] = {
static void gen_neon_ldst_base_update(DisasContext *s, int rm, int rn,
                                      int stride)
{
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
        }
        store_reg(s, rn, base);
    }
}
466 static bool trans_VLDST_multiple(DisasContext
*s
, arg_VLDST_multiple
*a
)
468 /* Neon load/store multiple structures */
469 int nregs
, interleave
, spacing
, reg
, n
;
470 MemOp mop
, align
, endian
;
471 int mmu_idx
= get_mem_index(s
);
476 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
480 /* UNDEF accesses to D16-D31 if they don't exist */
481 if (!dc_isar_feature(aa32_simd_r32
, s
) && (a
->vd
& 0x10)) {
487 /* Catch UNDEF cases for bad values of align field */
488 switch (a
->itype
& 0xc) {
502 nregs
= neon_ls_element_type
[a
->itype
].nregs
;
503 interleave
= neon_ls_element_type
[a
->itype
].interleave
;
504 spacing
= neon_ls_element_type
[a
->itype
].spacing
;
505 if (size
== 3 && (interleave
| spacing
) != 1) {
509 if (!vfp_access_check(s
)) {
513 /* For our purposes, bytes are always little-endian. */
519 /* Enforce alignment requested by the instruction */
521 align
= pow2_align(a
->align
+ 2); /* 4 ** a->align */
523 align
= s
->align_mem
? MO_ALIGN
: 0;
527 * Consecutive little-endian elements from a single register
528 * can be promoted to a larger little-endian operation.
530 if (interleave
== 1 && endian
== MO_LE
) {
531 /* Retain any natural alignment. */
532 if (align
== MO_ALIGN
) {
533 align
= pow2_align(size
);
538 tmp64
= tcg_temp_new_i64();
539 addr
= tcg_temp_new_i32();
540 load_reg_var(s
, addr
, a
->rn
);
542 mop
= endian
| size
| align
;
543 for (reg
= 0; reg
< nregs
; reg
++) {
544 for (n
= 0; n
< 8 >> size
; n
++) {
546 for (xs
= 0; xs
< interleave
; xs
++) {
547 int tt
= a
->vd
+ reg
+ spacing
* xs
;
550 gen_aa32_ld_internal_i64(s
, tmp64
, addr
, mmu_idx
, mop
);
551 neon_store_element64(tt
, n
, size
, tmp64
);
553 neon_load_element64(tmp64
, tt
, n
, size
);
554 gen_aa32_st_internal_i64(s
, tmp64
, addr
, mmu_idx
, mop
);
556 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
558 /* Subsequent memory operations inherit alignment */
564 gen_neon_ldst_base_update(s
, a
->rm
, a
->rn
, nregs
* interleave
* 8);
568 static bool trans_VLD_all_lanes(DisasContext
*s
, arg_VLD_all_lanes
*a
)
570 /* Neon load single structure to all lanes */
571 int reg
, stride
, vec_size
;
574 int nregs
= a
->n
+ 1;
578 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
582 /* UNDEF accesses to D16-D31 if they don't exist */
583 if (!dc_isar_feature(aa32_simd_r32
, s
) && (a
->vd
& 0x10)) {
589 if (nregs
!= 4 || a
->a
== 0) {
592 /* For VLD4 size == 3 a == 1 means 32 bits at 16 byte alignment */
604 align
= pow2_align(size
+ 1);
610 align
= pow2_align(3);
612 align
= pow2_align(size
+ 2);
616 g_assert_not_reached();
620 if (!vfp_access_check(s
)) {
625 * VLD1 to all lanes: T bit indicates how many Dregs to write.
626 * VLD2/3/4 to all lanes: T bit indicates register stride.
628 stride
= a
->t
? 2 : 1;
629 vec_size
= nregs
== 1 ? stride
* 8 : 8;
631 tmp
= tcg_temp_new_i32();
632 addr
= tcg_temp_new_i32();
633 load_reg_var(s
, addr
, a
->rn
);
634 for (reg
= 0; reg
< nregs
; reg
++) {
635 gen_aa32_ld_i32(s
, tmp
, addr
, get_mem_index(s
), mop
);
636 if ((vd
& 1) && vec_size
== 16) {
638 * We cannot write 16 bytes at once because the
639 * destination is unaligned.
641 tcg_gen_gvec_dup_i32(size
, neon_full_reg_offset(vd
),
643 tcg_gen_gvec_mov(0, neon_full_reg_offset(vd
+ 1),
644 neon_full_reg_offset(vd
), 8, 8);
646 tcg_gen_gvec_dup_i32(size
, neon_full_reg_offset(vd
),
647 vec_size
, vec_size
, tmp
);
649 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
652 /* Subsequent memory operations inherit alignment */
656 gen_neon_ldst_base_update(s
, a
->rm
, a
->rn
, (1 << size
) * nregs
);
661 static bool trans_VLDST_single(DisasContext
*s
, arg_VLDST_single
*a
)
663 /* Neon load/store single structure to one lane */
665 int nregs
= a
->n
+ 1;
670 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
674 /* UNDEF accesses to D16-D31 if they don't exist */
675 if (!dc_isar_feature(aa32_simd_r32
, s
) && (a
->vd
& 0x10)) {
679 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
682 if (a
->stride
!= 1) {
685 if (((a
->align
& (1 << a
->size
)) != 0) ||
686 (a
->size
== 2 && (a
->align
== 1 || a
->align
== 2))) {
691 if (a
->size
== 2 && (a
->align
& 2) != 0) {
701 if (a
->size
== 2 && a
->align
== 3) {
706 g_assert_not_reached();
708 if ((vd
+ a
->stride
* (nregs
- 1)) > 31) {
710 * Attempts to write off the end of the register file are
711 * UNPREDICTABLE; we choose to UNDEF because otherwise we would
712 * access off the end of the array that holds the register data.
717 if (!vfp_access_check(s
)) {
721 /* Pick up SCTLR settings */
722 mop
= finalize_memop(s
, a
->size
);
729 /* For VLD1, use natural alignment. */
733 /* For VLD2, use double alignment. */
734 align_op
= pow2_align(a
->size
+ 1);
737 if (a
->size
== MO_32
) {
739 * For VLD4.32, align = 1 is double alignment, align = 2 is
740 * quad alignment; align = 3 is rejected above.
742 align_op
= pow2_align(a
->size
+ a
->align
);
744 /* For VLD4.8 and VLD.16, we want quad alignment. */
745 align_op
= pow2_align(a
->size
+ 2);
749 /* For VLD3, the alignment field is zero and rejected above. */
750 g_assert_not_reached();
753 mop
= (mop
& ~MO_AMASK
) | align_op
;
756 tmp
= tcg_temp_new_i32();
757 addr
= tcg_temp_new_i32();
758 load_reg_var(s
, addr
, a
->rn
);
760 for (reg
= 0; reg
< nregs
; reg
++) {
762 gen_aa32_ld_internal_i32(s
, tmp
, addr
, get_mem_index(s
), mop
);
763 neon_store_element(vd
, a
->reg_idx
, a
->size
, tmp
);
765 neon_load_element(tmp
, vd
, a
->reg_idx
, a
->size
);
766 gen_aa32_st_internal_i32(s
, tmp
, addr
, get_mem_index(s
), mop
);
769 tcg_gen_addi_i32(addr
, addr
, 1 << a
->size
);
771 /* Subsequent memory operations inherit alignment */
775 gen_neon_ldst_base_update(s
, a
->rm
, a
->rn
, (1 << a
->size
) * nregs
);
780 static bool do_3same(DisasContext
*s
, arg_3same
*a
, GVecGen3Fn fn
)
782 int vec_size
= a
->q
? 16 : 8;
783 int rd_ofs
= neon_full_reg_offset(a
->vd
);
784 int rn_ofs
= neon_full_reg_offset(a
->vn
);
785 int rm_ofs
= neon_full_reg_offset(a
->vm
);
787 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
791 /* UNDEF accesses to D16-D31 if they don't exist. */
792 if (!dc_isar_feature(aa32_simd_r32
, s
) &&
793 ((a
->vd
| a
->vn
| a
->vm
) & 0x10)) {
797 if ((a
->vn
| a
->vm
| a
->vd
) & a
->q
) {
801 if (!vfp_access_check(s
)) {
805 fn(a
->size
, rd_ofs
, rn_ofs
, rm_ofs
, vec_size
, vec_size
);
809 #define DO_3SAME(INSN, FUNC) \
810 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
812 return do_3same(s, a, FUNC); \
815 DO_3SAME(VADD
, tcg_gen_gvec_add
)
816 DO_3SAME(VSUB
, tcg_gen_gvec_sub
)
817 DO_3SAME(VAND
, tcg_gen_gvec_and
)
818 DO_3SAME(VBIC
, tcg_gen_gvec_andc
)
819 DO_3SAME(VORR
, tcg_gen_gvec_or
)
820 DO_3SAME(VORN
, tcg_gen_gvec_orc
)
821 DO_3SAME(VEOR
, tcg_gen_gvec_xor
)
822 DO_3SAME(VSHL_S
, gen_gvec_sshl
)
823 DO_3SAME(VSHL_U
, gen_gvec_ushl
)
824 DO_3SAME(VQADD_S
, gen_gvec_sqadd_qc
)
825 DO_3SAME(VQADD_U
, gen_gvec_uqadd_qc
)
826 DO_3SAME(VQSUB_S
, gen_gvec_sqsub_qc
)
827 DO_3SAME(VQSUB_U
, gen_gvec_uqsub_qc
)
828 DO_3SAME(VRSHL_S
, gen_gvec_srshl
)
829 DO_3SAME(VRSHL_U
, gen_gvec_urshl
)
830 DO_3SAME(VQSHL_S
, gen_neon_sqshl
)
831 DO_3SAME(VQSHL_U
, gen_neon_uqshl
)
832 DO_3SAME(VQRSHL_S
, gen_neon_sqrshl
)
833 DO_3SAME(VQRSHL_U
, gen_neon_uqrshl
)
835 /* These insns are all gvec_bitsel but with the inputs in various orders. */
836 #define DO_3SAME_BITSEL(INSN, O1, O2, O3) \
837 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
838 uint32_t rn_ofs, uint32_t rm_ofs, \
839 uint32_t oprsz, uint32_t maxsz) \
841 tcg_gen_gvec_bitsel(vece, rd_ofs, O1, O2, O3, oprsz, maxsz); \
843 DO_3SAME(INSN, gen_##INSN##_3s)
845 DO_3SAME_BITSEL(VBSL
, rd_ofs
, rn_ofs
, rm_ofs
)
846 DO_3SAME_BITSEL(VBIT
, rm_ofs
, rn_ofs
, rd_ofs
)
847 DO_3SAME_BITSEL(VBIF
, rm_ofs
, rd_ofs
, rn_ofs
)
849 #define DO_3SAME_NO_SZ_3(INSN, FUNC) \
850 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
852 if (a->size == 3) { \
855 return do_3same(s, a, FUNC); \
858 DO_3SAME_NO_SZ_3(VMAX_S
, tcg_gen_gvec_smax
)
859 DO_3SAME_NO_SZ_3(VMAX_U
, tcg_gen_gvec_umax
)
860 DO_3SAME_NO_SZ_3(VMIN_S
, tcg_gen_gvec_smin
)
861 DO_3SAME_NO_SZ_3(VMIN_U
, tcg_gen_gvec_umin
)
862 DO_3SAME_NO_SZ_3(VMUL
, tcg_gen_gvec_mul
)
863 DO_3SAME_NO_SZ_3(VMLA
, gen_gvec_mla
)
864 DO_3SAME_NO_SZ_3(VMLS
, gen_gvec_mls
)
865 DO_3SAME_NO_SZ_3(VTST
, gen_gvec_cmtst
)
866 DO_3SAME_NO_SZ_3(VABD_S
, gen_gvec_sabd
)
867 DO_3SAME_NO_SZ_3(VABA_S
, gen_gvec_saba
)
868 DO_3SAME_NO_SZ_3(VABD_U
, gen_gvec_uabd
)
869 DO_3SAME_NO_SZ_3(VABA_U
, gen_gvec_uaba
)
870 DO_3SAME_NO_SZ_3(VPADD
, gen_gvec_addp
)
871 DO_3SAME_NO_SZ_3(VPMAX_S
, gen_gvec_smaxp
)
872 DO_3SAME_NO_SZ_3(VPMIN_S
, gen_gvec_sminp
)
873 DO_3SAME_NO_SZ_3(VPMAX_U
, gen_gvec_umaxp
)
874 DO_3SAME_NO_SZ_3(VPMIN_U
, gen_gvec_uminp
)
875 DO_3SAME_NO_SZ_3(VHADD_S
, gen_gvec_shadd
)
876 DO_3SAME_NO_SZ_3(VHADD_U
, gen_gvec_uhadd
)
877 DO_3SAME_NO_SZ_3(VHSUB_S
, gen_gvec_shsub
)
878 DO_3SAME_NO_SZ_3(VHSUB_U
, gen_gvec_uhsub
)
879 DO_3SAME_NO_SZ_3(VRHADD_S
, gen_gvec_srhadd
)
880 DO_3SAME_NO_SZ_3(VRHADD_U
, gen_gvec_urhadd
)
882 #define DO_3SAME_CMP(INSN, COND) \
883 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
884 uint32_t rn_ofs, uint32_t rm_ofs, \
885 uint32_t oprsz, uint32_t maxsz) \
887 tcg_gen_gvec_cmp(COND, vece, rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz); \
889 DO_3SAME_NO_SZ_3(INSN, gen_##INSN##_3s)
891 DO_3SAME_CMP(VCGT_S
, TCG_COND_GT
)
892 DO_3SAME_CMP(VCGT_U
, TCG_COND_GTU
)
893 DO_3SAME_CMP(VCGE_S
, TCG_COND_GE
)
894 DO_3SAME_CMP(VCGE_U
, TCG_COND_GEU
)
895 DO_3SAME_CMP(VCEQ
, TCG_COND_EQ
)
897 #define WRAP_OOL_FN(WRAPNAME, FUNC) \
898 static void WRAPNAME(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, \
899 uint32_t rm_ofs, uint32_t oprsz, uint32_t maxsz) \
901 tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, 0, FUNC); \
904 WRAP_OOL_FN(gen_VMUL_p_3s
, gen_helper_gvec_pmul_b
)
906 static bool trans_VMUL_p_3s(DisasContext
*s
, arg_3same
*a
)
911 return do_3same(s
, a
, gen_VMUL_p_3s
);
914 #define DO_VQRDMLAH(INSN, FUNC) \
915 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
917 if (!dc_isar_feature(aa32_rdm, s)) { \
920 if (a->size != 1 && a->size != 2) { \
923 return do_3same(s, a, FUNC); \
926 DO_VQRDMLAH(VQRDMLAH
, gen_gvec_sqrdmlah_qc
)
927 DO_VQRDMLAH(VQRDMLSH
, gen_gvec_sqrdmlsh_qc
)
929 #define DO_SHA1(NAME, FUNC) \
930 WRAP_OOL_FN(gen_##NAME##_3s, FUNC) \
931 static bool trans_##NAME##_3s(DisasContext *s, arg_3same *a) \
933 if (!dc_isar_feature(aa32_sha1, s)) { \
936 return do_3same(s, a, gen_##NAME##_3s); \
939 DO_SHA1(SHA1C
, gen_helper_crypto_sha1c
)
940 DO_SHA1(SHA1P
, gen_helper_crypto_sha1p
)
941 DO_SHA1(SHA1M
, gen_helper_crypto_sha1m
)
942 DO_SHA1(SHA1SU0
, gen_helper_crypto_sha1su0
)
944 #define DO_SHA2(NAME, FUNC) \
945 WRAP_OOL_FN(gen_##NAME##_3s, FUNC) \
946 static bool trans_##NAME##_3s(DisasContext *s, arg_3same *a) \
948 if (!dc_isar_feature(aa32_sha2, s)) { \
951 return do_3same(s, a, gen_##NAME##_3s); \
954 DO_SHA2(SHA256H
, gen_helper_crypto_sha256h
)
955 DO_SHA2(SHA256H2
, gen_helper_crypto_sha256h2
)
956 DO_SHA2(SHA256SU1
, gen_helper_crypto_sha256su1
)
/*
 * Some helper functions need to be passed the tcg_env. In order
 * to use those with the gvec APIs like tcg_gen_gvec_3() we need
 * to create wrapper functions whose prototype is a NeonGenTwoOpFn()
 * and which call a NeonGenTwoOpEnvFn().
 */
964 #define WRAP_ENV_FN(WRAPNAME, FUNC) \
965 static void WRAPNAME(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m) \
967 FUNC(d, tcg_env, n, m); \
970 #define DO_3SAME_VQDMULH(INSN, FUNC) \
971 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
972 { return a->size >= 1 && a->size <= 2 && do_3same(s, a, FUNC); }
974 DO_3SAME_VQDMULH(VQDMULH
, gen_gvec_sqdmulh_qc
)
975 DO_3SAME_VQDMULH(VQRDMULH
, gen_gvec_sqrdmulh_qc
)
977 #define WRAP_FP_GVEC(WRAPNAME, FPST, FUNC) \
978 static void WRAPNAME(unsigned vece, uint32_t rd_ofs, \
979 uint32_t rn_ofs, uint32_t rm_ofs, \
980 uint32_t oprsz, uint32_t maxsz) \
982 TCGv_ptr fpst = fpstatus_ptr(FPST); \
983 tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpst, \
984 oprsz, maxsz, 0, FUNC); \
987 #define DO_3S_FP_GVEC(INSN,SFUNC,HFUNC) \
988 WRAP_FP_GVEC(gen_##INSN##_fp32_3s, FPST_STD, SFUNC) \
989 WRAP_FP_GVEC(gen_##INSN##_fp16_3s, FPST_STD_F16, HFUNC) \
990 static bool trans_##INSN##_fp_3s(DisasContext *s, arg_3same *a) \
992 if (a->size == MO_16) { \
993 if (!dc_isar_feature(aa32_fp16_arith, s)) { \
996 return do_3same(s, a, gen_##INSN##_fp16_3s); \
998 return do_3same(s, a, gen_##INSN##_fp32_3s); \
1002 DO_3S_FP_GVEC(VADD
, gen_helper_gvec_fadd_s
, gen_helper_gvec_fadd_h
)
1003 DO_3S_FP_GVEC(VSUB
, gen_helper_gvec_fsub_s
, gen_helper_gvec_fsub_h
)
1004 DO_3S_FP_GVEC(VABD
, gen_helper_gvec_fabd_s
, gen_helper_gvec_fabd_h
)
1005 DO_3S_FP_GVEC(VMUL
, gen_helper_gvec_fmul_s
, gen_helper_gvec_fmul_h
)
1006 DO_3S_FP_GVEC(VCEQ
, gen_helper_gvec_fceq_s
, gen_helper_gvec_fceq_h
)
1007 DO_3S_FP_GVEC(VCGE
, gen_helper_gvec_fcge_s
, gen_helper_gvec_fcge_h
)
1008 DO_3S_FP_GVEC(VCGT
, gen_helper_gvec_fcgt_s
, gen_helper_gvec_fcgt_h
)
1009 DO_3S_FP_GVEC(VACGE
, gen_helper_gvec_facge_s
, gen_helper_gvec_facge_h
)
1010 DO_3S_FP_GVEC(VACGT
, gen_helper_gvec_facgt_s
, gen_helper_gvec_facgt_h
)
1011 DO_3S_FP_GVEC(VMAX
, gen_helper_gvec_fmax_s
, gen_helper_gvec_fmax_h
)
1012 DO_3S_FP_GVEC(VMIN
, gen_helper_gvec_fmin_s
, gen_helper_gvec_fmin_h
)
1013 DO_3S_FP_GVEC(VMLA
, gen_helper_gvec_fmla_s
, gen_helper_gvec_fmla_h
)
1014 DO_3S_FP_GVEC(VMLS
, gen_helper_gvec_fmls_s
, gen_helper_gvec_fmls_h
)
1015 DO_3S_FP_GVEC(VFMA
, gen_helper_gvec_vfma_s
, gen_helper_gvec_vfma_h
)
1016 DO_3S_FP_GVEC(VFMS
, gen_helper_gvec_vfms_s
, gen_helper_gvec_vfms_h
)
1017 DO_3S_FP_GVEC(VRECPS
, gen_helper_gvec_recps_nf_s
, gen_helper_gvec_recps_nf_h
)
1018 DO_3S_FP_GVEC(VRSQRTS
, gen_helper_gvec_rsqrts_nf_s
, gen_helper_gvec_rsqrts_nf_h
)
1019 DO_3S_FP_GVEC(VPADD
, gen_helper_gvec_faddp_s
, gen_helper_gvec_faddp_h
)
1020 DO_3S_FP_GVEC(VPMAX
, gen_helper_gvec_fmaxp_s
, gen_helper_gvec_fmaxp_h
)
1021 DO_3S_FP_GVEC(VPMIN
, gen_helper_gvec_fminp_s
, gen_helper_gvec_fminp_h
)
1023 WRAP_FP_GVEC(gen_VMAXNM_fp32_3s
, FPST_STD
, gen_helper_gvec_fmaxnum_s
)
1024 WRAP_FP_GVEC(gen_VMAXNM_fp16_3s
, FPST_STD_F16
, gen_helper_gvec_fmaxnum_h
)
1025 WRAP_FP_GVEC(gen_VMINNM_fp32_3s
, FPST_STD
, gen_helper_gvec_fminnum_s
)
1026 WRAP_FP_GVEC(gen_VMINNM_fp16_3s
, FPST_STD_F16
, gen_helper_gvec_fminnum_h
)
1028 static bool trans_VMAXNM_fp_3s(DisasContext
*s
, arg_3same
*a
)
1030 if (!arm_dc_feature(s
, ARM_FEATURE_V8
)) {
1034 if (a
->size
== MO_16
) {
1035 if (!dc_isar_feature(aa32_fp16_arith
, s
)) {
1038 return do_3same(s
, a
, gen_VMAXNM_fp16_3s
);
1040 return do_3same(s
, a
, gen_VMAXNM_fp32_3s
);
1043 static bool trans_VMINNM_fp_3s(DisasContext
*s
, arg_3same
*a
)
1045 if (!arm_dc_feature(s
, ARM_FEATURE_V8
)) {
1049 if (a
->size
== MO_16
) {
1050 if (!dc_isar_feature(aa32_fp16_arith
, s
)) {
1053 return do_3same(s
, a
, gen_VMINNM_fp16_3s
);
1055 return do_3same(s
, a
, gen_VMINNM_fp32_3s
);
1058 static bool do_vector_2sh(DisasContext
*s
, arg_2reg_shift
*a
, GVecGen2iFn
*fn
)
1060 /* Handle a 2-reg-shift insn which can be vectorized. */
1061 int vec_size
= a
->q
? 16 : 8;
1062 int rd_ofs
= neon_full_reg_offset(a
->vd
);
1063 int rm_ofs
= neon_full_reg_offset(a
->vm
);
1065 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
1069 /* UNDEF accesses to D16-D31 if they don't exist. */
1070 if (!dc_isar_feature(aa32_simd_r32
, s
) &&
1071 ((a
->vd
| a
->vm
) & 0x10)) {
1075 if ((a
->vm
| a
->vd
) & a
->q
) {
1079 if (!vfp_access_check(s
)) {
1083 fn(a
->size
, rd_ofs
, rm_ofs
, a
->shift
, vec_size
, vec_size
);
1087 #define DO_2SH(INSN, FUNC) \
1088 static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
1090 return do_vector_2sh(s, a, FUNC); \
1093 DO_2SH(VSHL, tcg_gen_gvec_shli)
1094 DO_2SH(VSLI
, gen_gvec_sli
)
1095 DO_2SH(VSRI
, gen_gvec_sri
)
1096 DO_2SH(VSRA_S
, gen_gvec_ssra
)
1097 DO_2SH(VSRA_U
, gen_gvec_usra
)
1098 DO_2SH(VRSHR_S
, gen_gvec_srshr
)
1099 DO_2SH(VRSHR_U
, gen_gvec_urshr
)
1100 DO_2SH(VRSRA_S
, gen_gvec_srsra
)
1101 DO_2SH(VRSRA_U
, gen_gvec_ursra
)
1102 DO_2SH(VSHR_S
, gen_gvec_sshr
)
1103 DO_2SH(VSHR_U
, gen_gvec_ushr
)
1104 DO_2SH(VQSHLU
, gen_neon_sqshlui
)
1105 DO_2SH(VQSHL_U
, gen_neon_uqshli
)
1106 DO_2SH(VQSHL_S
, gen_neon_sqshli
)
1108 static bool do_2shift_narrow_64(DisasContext
*s
, arg_2reg_shift
*a
,
1109 NeonGenTwo64OpFn
*shiftfn
,
1110 NeonGenOne64OpEnvFn
*narrowfn
)
1112 /* 2-reg-and-shift narrowing-shift operations, size == 3 case */
1113 TCGv_i64 constimm
, rm1
, rm2
, rd
;
1115 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
1119 /* UNDEF accesses to D16-D31 if they don't exist. */
1120 if (!dc_isar_feature(aa32_simd_r32
, s
) &&
1121 ((a
->vd
| a
->vm
) & 0x10)) {
1129 if (!vfp_access_check(s
)) {
    /*
     * This is always a right shift, and the shiftfn is always a
     * left-shift helper, which thus needs the negated shift count.
     */
    constimm = tcg_constant_i64(-a->shift);
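    /*
     * For example, VSHRN with shift == 8 passes constimm == -8 to
     * gen_ushl_i64, whose USHL semantics treat a negative count as a
     * right shift, giving the required right shift by 8.
     */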
1138 rm1
= tcg_temp_new_i64();
1139 rm2
= tcg_temp_new_i64();
1140 rd
= tcg_temp_new_i64();
1142 /* Load both inputs first to avoid potential overwrite if rm == rd */
1143 read_neon_element64(rm1
, a
->vm
, 0, MO_64
);
1144 read_neon_element64(rm2
, a
->vm
, 1, MO_64
);
1146 shiftfn(rm1
, rm1
, constimm
);
1147 narrowfn(rd
, tcg_env
, rm1
);
1148 write_neon_element64(rd
, a
->vd
, 0, MO_32
);
1150 shiftfn(rm2
, rm2
, constimm
);
1151 narrowfn(rd
, tcg_env
, rm2
);
1152 write_neon_element64(rd
, a
->vd
, 1, MO_32
);
1157 static bool do_2shift_narrow_32(DisasContext
*s
, arg_2reg_shift
*a
,
1158 NeonGenTwoOpFn
*shiftfn
,
1159 NeonGenOne64OpEnvFn
*narrowfn
)
1161 /* 2-reg-and-shift narrowing-shift operations, size < 3 case */
1162 TCGv_i32 constimm
, rm1
, rm2
, rm3
, rm4
;
1166 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
1170 /* UNDEF accesses to D16-D31 if they don't exist. */
1171 if (!dc_isar_feature(aa32_simd_r32
, s
) &&
1172 ((a
->vd
| a
->vm
) & 0x10)) {
1180 if (!vfp_access_check(s
)) {
    /*
     * This is always a right shift, and the shiftfn is always a
     * left-shift helper, which thus needs the negated shift count
     * duplicated into each lane of the immediate value.
     */
1190 imm
= (uint16_t)(-a
->shift
);
1196 constimm
= tcg_constant_i32(imm
);
1198 /* Load all inputs first to avoid potential overwrite */
1199 rm1
= tcg_temp_new_i32();
1200 rm2
= tcg_temp_new_i32();
1201 rm3
= tcg_temp_new_i32();
1202 rm4
= tcg_temp_new_i32();
1203 read_neon_element32(rm1
, a
->vm
, 0, MO_32
);
1204 read_neon_element32(rm2
, a
->vm
, 1, MO_32
);
1205 read_neon_element32(rm3
, a
->vm
, 2, MO_32
);
1206 read_neon_element32(rm4
, a
->vm
, 3, MO_32
);
1207 rtmp
= tcg_temp_new_i64();
1209 shiftfn(rm1
, rm1
, constimm
);
1210 shiftfn(rm2
, rm2
, constimm
);
1212 tcg_gen_concat_i32_i64(rtmp
, rm1
, rm2
);
1214 narrowfn(rtmp
, tcg_env
, rtmp
);
1215 write_neon_element64(rtmp
, a
->vd
, 0, MO_32
);
1217 shiftfn(rm3
, rm3
, constimm
);
1218 shiftfn(rm4
, rm4
, constimm
);
1220 tcg_gen_concat_i32_i64(rtmp
, rm3
, rm4
);
1222 narrowfn(rtmp
, tcg_env
, rtmp
);
1223 write_neon_element64(rtmp
, a
->vd
, 1, MO_32
);
1227 #define DO_2SN_64(INSN, FUNC, NARROWFUNC) \
1228 static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
1230 return do_2shift_narrow_64(s, a, FUNC, NARROWFUNC); \
1232 #define DO_2SN_32(INSN, FUNC, NARROWFUNC) \
1233 static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
1235 return do_2shift_narrow_32(s, a, FUNC, NARROWFUNC); \
1238 static void gen_neon_narrow_u32(TCGv_i64 dest
, TCGv_ptr env
, TCGv_i64 src
)
1240 tcg_gen_ext32u_i64(dest
, src
);
1243 static void gen_neon_narrow_u16(TCGv_i64 dest
, TCGv_ptr env
, TCGv_i64 src
)
1245 gen_helper_neon_narrow_u16(dest
, src
);
1248 static void gen_neon_narrow_u8(TCGv_i64 dest
, TCGv_ptr env
, TCGv_i64 src
)
1250 gen_helper_neon_narrow_u8(dest
, src
);
1253 DO_2SN_64(VSHRN_64
, gen_ushl_i64
, gen_neon_narrow_u32
)
1254 DO_2SN_32(VSHRN_32
, gen_ushl_i32
, gen_neon_narrow_u16
)
1255 DO_2SN_32(VSHRN_16
, gen_helper_neon_shl_u16
, gen_neon_narrow_u8
)
1257 DO_2SN_64(VRSHRN_64
, gen_helper_neon_rshl_u64
, gen_neon_narrow_u32
)
1258 DO_2SN_32(VRSHRN_32
, gen_helper_neon_rshl_u32
, gen_neon_narrow_u16
)
1259 DO_2SN_32(VRSHRN_16
, gen_helper_neon_rshl_u16
, gen_neon_narrow_u8
)
1261 DO_2SN_64(VQSHRUN_64
, gen_sshl_i64
, gen_helper_neon_unarrow_sat32
)
1262 DO_2SN_32(VQSHRUN_32
, gen_sshl_i32
, gen_helper_neon_unarrow_sat16
)
1263 DO_2SN_32(VQSHRUN_16
, gen_helper_neon_shl_s16
, gen_helper_neon_unarrow_sat8
)
1265 DO_2SN_64(VQRSHRUN_64
, gen_helper_neon_rshl_s64
, gen_helper_neon_unarrow_sat32
)
1266 DO_2SN_32(VQRSHRUN_32
, gen_helper_neon_rshl_s32
, gen_helper_neon_unarrow_sat16
)
1267 DO_2SN_32(VQRSHRUN_16
, gen_helper_neon_rshl_s16
, gen_helper_neon_unarrow_sat8
)
1268 DO_2SN_64(VQSHRN_S64
, gen_sshl_i64
, gen_helper_neon_narrow_sat_s32
)
1269 DO_2SN_32(VQSHRN_S32
, gen_sshl_i32
, gen_helper_neon_narrow_sat_s16
)
1270 DO_2SN_32(VQSHRN_S16
, gen_helper_neon_shl_s16
, gen_helper_neon_narrow_sat_s8
)
1272 DO_2SN_64(VQRSHRN_S64
, gen_helper_neon_rshl_s64
, gen_helper_neon_narrow_sat_s32
)
1273 DO_2SN_32(VQRSHRN_S32
, gen_helper_neon_rshl_s32
, gen_helper_neon_narrow_sat_s16
)
1274 DO_2SN_32(VQRSHRN_S16
, gen_helper_neon_rshl_s16
, gen_helper_neon_narrow_sat_s8
)
1276 DO_2SN_64(VQSHRN_U64
, gen_ushl_i64
, gen_helper_neon_narrow_sat_u32
)
1277 DO_2SN_32(VQSHRN_U32
, gen_ushl_i32
, gen_helper_neon_narrow_sat_u16
)
1278 DO_2SN_32(VQSHRN_U16
, gen_helper_neon_shl_u16
, gen_helper_neon_narrow_sat_u8
)
1280 DO_2SN_64(VQRSHRN_U64
, gen_helper_neon_rshl_u64
, gen_helper_neon_narrow_sat_u32
)
1281 DO_2SN_32(VQRSHRN_U32
, gen_helper_neon_rshl_u32
, gen_helper_neon_narrow_sat_u16
)
1282 DO_2SN_32(VQRSHRN_U16
, gen_helper_neon_rshl_u16
, gen_helper_neon_narrow_sat_u8
)
1284 static bool do_vshll_2sh(DisasContext
*s
, arg_2reg_shift
*a
,
1285 NeonGenWidenFn
*widenfn
, bool u
)
1289 uint64_t widen_mask
= 0;
1291 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
1295 /* UNDEF accesses to D16-D31 if they don't exist. */
1296 if (!dc_isar_feature(aa32_simd_r32
, s
) &&
1297 ((a
->vd
| a
->vm
) & 0x10)) {
1305 if (!vfp_access_check(s
)) {
    /*
     * This is a widen-and-shift operation. The shift is always less
     * than the width of the source type, so after widening the input
     * vector we can simply shift the whole 64-bit widened register,
     * and then clear the potential overflow bits resulting from left
     * bits of the narrow input appearing as right bits of the left
     * neighbour narrow input. Calculate a mask of bits to clear.
     */
    if ((a->shift != 0) && (a->size < 2 || u)) {
        int esize = 8 << a->size;
        widen_mask = MAKE_64BIT_MASK(0, esize);
        widen_mask >>= esize - a->shift;
        widen_mask = dup_const(a->size + 1, widen_mask);
    }
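    /*
     * Worked example: size == 0 (8-bit elements), shift == 3, unsigned.
     * esize == 8, so widen_mask == 0xff >> 5 == 0x07, replicated to
     * 16-bit lanes as 0x0007000700070007; after the 64-bit left shift
     * these are exactly the bits leaked in from the neighbouring byte,
     * and they are cleared below via tcg_gen_andi_i64(..., ~widen_mask).
     */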
1324 rm0
= tcg_temp_new_i32();
1325 rm1
= tcg_temp_new_i32();
1326 read_neon_element32(rm0
, a
->vm
, 0, MO_32
);
1327 read_neon_element32(rm1
, a
->vm
, 1, MO_32
);
1328 tmp
= tcg_temp_new_i64();
1331 if (a
->shift
!= 0) {
1332 tcg_gen_shli_i64(tmp
, tmp
, a
->shift
);
1333 tcg_gen_andi_i64(tmp
, tmp
, ~widen_mask
);
1335 write_neon_element64(tmp
, a
->vd
, 0, MO_64
);
1338 if (a
->shift
!= 0) {
1339 tcg_gen_shli_i64(tmp
, tmp
, a
->shift
);
1340 tcg_gen_andi_i64(tmp
, tmp
, ~widen_mask
);
1342 write_neon_element64(tmp
, a
->vd
, 1, MO_64
);
1346 static bool trans_VSHLL_S_2sh(DisasContext
*s
, arg_2reg_shift
*a
)
1348 static NeonGenWidenFn
* const widenfn
[] = {
1349 gen_helper_neon_widen_s8
,
1350 gen_helper_neon_widen_s16
,
1351 tcg_gen_ext_i32_i64
,
1353 return do_vshll_2sh(s
, a
, widenfn
[a
->size
], false);
1356 static bool trans_VSHLL_U_2sh(DisasContext
*s
, arg_2reg_shift
*a
)
1358 static NeonGenWidenFn
* const widenfn
[] = {
1359 gen_helper_neon_widen_u8
,
1360 gen_helper_neon_widen_u16
,
1361 tcg_gen_extu_i32_i64
,
1363 return do_vshll_2sh(s
, a
, widenfn
[a
->size
], true);
1366 static bool do_fp_2sh(DisasContext
*s
, arg_2reg_shift
*a
,
1367 gen_helper_gvec_2_ptr
*fn
)
1369 /* FP operations in 2-reg-and-shift group */
1370 int vec_size
= a
->q
? 16 : 8;
1371 int rd_ofs
= neon_full_reg_offset(a
->vd
);
1372 int rm_ofs
= neon_full_reg_offset(a
->vm
);
1375 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
1379 if (a
->size
== MO_16
) {
1380 if (!dc_isar_feature(aa32_fp16_arith
, s
)) {
1385 /* UNDEF accesses to D16-D31 if they don't exist. */
1386 if (!dc_isar_feature(aa32_simd_r32
, s
) &&
1387 ((a
->vd
| a
->vm
) & 0x10)) {
1391 if ((a
->vm
| a
->vd
) & a
->q
) {
1395 if (!vfp_access_check(s
)) {
1399 fpst
= fpstatus_ptr(a
->size
== MO_16
? FPST_STD_F16
: FPST_STD
);
1400 tcg_gen_gvec_2_ptr(rd_ofs
, rm_ofs
, fpst
, vec_size
, vec_size
, a
->shift
, fn
);
1404 #define DO_FP_2SH(INSN, FUNC) \
1405 static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
1407 return do_fp_2sh(s, a, FUNC); \
1410 DO_FP_2SH(VCVT_SF
, gen_helper_gvec_vcvt_sf
)
1411 DO_FP_2SH(VCVT_UF
, gen_helper_gvec_vcvt_uf
)
1412 DO_FP_2SH(VCVT_FS
, gen_helper_gvec_vcvt_fs
)
1413 DO_FP_2SH(VCVT_FU
, gen_helper_gvec_vcvt_fu
)
1415 DO_FP_2SH(VCVT_SH
, gen_helper_gvec_vcvt_sh
)
1416 DO_FP_2SH(VCVT_UH
, gen_helper_gvec_vcvt_uh
)
1417 DO_FP_2SH(VCVT_HS
, gen_helper_gvec_vcvt_hs
)
1418 DO_FP_2SH(VCVT_HU
, gen_helper_gvec_vcvt_hu
)
1420 static bool do_1reg_imm(DisasContext
*s
, arg_1reg_imm
*a
,
1424 int reg_ofs
, vec_size
;
1426 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
1430 /* UNDEF accesses to D16-D31 if they don't exist. */
1431 if (!dc_isar_feature(aa32_simd_r32
, s
) && (a
->vd
& 0x10)) {
1439 if (!vfp_access_check(s
)) {
1443 reg_ofs
= neon_full_reg_offset(a
->vd
);
1444 vec_size
= a
->q
? 16 : 8;
1445 imm
= asimd_imm_const(a
->imm
, a
->cmode
, a
->op
);
1447 fn(MO_64
, reg_ofs
, reg_ofs
, imm
, vec_size
, vec_size
);
1451 static void gen_VMOV_1r(unsigned vece
, uint32_t dofs
, uint32_t aofs
,
1452 int64_t c
, uint32_t oprsz
, uint32_t maxsz
)
1454 tcg_gen_gvec_dup_imm(MO_64
, dofs
, oprsz
, maxsz
, c
);
1457 static bool trans_Vimm_1r(DisasContext
*s
, arg_1reg_imm
*a
)
1459 /* Handle decode of cmode/op here between VORR/VBIC/VMOV */
1462 if ((a
->cmode
& 1) && a
->cmode
< 12) {
1463 /* for op=1, the imm will be inverted, so BIC becomes AND. */
1464 fn
= a
->op
? tcg_gen_gvec_andi
: tcg_gen_gvec_ori
;
1466 /* There is one unallocated cmode/op combination in this space */
1467 if (a
->cmode
== 15 && a
->op
== 1) {
1472 return do_1reg_imm(s
, a
, fn
);
1475 static bool do_prewiden_3d(DisasContext
*s
, arg_3diff
*a
,
1476 NeonGenWidenFn
*widenfn
,
1477 NeonGenTwo64OpFn
*opfn
,
1478 int src1_mop
, int src2_mop
)
    /* 3-regs different lengths, prewidening case (VADDL/VSUBL/VADDW/VSUBW) */
1481 TCGv_i64 rn0_64
, rn1_64
, rm_64
;
1483 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
1487 /* UNDEF accesses to D16-D31 if they don't exist. */
1488 if (!dc_isar_feature(aa32_simd_r32
, s
) &&
1489 ((a
->vd
| a
->vn
| a
->vm
) & 0x10)) {
1494 /* size == 3 case, which is an entirely different insn group */
1498 if ((a
->vd
& 1) || (src1_mop
== MO_UQ
&& (a
->vn
& 1))) {
1502 if (!vfp_access_check(s
)) {
1506 rn0_64
= tcg_temp_new_i64();
1507 rn1_64
= tcg_temp_new_i64();
1508 rm_64
= tcg_temp_new_i64();
1510 if (src1_mop
>= 0) {
1511 read_neon_element64(rn0_64
, a
->vn
, 0, src1_mop
);
1513 TCGv_i32 tmp
= tcg_temp_new_i32();
1514 read_neon_element32(tmp
, a
->vn
, 0, MO_32
);
1515 widenfn(rn0_64
, tmp
);
1517 if (src2_mop
>= 0) {
1518 read_neon_element64(rm_64
, a
->vm
, 0, src2_mop
);
1520 TCGv_i32 tmp
= tcg_temp_new_i32();
1521 read_neon_element32(tmp
, a
->vm
, 0, MO_32
);
1522 widenfn(rm_64
, tmp
);
1525 opfn(rn0_64
, rn0_64
, rm_64
);
1528 * Load second pass inputs before storing the first pass result, to
1529 * avoid incorrect results if a narrow input overlaps with the result.
1531 if (src1_mop
>= 0) {
1532 read_neon_element64(rn1_64
, a
->vn
, 1, src1_mop
);
1534 TCGv_i32 tmp
= tcg_temp_new_i32();
1535 read_neon_element32(tmp
, a
->vn
, 1, MO_32
);
1536 widenfn(rn1_64
, tmp
);
1538 if (src2_mop
>= 0) {
1539 read_neon_element64(rm_64
, a
->vm
, 1, src2_mop
);
1541 TCGv_i32 tmp
= tcg_temp_new_i32();
1542 read_neon_element32(tmp
, a
->vm
, 1, MO_32
);
1543 widenfn(rm_64
, tmp
);
1546 write_neon_element64(rn0_64
, a
->vd
, 0, MO_64
);
1548 opfn(rn1_64
, rn1_64
, rm_64
);
1549 write_neon_element64(rn1_64
, a
->vd
, 1, MO_64
);
1554 #define DO_PREWIDEN(INSN, S, OP, SRC1WIDE, SIGN) \
1555 static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \
1557 static NeonGenWidenFn * const widenfn[] = { \
1558 gen_helper_neon_widen_##S##8, \
1559 gen_helper_neon_widen_##S##16, \
1562 static NeonGenTwo64OpFn * const addfn[] = { \
1563 gen_helper_neon_##OP##l_u16, \
1564 gen_helper_neon_##OP##l_u32, \
1565 tcg_gen_##OP##_i64, \
1568 int narrow_mop = a->size == MO_32 ? MO_32 | SIGN : -1; \
1569 return do_prewiden_3d(s, a, widenfn[a->size], addfn[a->size], \
1570 SRC1WIDE ? MO_UQ : narrow_mop, \
1574 DO_PREWIDEN(VADDL_S
, s
, add
, false, MO_SIGN
)
1575 DO_PREWIDEN(VADDL_U
, u
, add
, false, 0)
1576 DO_PREWIDEN(VSUBL_S
, s
, sub
, false, MO_SIGN
)
1577 DO_PREWIDEN(VSUBL_U
, u
, sub
, false, 0)
1578 DO_PREWIDEN(VADDW_S
, s
, add
, true, MO_SIGN
)
1579 DO_PREWIDEN(VADDW_U
, u
, add
, true, 0)
1580 DO_PREWIDEN(VSUBW_S
, s
, sub
, true, MO_SIGN
)
1581 DO_PREWIDEN(VSUBW_U
, u
, sub
, true, 0)
1583 static bool do_narrow_3d(DisasContext
*s
, arg_3diff
*a
,
1584 NeonGenTwo64OpFn
*opfn
, NeonGenNarrowFn
*narrowfn
)
1586 /* 3-regs different lengths, narrowing (VADDHN/VSUBHN/VRADDHN/VRSUBHN) */
1587 TCGv_i64 rn_64
, rm_64
;
1590 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
1594 /* UNDEF accesses to D16-D31 if they don't exist. */
1595 if (!dc_isar_feature(aa32_simd_r32
, s
) &&
1596 ((a
->vd
| a
->vn
| a
->vm
) & 0x10)) {
1600 if (!opfn
|| !narrowfn
) {
1601 /* size == 3 case, which is an entirely different insn group */
1605 if ((a
->vn
| a
->vm
) & 1) {
1609 if (!vfp_access_check(s
)) {
1613 rn_64
= tcg_temp_new_i64();
1614 rm_64
= tcg_temp_new_i64();
1615 rd0
= tcg_temp_new_i32();
1616 rd1
= tcg_temp_new_i32();
1618 read_neon_element64(rn_64
, a
->vn
, 0, MO_64
);
1619 read_neon_element64(rm_64
, a
->vm
, 0, MO_64
);
1621 opfn(rn_64
, rn_64
, rm_64
);
1623 narrowfn(rd0
, rn_64
);
1625 read_neon_element64(rn_64
, a
->vn
, 1, MO_64
);
1626 read_neon_element64(rm_64
, a
->vm
, 1, MO_64
);
1628 opfn(rn_64
, rn_64
, rm_64
);
1630 narrowfn(rd1
, rn_64
);
1632 write_neon_element32(rd0
, a
->vd
, 0, MO_32
);
1633 write_neon_element32(rd1
, a
->vd
, 1, MO_32
);
1638 #define DO_NARROW_3D(INSN, OP, NARROWTYPE, EXTOP) \
1639 static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \
1641 static NeonGenTwo64OpFn * const addfn[] = { \
1642 gen_helper_neon_##OP##l_u16, \
1643 gen_helper_neon_##OP##l_u32, \
1644 tcg_gen_##OP##_i64, \
1647 static NeonGenNarrowFn * const narrowfn[] = { \
1648 gen_helper_neon_##NARROWTYPE##_high_u8, \
1649 gen_helper_neon_##NARROWTYPE##_high_u16, \
1653 return do_narrow_3d(s, a, addfn[a->size], narrowfn[a->size]); \
1656 static void gen_narrow_round_high_u32(TCGv_i32 rd
, TCGv_i64 rn
)
1658 tcg_gen_addi_i64(rn
, rn
, 1u << 31);
1659 tcg_gen_extrh_i64_i32(rd
, rn
);
1662 DO_NARROW_3D(VADDHN
, add
, narrow
, tcg_gen_extrh_i64_i32
)
1663 DO_NARROW_3D(VSUBHN
, sub
, narrow
, tcg_gen_extrh_i64_i32
)
1664 DO_NARROW_3D(VRADDHN
, add
, narrow_round
, gen_narrow_round_high_u32
)
1665 DO_NARROW_3D(VRSUBHN
, sub
, narrow_round
, gen_narrow_round_high_u32
)
1667 static bool do_long_3d(DisasContext
*s
, arg_3diff
*a
,
1668 NeonGenTwoOpWidenFn
*opfn
,
1669 NeonGenTwo64OpFn
*accfn
)
    /*
     * 3-regs different lengths, long operations.
     * These perform an operation on two inputs that returns a double-width
     * result, and then possibly perform an accumulation operation of
     * that result into the double-width destination.
     */
1677 TCGv_i64 rd0
, rd1
, tmp
;
1680 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
1684 /* UNDEF accesses to D16-D31 if they don't exist. */
1685 if (!dc_isar_feature(aa32_simd_r32
, s
) &&
1686 ((a
->vd
| a
->vn
| a
->vm
) & 0x10)) {
1691 /* size == 3 case, which is an entirely different insn group */
1699 if (!vfp_access_check(s
)) {
1703 rd0
= tcg_temp_new_i64();
1704 rd1
= tcg_temp_new_i64();
1706 rn
= tcg_temp_new_i32();
1707 rm
= tcg_temp_new_i32();
1708 read_neon_element32(rn
, a
->vn
, 0, MO_32
);
1709 read_neon_element32(rm
, a
->vm
, 0, MO_32
);
1712 read_neon_element32(rn
, a
->vn
, 1, MO_32
);
1713 read_neon_element32(rm
, a
->vm
, 1, MO_32
);
1716 /* Don't store results until after all loads: they might overlap */
1718 tmp
= tcg_temp_new_i64();
1719 read_neon_element64(tmp
, a
->vd
, 0, MO_64
);
1720 accfn(rd0
, tmp
, rd0
);
1721 read_neon_element64(tmp
, a
->vd
, 1, MO_64
);
1722 accfn(rd1
, tmp
, rd1
);
1725 write_neon_element64(rd0
, a
->vd
, 0, MO_64
);
1726 write_neon_element64(rd1
, a
->vd
, 1, MO_64
);
1731 static bool trans_VABDL_S_3d(DisasContext
*s
, arg_3diff
*a
)
1733 static NeonGenTwoOpWidenFn
* const opfn
[] = {
1734 gen_helper_neon_abdl_s16
,
1735 gen_helper_neon_abdl_s32
,
1736 gen_helper_neon_abdl_s64
,
1740 return do_long_3d(s
, a
, opfn
[a
->size
], NULL
);
1743 static bool trans_VABDL_U_3d(DisasContext
*s
, arg_3diff
*a
)
1745 static NeonGenTwoOpWidenFn
* const opfn
[] = {
1746 gen_helper_neon_abdl_u16
,
1747 gen_helper_neon_abdl_u32
,
1748 gen_helper_neon_abdl_u64
,
1752 return do_long_3d(s
, a
, opfn
[a
->size
], NULL
);
1755 static bool trans_VABAL_S_3d(DisasContext
*s
, arg_3diff
*a
)
1757 static NeonGenTwoOpWidenFn
* const opfn
[] = {
1758 gen_helper_neon_abdl_s16
,
1759 gen_helper_neon_abdl_s32
,
1760 gen_helper_neon_abdl_s64
,
1763 static NeonGenTwo64OpFn
* const addfn
[] = {
1764 gen_helper_neon_addl_u16
,
1765 gen_helper_neon_addl_u32
,
1770 return do_long_3d(s
, a
, opfn
[a
->size
], addfn
[a
->size
]);
1773 static bool trans_VABAL_U_3d(DisasContext
*s
, arg_3diff
*a
)
1775 static NeonGenTwoOpWidenFn
* const opfn
[] = {
1776 gen_helper_neon_abdl_u16
,
1777 gen_helper_neon_abdl_u32
,
1778 gen_helper_neon_abdl_u64
,
1781 static NeonGenTwo64OpFn
* const addfn
[] = {
1782 gen_helper_neon_addl_u16
,
1783 gen_helper_neon_addl_u32
,
1788 return do_long_3d(s
, a
, opfn
[a
->size
], addfn
[a
->size
]);
1791 static void gen_mull_s32(TCGv_i64 rd
, TCGv_i32 rn
, TCGv_i32 rm
)
1793 TCGv_i32 lo
= tcg_temp_new_i32();
1794 TCGv_i32 hi
= tcg_temp_new_i32();
1796 tcg_gen_muls2_i32(lo
, hi
, rn
, rm
);
1797 tcg_gen_concat_i32_i64(rd
, lo
, hi
);
1800 static void gen_mull_u32(TCGv_i64 rd
, TCGv_i32 rn
, TCGv_i32 rm
)
1802 TCGv_i32 lo
= tcg_temp_new_i32();
1803 TCGv_i32 hi
= tcg_temp_new_i32();
1805 tcg_gen_mulu2_i32(lo
, hi
, rn
, rm
);
1806 tcg_gen_concat_i32_i64(rd
, lo
, hi
);
1809 static bool trans_VMULL_S_3d(DisasContext
*s
, arg_3diff
*a
)
1811 static NeonGenTwoOpWidenFn
* const opfn
[] = {
1812 gen_helper_neon_mull_s8
,
1813 gen_helper_neon_mull_s16
,
1818 return do_long_3d(s
, a
, opfn
[a
->size
], NULL
);
1821 static bool trans_VMULL_U_3d(DisasContext
*s
, arg_3diff
*a
)
1823 static NeonGenTwoOpWidenFn
* const opfn
[] = {
1824 gen_helper_neon_mull_u8
,
1825 gen_helper_neon_mull_u16
,
1830 return do_long_3d(s
, a
, opfn
[a
->size
], NULL
);
1833 #define DO_VMLAL(INSN,MULL,ACC) \
1834 static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \
1836 static NeonGenTwoOpWidenFn * const opfn[] = { \
1837 gen_helper_neon_##MULL##8, \
1838 gen_helper_neon_##MULL##16, \
1842 static NeonGenTwo64OpFn * const accfn[] = { \
1843 gen_helper_neon_##ACC##l_u16, \
1844 gen_helper_neon_##ACC##l_u32, \
1845 tcg_gen_##ACC##_i64, \
1848 return do_long_3d(s, a, opfn[a->size], accfn[a->size]); \
1851 DO_VMLAL(VMLAL_S
,mull_s
,add
)
1852 DO_VMLAL(VMLAL_U
,mull_u
,add
)
1853 DO_VMLAL(VMLSL_S
,mull_s
,sub
)
1854 DO_VMLAL(VMLSL_U
,mull_u
,sub
)
1856 static void gen_VQDMULL_16(TCGv_i64 rd
, TCGv_i32 rn
, TCGv_i32 rm
)
1858 gen_helper_neon_mull_s16(rd
, rn
, rm
);
1859 gen_helper_neon_addl_saturate_s32(rd
, tcg_env
, rd
, rd
);
1862 static void gen_VQDMULL_32(TCGv_i64 rd
, TCGv_i32 rn
, TCGv_i32 rm
)
1864 gen_mull_s32(rd
, rn
, rm
);
1865 gen_helper_neon_addl_saturate_s64(rd
, tcg_env
, rd
, rd
);
1868 static bool trans_VQDMULL_3d(DisasContext
*s
, arg_3diff
*a
)
1870 static NeonGenTwoOpWidenFn
* const opfn
[] = {
1877 return do_long_3d(s
, a
, opfn
[a
->size
], NULL
);
1880 static void gen_VQDMLAL_acc_16(TCGv_i64 rd
, TCGv_i64 rn
, TCGv_i64 rm
)
1882 gen_helper_neon_addl_saturate_s32(rd
, tcg_env
, rn
, rm
);
1885 static void gen_VQDMLAL_acc_32(TCGv_i64 rd
, TCGv_i64 rn
, TCGv_i64 rm
)
1887 gen_helper_neon_addl_saturate_s64(rd
, tcg_env
, rn
, rm
);
1890 static bool trans_VQDMLAL_3d(DisasContext
*s
, arg_3diff
*a
)
1892 static NeonGenTwoOpWidenFn
* const opfn
[] = {
1898 static NeonGenTwo64OpFn
* const accfn
[] = {
1905 return do_long_3d(s
, a
, opfn
[a
->size
], accfn
[a
->size
]);
1908 static void gen_VQDMLSL_acc_16(TCGv_i64 rd
, TCGv_i64 rn
, TCGv_i64 rm
)
1910 gen_helper_neon_negl_u32(rm
, rm
);
1911 gen_helper_neon_addl_saturate_s32(rd
, tcg_env
, rn
, rm
);
1914 static void gen_VQDMLSL_acc_32(TCGv_i64 rd
, TCGv_i64 rn
, TCGv_i64 rm
)
1916 tcg_gen_neg_i64(rm
, rm
);
1917 gen_helper_neon_addl_saturate_s64(rd
, tcg_env
, rn
, rm
);
1920 static bool trans_VQDMLSL_3d(DisasContext
*s
, arg_3diff
*a
)
1922 static NeonGenTwoOpWidenFn
* const opfn
[] = {
1928 static NeonGenTwo64OpFn
* const accfn
[] = {
1935 return do_long_3d(s
, a
, opfn
[a
->size
], accfn
[a
->size
]);
1938 static bool trans_VMULL_P_3d(DisasContext
*s
, arg_3diff
*a
)
1940 gen_helper_gvec_3
*fn_gvec
;
1942 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
1946 /* UNDEF accesses to D16-D31 if they don't exist. */
1947 if (!dc_isar_feature(aa32_simd_r32
, s
) &&
1948 ((a
->vd
| a
->vn
| a
->vm
) & 0x10)) {
1958 fn_gvec
= gen_helper_neon_pmull_h
;
1961 if (!dc_isar_feature(aa32_pmull
, s
)) {
1964 fn_gvec
= gen_helper_gvec_pmull_q
;
1970 if (!vfp_access_check(s
)) {
1974 tcg_gen_gvec_3_ool(neon_full_reg_offset(a
->vd
),
1975 neon_full_reg_offset(a
->vn
),
1976 neon_full_reg_offset(a
->vm
),
1977 16, 16, 0, fn_gvec
);
1981 static void gen_neon_dup_low16(TCGv_i32 var
)
1983 TCGv_i32 tmp
= tcg_temp_new_i32();
1984 tcg_gen_ext16u_i32(var
, var
);
1985 tcg_gen_shli_i32(tmp
, var
, 16);
1986 tcg_gen_or_i32(var
, var
, tmp
);
1989 static void gen_neon_dup_high16(TCGv_i32 var
)
1991 TCGv_i32 tmp
= tcg_temp_new_i32();
1992 tcg_gen_andi_i32(var
, var
, 0xffff0000);
1993 tcg_gen_shri_i32(tmp
, var
, 16);
1994 tcg_gen_or_i32(var
, var
, tmp
);
1997 static inline TCGv_i32
neon_get_scalar(int size
, int reg
)
1999 TCGv_i32 tmp
= tcg_temp_new_i32();
2000 if (size
== MO_16
) {
2001 read_neon_element32(tmp
, reg
& 7, reg
>> 4, MO_32
);
2003 gen_neon_dup_high16(tmp
);
2005 gen_neon_dup_low16(tmp
);
2008 read_neon_element32(tmp
, reg
& 15, reg
>> 4, MO_32
);
2013 static bool do_2scalar(DisasContext
*s
, arg_2scalar
*a
,
2014 NeonGenTwoOpFn
*opfn
, NeonGenTwoOpFn
*accfn
)
2017 * Two registers and a scalar: perform an operation between
2018 * the input elements and the scalar, and then possibly
2019 * perform an accumulation operation of that result into the
2022 TCGv_i32 scalar
, tmp
;
2025 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
2029 /* UNDEF accesses to D16-D31 if they don't exist. */
2030 if (!dc_isar_feature(aa32_simd_r32
, s
) &&
2031 ((a
->vd
| a
->vn
| a
->vm
) & 0x10)) {
2036 /* Bad size (including size == 3, which is a different insn group) */
2040 if (a
->q
&& ((a
->vd
| a
->vn
) & 1)) {
2044 if (!vfp_access_check(s
)) {
2048 scalar
= neon_get_scalar(a
->size
, a
->vm
);
2049 tmp
= tcg_temp_new_i32();
2051 for (pass
= 0; pass
< (a
->q
? 4 : 2); pass
++) {
2052 read_neon_element32(tmp
, a
->vn
, pass
, MO_32
);
2053 opfn(tmp
, tmp
, scalar
);
2055 TCGv_i32 rd
= tcg_temp_new_i32();
2056 read_neon_element32(rd
, a
->vd
, pass
, MO_32
);
2057 accfn(tmp
, rd
, tmp
);
2059 write_neon_element32(tmp
, a
->vd
, pass
, MO_32
);
2064 static bool trans_VMUL_2sc(DisasContext
*s
, arg_2scalar
*a
)
2066 static NeonGenTwoOpFn
* const opfn
[] = {
2068 gen_helper_neon_mul_u16
,
2073 return do_2scalar(s
, a
, opfn
[a
->size
], NULL
);
2076 static bool trans_VMLA_2sc(DisasContext
*s
, arg_2scalar
*a
)
2078 static NeonGenTwoOpFn
* const opfn
[] = {
2080 gen_helper_neon_mul_u16
,
2084 static NeonGenTwoOpFn
* const accfn
[] = {
2086 gen_helper_neon_add_u16
,
2091 return do_2scalar(s
, a
, opfn
[a
->size
], accfn
[a
->size
]);
2094 static bool trans_VMLS_2sc(DisasContext
*s
, arg_2scalar
*a
)
2096 static NeonGenTwoOpFn
* const opfn
[] = {
2098 gen_helper_neon_mul_u16
,
2102 static NeonGenTwoOpFn
* const accfn
[] = {
2104 gen_helper_neon_sub_u16
,
2109 return do_2scalar(s
, a
, opfn
[a
->size
], accfn
[a
->size
]);
2112 static bool do_2scalar_fp_vec(DisasContext
*s
, arg_2scalar
*a
,
2113 gen_helper_gvec_3_ptr
*fn
)
2115 /* Two registers and a scalar, using gvec */
2116 int vec_size
= a
->q
? 16 : 8;
2117 int rd_ofs
= neon_full_reg_offset(a
->vd
);
2118 int rn_ofs
= neon_full_reg_offset(a
->vn
);
2123 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
2127 /* UNDEF accesses to D16-D31 if they don't exist. */
2128 if (!dc_isar_feature(aa32_simd_r32
, s
) &&
2129 ((a
->vd
| a
->vn
| a
->vm
) & 0x10)) {
2134 /* Bad size (including size == 3, which is a different insn group) */
2138 if (a
->q
&& ((a
->vd
| a
->vn
) & 1)) {
2142 if (!vfp_access_check(s
)) {
2146 /* a->vm is M:Vm, which encodes both register and index */
2147 idx
= extract32(a
->vm
, a
->size
+ 2, 2);
2148 a
->vm
= extract32(a
->vm
, 0, a
->size
+ 2);
2149 rm_ofs
= neon_full_reg_offset(a
->vm
);
2151 fpstatus
= fpstatus_ptr(a
->size
== 1 ? FPST_STD_F16
: FPST_STD
);
2152 tcg_gen_gvec_3_ptr(rd_ofs
, rn_ofs
, rm_ofs
, fpstatus
,
2153 vec_size
, vec_size
, idx
, fn
);
2157 #define DO_VMUL_F_2sc(NAME, FUNC) \
2158 static bool trans_##NAME##_F_2sc(DisasContext *s, arg_2scalar *a) \
2160 static gen_helper_gvec_3_ptr * const opfn[] = { \
2162 gen_helper_##FUNC##_h, \
2163 gen_helper_##FUNC##_s, \
2166 if (a->size == MO_16 && !dc_isar_feature(aa32_fp16_arith, s)) { \
2169 return do_2scalar_fp_vec(s, a, opfn[a->size]); \
2172 DO_VMUL_F_2sc(VMUL
, gvec_fmul_idx
)
2173 DO_VMUL_F_2sc(VMLA
, gvec_fmla_nf_idx
)
2174 DO_VMUL_F_2sc(VMLS
, gvec_fmls_nf_idx
)
2176 WRAP_ENV_FN(gen_VQDMULH_16
, gen_helper_neon_qdmulh_s16
)
2177 WRAP_ENV_FN(gen_VQDMULH_32
, gen_helper_neon_qdmulh_s32
)
2178 WRAP_ENV_FN(gen_VQRDMULH_16
, gen_helper_neon_qrdmulh_s16
)
2179 WRAP_ENV_FN(gen_VQRDMULH_32
, gen_helper_neon_qrdmulh_s32
)
2181 static bool trans_VQDMULH_2sc(DisasContext
*s
, arg_2scalar
*a
)
2183 static NeonGenTwoOpFn
* const opfn
[] = {
2190 return do_2scalar(s
, a
, opfn
[a
->size
], NULL
);
2193 static bool trans_VQRDMULH_2sc(DisasContext
*s
, arg_2scalar
*a
)
2195 static NeonGenTwoOpFn
* const opfn
[] = {
2202 return do_2scalar(s
, a
, opfn
[a
->size
], NULL
);
2205 static bool do_vqrdmlah_2sc(DisasContext
*s
, arg_2scalar
*a
,
2206 NeonGenThreeOpEnvFn
*opfn
)
    /*
     * VQRDMLAH/VQRDMLSH: this is like do_2scalar, but the opfn
     * performs a kind of fused op-then-accumulate using a helper
     * function that takes all of rd, rn and the scalar at once.
     */
2213 TCGv_i32 scalar
, rn
, rd
;
2216 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
2220 if (!dc_isar_feature(aa32_rdm
, s
)) {
2224 /* UNDEF accesses to D16-D31 if they don't exist. */
2225 if (!dc_isar_feature(aa32_simd_r32
, s
) &&
2226 ((a
->vd
| a
->vn
| a
->vm
) & 0x10)) {
2231 /* Bad size (including size == 3, which is a different insn group) */
2235 if (a
->q
&& ((a
->vd
| a
->vn
) & 1)) {
2239 if (!vfp_access_check(s
)) {
2243 scalar
= neon_get_scalar(a
->size
, a
->vm
);
2244 rn
= tcg_temp_new_i32();
2245 rd
= tcg_temp_new_i32();
2247 for (pass
= 0; pass
< (a
->q
? 4 : 2); pass
++) {
2248 read_neon_element32(rn
, a
->vn
, pass
, MO_32
);
2249 read_neon_element32(rd
, a
->vd
, pass
, MO_32
);
2250 opfn(rd
, tcg_env
, rn
, scalar
, rd
);
2251 write_neon_element32(rd
, a
->vd
, pass
, MO_32
);
2256 static bool trans_VQRDMLAH_2sc(DisasContext
*s
, arg_2scalar
*a
)
2258 static NeonGenThreeOpEnvFn
*opfn
[] = {
2260 gen_helper_neon_qrdmlah_s16
,
2261 gen_helper_neon_qrdmlah_s32
,
2264 return do_vqrdmlah_2sc(s
, a
, opfn
[a
->size
]);
2267 static bool trans_VQRDMLSH_2sc(DisasContext
*s
, arg_2scalar
*a
)
2269 static NeonGenThreeOpEnvFn
*opfn
[] = {
2271 gen_helper_neon_qrdmlsh_s16
,
2272 gen_helper_neon_qrdmlsh_s32
,
2275 return do_vqrdmlah_2sc(s
, a
, opfn
[a
->size
]);
2278 static bool do_2scalar_long(DisasContext
*s
, arg_2scalar
*a
,
2279 NeonGenTwoOpWidenFn
*opfn
,
2280 NeonGenTwo64OpFn
*accfn
)
2283 * Two registers and a scalar, long operations: perform an
2284 * operation on the input elements and the scalar which produces
2285 * a double-width result, and then possibly perform an accumulation
2286 * operation of that result into the destination.
2288 TCGv_i32 scalar
, rn
;
2289 TCGv_i64 rn0_64
, rn1_64
;
2291 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
2295 /* UNDEF accesses to D16-D31 if they don't exist. */
2296 if (!dc_isar_feature(aa32_simd_r32
, s
) &&
2297 ((a
->vd
| a
->vn
| a
->vm
) & 0x10)) {
2302 /* Bad size (including size == 3, which is a different insn group) */
2310 if (!vfp_access_check(s
)) {
2314 scalar
= neon_get_scalar(a
->size
, a
->vm
);
2316 /* Load all inputs before writing any outputs, in case of overlap */
2317 rn
= tcg_temp_new_i32();
2318 read_neon_element32(rn
, a
->vn
, 0, MO_32
);
2319 rn0_64
= tcg_temp_new_i64();
2320 opfn(rn0_64
, rn
, scalar
);
2322 read_neon_element32(rn
, a
->vn
, 1, MO_32
);
2323 rn1_64
= tcg_temp_new_i64();
2324 opfn(rn1_64
, rn
, scalar
);
2327 TCGv_i64 t64
= tcg_temp_new_i64();
2328 read_neon_element64(t64
, a
->vd
, 0, MO_64
);
2329 accfn(rn0_64
, t64
, rn0_64
);
2330 read_neon_element64(t64
, a
->vd
, 1, MO_64
);
2331 accfn(rn1_64
, t64
, rn1_64
);
2334 write_neon_element64(rn0_64
, a
->vd
, 0, MO_64
);
2335 write_neon_element64(rn1_64
, a
->vd
, 1, MO_64
);
2339 static bool trans_VMULL_S_2sc(DisasContext
*s
, arg_2scalar
*a
)
2341 static NeonGenTwoOpWidenFn
* const opfn
[] = {
2343 gen_helper_neon_mull_s16
,
2348 return do_2scalar_long(s
, a
, opfn
[a
->size
], NULL
);
2351 static bool trans_VMULL_U_2sc(DisasContext
*s
, arg_2scalar
*a
)
2353 static NeonGenTwoOpWidenFn
* const opfn
[] = {
2355 gen_helper_neon_mull_u16
,
2360 return do_2scalar_long(s
, a
, opfn
[a
->size
], NULL
);
2363 #define DO_VMLAL_2SC(INSN, MULL, ACC) \
2364 static bool trans_##INSN##_2sc(DisasContext *s, arg_2scalar *a) \
2366 static NeonGenTwoOpWidenFn * const opfn[] = { \
2368 gen_helper_neon_##MULL##16, \
2372 static NeonGenTwo64OpFn * const accfn[] = { \
2374 gen_helper_neon_##ACC##l_u32, \
2375 tcg_gen_##ACC##_i64, \
2378 return do_2scalar_long(s, a, opfn[a->size], accfn[a->size]); \
2381 DO_VMLAL_2SC(VMLAL_S
, mull_s
, add
)
2382 DO_VMLAL_2SC(VMLAL_U
, mull_u
, add
)
2383 DO_VMLAL_2SC(VMLSL_S
, mull_s
, sub
)
2384 DO_VMLAL_2SC(VMLSL_U
, mull_u
, sub
)
static bool trans_VQDMULL_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        NULL,
        gen_VQDMULL_16,
        gen_VQDMULL_32,
        NULL,
    };

    return do_2scalar_long(s, a, opfn[a->size], NULL);
}

static bool trans_VQDMLAL_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        NULL,
        gen_VQDMULL_16,
        gen_VQDMULL_32,
        NULL,
    };
    static NeonGenTwo64OpFn * const accfn[] = {
        NULL,
        gen_VQDMLAL_acc_16,
        gen_VQDMLAL_acc_32,
        NULL,
    };

    return do_2scalar_long(s, a, opfn[a->size], accfn[a->size]);
}

static bool trans_VQDMLSL_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        NULL,
        gen_VQDMULL_16,
        gen_VQDMULL_32,
        NULL,
    };
    static NeonGenTwo64OpFn * const accfn[] = {
        NULL,
        gen_VQDMLSL_acc_16,
        gen_VQDMLSL_acc_32,
        NULL,
    };

    return do_2scalar_long(s, a, opfn[a->size], accfn[a->size]);
}

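/*
 * VEXT extracts a byte-aligned slice from the concatenation of the
 * source registers: <Vm:Vn> for the 64-bit form, <Vm+1:Vm:Vn+1:Vn>
 * for the 128-bit form, starting at byte offset imm.
 */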
static bool trans_VEXT(DisasContext *s, arg_VEXT *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vn | a->vm | a->vd) & a->q) {
        return false;
    }

    if (a->imm > 7 && !a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (!a->q) {
        /* Extract 64 bits from <Vm:Vn> */
        TCGv_i64 left, right, dest;

        left = tcg_temp_new_i64();
        right = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        read_neon_element64(right, a->vn, 0, MO_64);
        read_neon_element64(left, a->vm, 0, MO_64);
        tcg_gen_extract2_i64(dest, right, left, a->imm * 8);
        write_neon_element64(dest, a->vd, 0, MO_64);
    } else {
        /* Extract 128 bits from <Vm+1:Vm:Vn+1:Vn> */
        TCGv_i64 left, middle, right, destleft, destright;

        left = tcg_temp_new_i64();
        middle = tcg_temp_new_i64();
        right = tcg_temp_new_i64();
        destleft = tcg_temp_new_i64();
        destright = tcg_temp_new_i64();

        if (a->imm < 8) {
            read_neon_element64(right, a->vn, 0, MO_64);
            read_neon_element64(middle, a->vn, 1, MO_64);
            tcg_gen_extract2_i64(destright, right, middle, a->imm * 8);
            read_neon_element64(left, a->vm, 0, MO_64);
            tcg_gen_extract2_i64(destleft, middle, left, a->imm * 8);
        } else {
            read_neon_element64(right, a->vn, 1, MO_64);
            read_neon_element64(middle, a->vm, 0, MO_64);
            tcg_gen_extract2_i64(destright, right, middle, (a->imm - 8) * 8);
            read_neon_element64(left, a->vm, 1, MO_64);
            tcg_gen_extract2_i64(destleft, middle, left, (a->imm - 8) * 8);
        }

        write_neon_element64(destright, a->vd, 0, MO_64);
        write_neon_element64(destleft, a->vd, 1, MO_64);
    }
    return true;
}

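/*
 * VTBL/VTBX: table lookup using a list of up to four D registers
 * starting at Vn. Out-of-range indices produce 0 for VTBL; for VTBX
 * (a->op set) the existing destination byte is kept instead.
 */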
static bool trans_VTBL(DisasContext *s, arg_VTBL *a)
{
    TCGv_i64 val, def;
    TCGv_i32 desc;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vn + a->len + 1) > 32) {
        /*
         * This is UNPREDICTABLE; we choose to UNDEF to avoid the
         * helper function running off the end of the register file.
         */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    desc = tcg_constant_i32((a->vn << 2) | a->len);
    def = tcg_temp_new_i64();
    if (a->op) {
        read_neon_element64(def, a->vd, 0, MO_64);
    } else {
        tcg_gen_movi_i64(def, 0);
    }
    val = tcg_temp_new_i64();
    read_neon_element64(val, a->vm, 0, MO_64);

    gen_helper_neon_tbl(val, tcg_env, desc, val, def);
    write_neon_element64(val, a->vd, 0, MO_64);
    return true;
}

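/* VDUP (scalar): replicate a single element of Vm across all of Vd. */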
static bool trans_VDUP_scalar(DisasContext *s, arg_VDUP_scalar *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (a->vd & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tcg_gen_gvec_dup_mem(a->size, neon_full_reg_offset(a->vd),
                         neon_element_offset(a->vm, a->index, a->size),
                         a->q ? 16 : 8, a->q ? 16 : 8);
    return true;
}

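/*
 * VREV64 reverses the element order within each doubleword. The two
 * 32-bit halves are swapped on writeback; byte or halfword swapping
 * within each 32-bit word is done according to the element size.
 */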
static bool trans_VREV64(DisasContext *s, arg_VREV64 *a)
{
    int pass, half;
    TCGv_i32 tmp[2];

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vd | a->vm) & a->q) {
        return false;
    }

    if (a->size == 3) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp[0] = tcg_temp_new_i32();
    tmp[1] = tcg_temp_new_i32();

    for (pass = 0; pass < (a->q ? 2 : 1); pass++) {
        for (half = 0; half < 2; half++) {
            read_neon_element32(tmp[half], a->vm, pass * 2 + half, MO_32);
            switch (a->size) {
            case MO_8:
                tcg_gen_bswap32_i32(tmp[half], tmp[half]);
                break;
            case MO_16:
                gen_swap_half(tmp[half], tmp[half]);
                break;
            case MO_32:
                break;
            default:
                g_assert_not_reached();
            }
        }
        write_neon_element32(tmp[1], a->vd, pass * 2, MO_32);
        write_neon_element32(tmp[0], a->vd, pass * 2 + 1, MO_32);
    }
    return true;
}

static bool do_2misc_pairwise(DisasContext *s, arg_2misc *a,
                              NeonGenWidenFn *widenfn,
                              NeonGenTwo64OpFn *opfn,
                              NeonGenTwo64OpFn *accfn)
{
    /*
     * Pairwise long operations: widen both halves of the pair,
     * combine the pairs with the opfn, and then possibly accumulate
     * into the destination with the accfn.
     */
    int pass;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vd | a->vm) & a->q) {
        return false;
    }

    if (!widenfn) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    for (pass = 0; pass < a->q + 1; pass++) {
        TCGv_i32 tmp;
        TCGv_i64 rm0_64, rm1_64, rd_64;

        rm0_64 = tcg_temp_new_i64();
        rm1_64 = tcg_temp_new_i64();
        rd_64 = tcg_temp_new_i64();

        tmp = tcg_temp_new_i32();
        read_neon_element32(tmp, a->vm, pass * 2, MO_32);
        widenfn(rm0_64, tmp);
        read_neon_element32(tmp, a->vm, pass * 2 + 1, MO_32);
        widenfn(rm1_64, tmp);

        opfn(rd_64, rm0_64, rm1_64);

        if (accfn) {
            TCGv_i64 tmp64 = tcg_temp_new_i64();
            read_neon_element64(tmp64, a->vd, pass, MO_64);
            accfn(rd_64, tmp64, rd_64);
        }
        write_neon_element64(rd_64, a->vd, pass, MO_64);
    }
    return true;
}

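/*
 * VPADDL adds adjacent element pairs, producing double-width results;
 * VPADAL does the same and also accumulates into the destination.
 */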
static bool trans_VPADDL_S(DisasContext *s, arg_2misc *a)
{
    static NeonGenWidenFn * const widenfn[] = {
        gen_helper_neon_widen_s8,
        gen_helper_neon_widen_s16,
        tcg_gen_ext_i32_i64,
        NULL,
    };
    static NeonGenTwo64OpFn * const opfn[] = {
        gen_helper_neon_paddl_u16,
        gen_helper_neon_paddl_u32,
        tcg_gen_add_i64,
        NULL,
    };

    return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size], NULL);
}

static bool trans_VPADDL_U(DisasContext *s, arg_2misc *a)
{
    static NeonGenWidenFn * const widenfn[] = {
        gen_helper_neon_widen_u8,
        gen_helper_neon_widen_u16,
        tcg_gen_extu_i32_i64,
        NULL,
    };
    static NeonGenTwo64OpFn * const opfn[] = {
        gen_helper_neon_paddl_u16,
        gen_helper_neon_paddl_u32,
        tcg_gen_add_i64,
        NULL,
    };

    return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size], NULL);
}

static bool trans_VPADAL_S(DisasContext *s, arg_2misc *a)
{
    static NeonGenWidenFn * const widenfn[] = {
        gen_helper_neon_widen_s8,
        gen_helper_neon_widen_s16,
        tcg_gen_ext_i32_i64,
        NULL,
    };
    static NeonGenTwo64OpFn * const opfn[] = {
        gen_helper_neon_paddl_u16,
        gen_helper_neon_paddl_u32,
        tcg_gen_add_i64,
        NULL,
    };
    static NeonGenTwo64OpFn * const accfn[] = {
        gen_helper_neon_addl_u16,
        gen_helper_neon_addl_u32,
        tcg_gen_add_i64,
        NULL,
    };

    return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size],
                             accfn[a->size]);
}

static bool trans_VPADAL_U(DisasContext *s, arg_2misc *a)
{
    static NeonGenWidenFn * const widenfn[] = {
        gen_helper_neon_widen_u8,
        gen_helper_neon_widen_u16,
        tcg_gen_extu_i32_i64,
        NULL,
    };
    static NeonGenTwo64OpFn * const opfn[] = {
        gen_helper_neon_paddl_u16,
        gen_helper_neon_paddl_u32,
        tcg_gen_add_i64,
        NULL,
    };
    static NeonGenTwo64OpFn * const accfn[] = {
        gen_helper_neon_addl_u16,
        gen_helper_neon_addl_u32,
        tcg_gen_add_i64,
        NULL,
    };

    return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size],
                             accfn[a->size]);
}

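/*
 * VUZP and VZIP de-interleave or interleave the elements of Vd and Vm.
 * The helpers take pointers into the register file and update both
 * registers in place.
 */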
typedef void ZipFn(TCGv_ptr, TCGv_ptr);

static bool do_zip_uzp(DisasContext *s, arg_2misc *a,
                       ZipFn *fn)
{
    TCGv_ptr pd, pm;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vd | a->vm) & a->q) {
        return false;
    }

    if (!fn) {
        /* Bad size or size/q combination */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    pd = vfp_reg_ptr(true, a->vd);
    pm = vfp_reg_ptr(true, a->vm);
    fn(pd, pm);
    return true;
}

static bool trans_VUZP(DisasContext *s, arg_2misc *a)
{
    static ZipFn * const fn[2][4] = {
        {
            gen_helper_neon_unzip8,
            gen_helper_neon_unzip16,
            NULL,
            NULL,
        }, {
            gen_helper_neon_qunzip8,
            gen_helper_neon_qunzip16,
            gen_helper_neon_qunzip32,
            NULL,
        }
    };
    return do_zip_uzp(s, a, fn[a->q][a->size]);
}

static bool trans_VZIP(DisasContext *s, arg_2misc *a)
{
    static ZipFn * const fn[2][4] = {
        {
            gen_helper_neon_zip8,
            gen_helper_neon_zip16,
            NULL,
            NULL,
        }, {
            gen_helper_neon_qzip8,
            gen_helper_neon_qzip16,
            gen_helper_neon_qzip32,
            NULL,
        }
    };
    return do_zip_uzp(s, a, fn[a->q][a->size]);
}

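/*
 * Narrowing moves: VMOVN and the saturating VQMOVN/VQMOVUN forms
 * narrow the double-width elements of Qm into Dd.
 */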
static bool do_vmovn(DisasContext *s, arg_2misc *a,
                     NeonGenOne64OpEnvFn *narrowfn)
{
    TCGv_i64 rm, rd0, rd1;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!narrowfn) {
        return false;
    }

    if (a->vm & 1) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    rm = tcg_temp_new_i64();
    rd0 = tcg_temp_new_i64();
    rd1 = tcg_temp_new_i64();

    read_neon_element64(rm, a->vm, 0, MO_64);
    narrowfn(rd0, tcg_env, rm);
    read_neon_element64(rm, a->vm, 1, MO_64);
    narrowfn(rd1, tcg_env, rm);
    write_neon_element64(rd0, a->vd, 0, MO_32);
    write_neon_element64(rd1, a->vd, 1, MO_32);
    return true;
}

#define DO_VMOVN(INSN, FUNC)                                    \
    static bool trans_##INSN(DisasContext *s, arg_2misc *a)     \
    {                                                           \
        static NeonGenOne64OpEnvFn * const narrowfn[] = {       \
            FUNC##8,                                            \
            FUNC##16,                                           \
            FUNC##32,                                           \
            NULL,                                               \
        };                                                      \
        return do_vmovn(s, a, narrowfn[a->size]);               \
    }

DO_VMOVN(VMOVN, gen_neon_narrow_u)
DO_VMOVN(VQMOVUN, gen_helper_neon_unarrow_sat)
DO_VMOVN(VQMOVN_S, gen_helper_neon_narrow_sat_s)
DO_VMOVN(VQMOVN_U, gen_helper_neon_narrow_sat_u)

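/*
 * VSHLL (2-reg-misc form): widen each element of Vm and shift it left
 * by the element size in bits.
 */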
static bool trans_VSHLL(DisasContext *s, arg_2misc *a)
{
    TCGv_i64 rd;
    TCGv_i32 rm0, rm1;
    static NeonGenWidenFn * const widenfns[] = {
        gen_helper_neon_widen_u8,
        gen_helper_neon_widen_u16,
        tcg_gen_extu_i32_i64,
        NULL,
    };
    NeonGenWidenFn *widenfn = widenfns[a->size];

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (a->vd & 1) {
        return false;
    }

    if (!widenfn) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    rd = tcg_temp_new_i64();
    rm0 = tcg_temp_new_i32();
    rm1 = tcg_temp_new_i32();

    read_neon_element32(rm0, a->vm, 0, MO_32);
    read_neon_element32(rm1, a->vm, 1, MO_32);

    widenfn(rd, rm0);
    tcg_gen_shli_i64(rd, rd, 8 << a->size);
    write_neon_element64(rd, a->vd, 0, MO_64);
    widenfn(rd, rm1);
    tcg_gen_shli_i64(rd, rd, 8 << a->size);
    write_neon_element64(rd, a->vd, 1, MO_64);
    return true;
}

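/*
 * VCVT from single-precision to BFloat16: the four single elements of
 * Qm are narrowed pairwise into four BFloat16 values in Dd.
 */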
static bool trans_VCVT_B16_F32(DisasContext *s, arg_2misc *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;
    TCGv_i32 dst0, dst1;

    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vm & 1) || (a->size != 1)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_STD);
    tmp = tcg_temp_new_i64();
    dst0 = tcg_temp_new_i32();
    dst1 = tcg_temp_new_i32();

    read_neon_element64(tmp, a->vm, 0, MO_64);
    gen_helper_bfcvt_pair(dst0, tmp, fpst);

    read_neon_element64(tmp, a->vm, 1, MO_64);
    gen_helper_bfcvt_pair(dst1, tmp, fpst);

    write_neon_element32(dst0, a->vd, 0, MO_32);
    write_neon_element32(dst1, a->vd, 1, MO_32);
    return true;
}

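/*
 * VCVT between half-precision and single-precision vectors: F16_F32
 * narrows four singles into four halves, F32_F16 widens four halves
 * into four singles.
 */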
static bool trans_VCVT_F16_F32(DisasContext *s, arg_2misc *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp, tmp, tmp2, tmp3;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON) ||
        !dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vm & 1) || (a->size != 1)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_STD);
    ahp = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    read_neon_element32(tmp, a->vm, 0, MO_32);
    gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
    tmp2 = tcg_temp_new_i32();
    read_neon_element32(tmp2, a->vm, 1, MO_32);
    gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
    tcg_gen_shli_i32(tmp2, tmp2, 16);
    tcg_gen_or_i32(tmp2, tmp2, tmp);
    read_neon_element32(tmp, a->vm, 2, MO_32);
    gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
    tmp3 = tcg_temp_new_i32();
    read_neon_element32(tmp3, a->vm, 3, MO_32);
    write_neon_element32(tmp2, a->vd, 0, MO_32);
    gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
    tcg_gen_shli_i32(tmp3, tmp3, 16);
    tcg_gen_or_i32(tmp3, tmp3, tmp);
    write_neon_element32(tmp3, a->vd, 1, MO_32);
    return true;
}

static bool trans_VCVT_F32_F16(DisasContext *s, arg_2misc *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp, tmp, tmp2, tmp3;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON) ||
        !dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vd & 1) || (a->size != 1)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_STD);
    ahp = get_ahp_flag();
    tmp3 = tcg_temp_new_i32();
    tmp2 = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();
    read_neon_element32(tmp, a->vm, 0, MO_32);
    read_neon_element32(tmp2, a->vm, 1, MO_32);
    tcg_gen_ext16u_i32(tmp3, tmp);
    gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
    write_neon_element32(tmp3, a->vd, 0, MO_32);
    tcg_gen_shri_i32(tmp, tmp, 16);
    gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
    write_neon_element32(tmp, a->vd, 1, MO_32);
    tcg_gen_ext16u_i32(tmp3, tmp2);
    gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
    write_neon_element32(tmp3, a->vd, 2, MO_32);
    tcg_gen_shri_i32(tmp2, tmp2, 16);
    gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
    write_neon_element32(tmp2, a->vd, 3, MO_32);
    return true;
}

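/*
 * 2-reg-misc operations which can be expressed directly as a gvec
 * expansion on the whole vector.
 */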
static bool do_2misc_vec(DisasContext *s, arg_2misc *a, GVecGen2Fn *fn)
{
    int vec_size = a->q ? 16 : 8;
    int rd_ofs = neon_full_reg_offset(a->vd);
    int rm_ofs = neon_full_reg_offset(a->vm);

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (a->size == 3) {
        return false;
    }

    if ((a->vd | a->vm) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fn(a->size, rd_ofs, rm_ofs, vec_size, vec_size);

    return true;
}

#define DO_2MISC_VEC(INSN, FN)                                  \
    static bool trans_##INSN(DisasContext *s, arg_2misc *a)     \
    {                                                           \
        return do_2misc_vec(s, a, FN);                          \
    }

DO_2MISC_VEC(VNEG, tcg_gen_gvec_neg)
DO_2MISC_VEC(VABS, tcg_gen_gvec_abs)
DO_2MISC_VEC(VCEQ0, gen_gvec_ceq0)
DO_2MISC_VEC(VCGT0, gen_gvec_cgt0)
DO_2MISC_VEC(VCLE0, gen_gvec_cle0)
DO_2MISC_VEC(VCGE0, gen_gvec_cge0)
DO_2MISC_VEC(VCLT0, gen_gvec_clt0)

static bool trans_VMVN(DisasContext *s, arg_2misc *a)
{
    if (a->size != 0) {
        return false;
    }
    return do_2misc_vec(s, a, tcg_gen_gvec_not);
}

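/*
 * Crypto 2-reg-misc operations (AES, SHA-1, SHA-256) are implemented
 * with out-of-line helpers over the whole vector.
 */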
#define WRAP_2M_3_OOL_FN(WRAPNAME, FUNC, DATA)                          \
    static void WRAPNAME(unsigned vece, uint32_t rd_ofs,                \
                         uint32_t rm_ofs, uint32_t oprsz,               \
                         uint32_t maxsz)                                \
    {                                                                   \
        tcg_gen_gvec_3_ool(rd_ofs, rd_ofs, rm_ofs, oprsz, maxsz,        \
                           DATA, FUNC);                                 \
    }

#define WRAP_2M_2_OOL_FN(WRAPNAME, FUNC, DATA)                          \
    static void WRAPNAME(unsigned vece, uint32_t rd_ofs,                \
                         uint32_t rm_ofs, uint32_t oprsz,               \
                         uint32_t maxsz)                                \
    {                                                                   \
        tcg_gen_gvec_2_ool(rd_ofs, rm_ofs, oprsz, maxsz, DATA, FUNC);   \
    }

WRAP_2M_3_OOL_FN(gen_AESE, gen_helper_crypto_aese, 0)
WRAP_2M_3_OOL_FN(gen_AESD, gen_helper_crypto_aesd, 0)
WRAP_2M_2_OOL_FN(gen_AESMC, gen_helper_crypto_aesmc, 0)
WRAP_2M_2_OOL_FN(gen_AESIMC, gen_helper_crypto_aesimc, 0)
WRAP_2M_2_OOL_FN(gen_SHA1H, gen_helper_crypto_sha1h, 0)
WRAP_2M_2_OOL_FN(gen_SHA1SU1, gen_helper_crypto_sha1su1, 0)
WRAP_2M_2_OOL_FN(gen_SHA256SU0, gen_helper_crypto_sha256su0, 0)

#define DO_2M_CRYPTO(INSN, FEATURE, SIZE)                       \
    static bool trans_##INSN(DisasContext *s, arg_2misc *a)     \
    {                                                           \
        if (!dc_isar_feature(FEATURE, s) || a->size != SIZE) {  \
            return false;                                       \
        }                                                       \
        return do_2misc_vec(s, a, gen_##INSN);                  \
    }

DO_2M_CRYPTO(AESE, aa32_aes, 0)
DO_2M_CRYPTO(AESD, aa32_aes, 0)
DO_2M_CRYPTO(AESMC, aa32_aes, 0)
DO_2M_CRYPTO(AESIMC, aa32_aes, 0)
DO_2M_CRYPTO(SHA1H, aa32_sha1, 2)
DO_2M_CRYPTO(SHA1SU1, aa32_sha1, 2)
DO_2M_CRYPTO(SHA256SU0, aa32_sha2, 2)

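/*
 * Integer 2-reg-misc operations with no gvec expansion are emitted
 * element by element, 32 bits at a time.
 */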
static bool do_2misc(DisasContext *s, arg_2misc *a, NeonGenOneOpFn *fn)
{
    TCGv_i32 tmp;
    int pass;

    /* Handle a 2-reg-misc operation by iterating 32 bits at a time */
    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!fn) {
        return false;
    }

    if ((a->vd | a->vm) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
        read_neon_element32(tmp, a->vm, pass, MO_32);
        fn(tmp, tmp);
        write_neon_element32(tmp, a->vd, pass, MO_32);
    }
    return true;
}

static bool trans_VREV32(DisasContext *s, arg_2misc *a)
{
    static NeonGenOneOpFn * const fn[] = {
        tcg_gen_bswap32_i32,
        gen_swap_half,
        NULL,
        NULL,
    };
    return do_2misc(s, a, fn[a->size]);
}

static bool trans_VREV16(DisasContext *s, arg_2misc *a)
{
    if (a->size != 0) {
        return false;
    }
    return do_2misc(s, a, gen_rev16);
}

static bool trans_VCLS(DisasContext *s, arg_2misc *a)
{
    static NeonGenOneOpFn * const fn[] = {
        gen_helper_neon_cls_s8,
        gen_helper_neon_cls_s16,
        gen_helper_neon_cls_s32,
        NULL,
    };
    return do_2misc(s, a, fn[a->size]);
}

static void do_VCLZ_32(TCGv_i32 rd, TCGv_i32 rm)
{
    tcg_gen_clzi_i32(rd, rm, 32);
}

static bool trans_VCLZ(DisasContext *s, arg_2misc *a)
{
    static NeonGenOneOpFn * const fn[] = {
        gen_helper_neon_clz_u8,
        gen_helper_neon_clz_u16,
        do_VCLZ_32,
        NULL,
    };
    return do_2misc(s, a, fn[a->size]);
}

static bool trans_VCNT(DisasContext *s, arg_2misc *a)
{
    if (a->size != 0) {
        return false;
    }
    return do_2misc(s, a, gen_helper_neon_cnt_u8);
}

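/*
 * Floating-point VABS and VNEG only need to clear or flip the sign
 * bit, so they are expanded as vector AND/XOR with an immediate.
 */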
static void gen_VABS_F(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                       uint32_t oprsz, uint32_t maxsz)
{
    tcg_gen_gvec_andi(vece, rd_ofs, rm_ofs,
                      vece == MO_16 ? 0x7fff : 0x7fffffff,
                      oprsz, maxsz);
}

static bool trans_VABS_F(DisasContext *s, arg_2misc *a)
{
    if (a->size == MO_16) {
        if (!dc_isar_feature(aa32_fp16_arith, s)) {
            return false;
        }
    } else if (a->size != MO_32) {
        return false;
    }
    return do_2misc_vec(s, a, gen_VABS_F);
}

static void gen_VNEG_F(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                       uint32_t oprsz, uint32_t maxsz)
{
    tcg_gen_gvec_xori(vece, rd_ofs, rm_ofs,
                      vece == MO_16 ? 0x8000 : 0x80000000,
                      oprsz, maxsz);
}

static bool trans_VNEG_F(DisasContext *s, arg_2misc *a)
{
    if (a->size == MO_16) {
        if (!dc_isar_feature(aa32_fp16_arith, s)) {
            return false;
        }
    } else if (a->size != MO_32) {
        return false;
    }
    return do_2misc_vec(s, a, gen_VNEG_F);
}

static bool trans_VRECPE(DisasContext *s, arg_2misc *a)
{
    if (a->size != 2) {
        return false;
    }
    return do_2misc(s, a, gen_helper_recpe_u32);
}

static bool trans_VRSQRTE(DisasContext *s, arg_2misc *a)
{
    if (a->size != 2) {
        return false;
    }
    return do_2misc(s, a, gen_helper_rsqrte_u32);
}

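/*
 * VQABS and VQNEG can saturate and so may set QC; their helpers take
 * the CPU env pointer, so wrap them to the one-operand prototype.
 */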
#define WRAP_1OP_ENV_FN(WRAPNAME, FUNC)                 \
    static void WRAPNAME(TCGv_i32 d, TCGv_i32 m)        \
    {                                                   \
        FUNC(d, tcg_env, m);                            \
    }

WRAP_1OP_ENV_FN(gen_VQABS_s8, gen_helper_neon_qabs_s8)
WRAP_1OP_ENV_FN(gen_VQABS_s16, gen_helper_neon_qabs_s16)
WRAP_1OP_ENV_FN(gen_VQABS_s32, gen_helper_neon_qabs_s32)
WRAP_1OP_ENV_FN(gen_VQNEG_s8, gen_helper_neon_qneg_s8)
WRAP_1OP_ENV_FN(gen_VQNEG_s16, gen_helper_neon_qneg_s16)
WRAP_1OP_ENV_FN(gen_VQNEG_s32, gen_helper_neon_qneg_s32)

static bool trans_VQABS(DisasContext *s, arg_2misc *a)
{
    static NeonGenOneOpFn * const fn[] = {
        gen_VQABS_s8,
        gen_VQABS_s16,
        gen_VQABS_s32,
        NULL,
    };
    return do_2misc(s, a, fn[a->size]);
}

static bool trans_VQNEG(DisasContext *s, arg_2misc *a)
{
    static NeonGenOneOpFn * const fn[] = {
        gen_VQNEG_s8,
        gen_VQNEG_s16,
        gen_VQNEG_s32,
        NULL,
    };
    return do_2misc(s, a, fn[a->size]);
}

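/*
 * Vector 2-reg-misc FP operations, using the standard FP status
 * (FPST_STD_F16 or FPST_STD according to the element size).
 */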
#define DO_2MISC_FP_VEC(INSN, HFUNC, SFUNC)                             \
    static void gen_##INSN(unsigned vece, uint32_t rd_ofs,              \
                           uint32_t rm_ofs,                             \
                           uint32_t oprsz, uint32_t maxsz)              \
    {                                                                   \
        static gen_helper_gvec_2_ptr * const fns[4] = {                 \
            NULL, HFUNC, SFUNC, NULL,                                   \
        };                                                              \
        TCGv_ptr fpst;                                                  \
        fpst = fpstatus_ptr(vece == MO_16 ? FPST_STD_F16 : FPST_STD);   \
        tcg_gen_gvec_2_ptr(rd_ofs, rm_ofs, fpst, oprsz, maxsz, 0,       \
                           fns[vece]);                                  \
    }                                                                   \
    static bool trans_##INSN(DisasContext *s, arg_2misc *a)             \
    {                                                                   \
        if (a->size == MO_16) {                                         \
            if (!dc_isar_feature(aa32_fp16_arith, s)) {                 \
                return false;                                           \
            }                                                           \
        } else if (a->size != MO_32) {                                  \
            return false;                                               \
        }                                                               \
        return do_2misc_vec(s, a, gen_##INSN);                          \
    }

DO_2MISC_FP_VEC(VRECPE_F, gen_helper_gvec_frecpe_h, gen_helper_gvec_frecpe_s)
DO_2MISC_FP_VEC(VRSQRTE_F, gen_helper_gvec_frsqrte_h, gen_helper_gvec_frsqrte_s)
DO_2MISC_FP_VEC(VCGT0_F, gen_helper_gvec_fcgt0_h, gen_helper_gvec_fcgt0_s)
DO_2MISC_FP_VEC(VCGE0_F, gen_helper_gvec_fcge0_h, gen_helper_gvec_fcge0_s)
DO_2MISC_FP_VEC(VCEQ0_F, gen_helper_gvec_fceq0_h, gen_helper_gvec_fceq0_s)
DO_2MISC_FP_VEC(VCLT0_F, gen_helper_gvec_fclt0_h, gen_helper_gvec_fclt0_s)
DO_2MISC_FP_VEC(VCLE0_F, gen_helper_gvec_fcle0_h, gen_helper_gvec_fcle0_s)
DO_2MISC_FP_VEC(VCVT_FS, gen_helper_gvec_sstoh, gen_helper_gvec_sitos)
DO_2MISC_FP_VEC(VCVT_FU, gen_helper_gvec_ustoh, gen_helper_gvec_uitos)
DO_2MISC_FP_VEC(VCVT_SF, gen_helper_gvec_tosszh, gen_helper_gvec_tosizs)
DO_2MISC_FP_VEC(VCVT_UF, gen_helper_gvec_touszh, gen_helper_gvec_touizs)

DO_2MISC_FP_VEC(VRINTX_impl, gen_helper_gvec_vrintx_h, gen_helper_gvec_vrintx_s)

static bool trans_VRINTX(DisasContext *s, arg_2misc *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }
    return trans_VRINTX_impl(s, a);
}

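/*
 * v8 VCVT{A,N,P,M} and VRINT{N,A,Z,M,P} use an explicit rounding
 * mode, which is passed to the helper as the gvec data argument via
 * arm_rmode_to_sf().
 */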
#define DO_VEC_RMODE(INSN, RMODE, OP)                                   \
    static void gen_##INSN(unsigned vece, uint32_t rd_ofs,              \
                           uint32_t rm_ofs,                             \
                           uint32_t oprsz, uint32_t maxsz)              \
    {                                                                   \
        static gen_helper_gvec_2_ptr * const fns[4] = {                 \
            NULL,                                                       \
            gen_helper_gvec_##OP##h,                                    \
            gen_helper_gvec_##OP##s,                                    \
            NULL,                                                       \
        };                                                              \
        TCGv_ptr fpst;                                                  \
        fpst = fpstatus_ptr(vece == 1 ? FPST_STD_F16 : FPST_STD);       \
        tcg_gen_gvec_2_ptr(rd_ofs, rm_ofs, fpst, oprsz, maxsz,          \
                           arm_rmode_to_sf(RMODE), fns[vece]);          \
    }                                                                   \
    static bool trans_##INSN(DisasContext *s, arg_2misc *a)             \
    {                                                                   \
        if (!arm_dc_feature(s, ARM_FEATURE_V8)) {                       \
            return false;                                               \
        }                                                               \
        if (a->size == MO_16) {                                         \
            if (!dc_isar_feature(aa32_fp16_arith, s)) {                 \
                return false;                                           \
            }                                                           \
        } else if (a->size != MO_32) {                                  \
            return false;                                               \
        }                                                               \
        return do_2misc_vec(s, a, gen_##INSN);                          \
    }

DO_VEC_RMODE(VCVTAU, FPROUNDING_TIEAWAY, vcvt_rm_u)
DO_VEC_RMODE(VCVTAS, FPROUNDING_TIEAWAY, vcvt_rm_s)
DO_VEC_RMODE(VCVTNU, FPROUNDING_TIEEVEN, vcvt_rm_u)
DO_VEC_RMODE(VCVTNS, FPROUNDING_TIEEVEN, vcvt_rm_s)
DO_VEC_RMODE(VCVTPU, FPROUNDING_POSINF, vcvt_rm_u)
DO_VEC_RMODE(VCVTPS, FPROUNDING_POSINF, vcvt_rm_s)
DO_VEC_RMODE(VCVTMU, FPROUNDING_NEGINF, vcvt_rm_u)
DO_VEC_RMODE(VCVTMS, FPROUNDING_NEGINF, vcvt_rm_s)

DO_VEC_RMODE(VRINTN, FPROUNDING_TIEEVEN, vrint_rm_)
DO_VEC_RMODE(VRINTA, FPROUNDING_TIEAWAY, vrint_rm_)
DO_VEC_RMODE(VRINTZ, FPROUNDING_ZERO, vrint_rm_)
DO_VEC_RMODE(VRINTM, FPROUNDING_NEGINF, vrint_rm_)
DO_VEC_RMODE(VRINTP, FPROUNDING_POSINF, vrint_rm_)

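/* VSWP exchanges the contents of Vd and Vm, 64 bits at a time. */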
static bool trans_VSWP(DisasContext *s, arg_2misc *a)
{
    TCGv_i64 rm, rd;
    int pass;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (a->size != 0) {
        return false;
    }

    if ((a->vd | a->vm) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    rm = tcg_temp_new_i64();
    rd = tcg_temp_new_i64();
    for (pass = 0; pass < (a->q ? 2 : 1); pass++) {
        read_neon_element64(rm, a->vm, pass, MO_64);
        read_neon_element64(rd, a->vd, pass, MO_64);
        write_neon_element64(rm, a->vd, pass, MO_64);
        write_neon_element64(rd, a->vm, pass, MO_64);
    }
    return true;
}

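/*
 * VTRN transposes pairs of elements between Vd and Vm. For 8-bit and
 * 16-bit elements the helpers below do the transposition within each
 * 32-bit word; for 32-bit elements whole words are exchanged.
 */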
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);
}

static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);
}

static bool trans_VTRN(DisasContext *s, arg_2misc *a)
{
    TCGv_i32 tmp, tmp2;
    int pass;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vd | a->vm) & a->q) {
        return false;
    }

    if (a->size == 3) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    tmp2 = tcg_temp_new_i32();
    if (a->size == MO_32) {
        for (pass = 0; pass < (a->q ? 4 : 2); pass += 2) {
            read_neon_element32(tmp, a->vm, pass, MO_32);
            read_neon_element32(tmp2, a->vd, pass + 1, MO_32);
            write_neon_element32(tmp2, a->vm, pass, MO_32);
            write_neon_element32(tmp, a->vd, pass + 1, MO_32);
        }
    } else {
        for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
            read_neon_element32(tmp, a->vm, pass, MO_32);
            read_neon_element32(tmp2, a->vd, pass, MO_32);
            if (a->size == MO_8) {
                gen_neon_trn_u8(tmp, tmp2);
            } else {
                gen_neon_trn_u16(tmp, tmp2);
            }
            write_neon_element32(tmp2, a->vm, pass, MO_32);
            write_neon_element32(tmp, a->vd, pass, MO_32);
        }
    }
    return true;
}

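/*
 * VSMMLA/VUMMLA/VUSMMLA are the 8-bit integer matrix multiply ops from
 * the I8MM extension; VMMLA_b16 and the VFMA_b16 forms come from the
 * BFloat16 extension.
 */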
static bool trans_VSMMLA(DisasContext *s, arg_VSMMLA *a)
{
    if (!dc_isar_feature(aa32_i8mm, s)) {
        return false;
    }
    return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
                        gen_helper_gvec_smmla_b);
}

static bool trans_VUMMLA(DisasContext *s, arg_VUMMLA *a)
{
    if (!dc_isar_feature(aa32_i8mm, s)) {
        return false;
    }
    return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
                        gen_helper_gvec_ummla_b);
}

static bool trans_VUSMMLA(DisasContext *s, arg_VUSMMLA *a)
{
    if (!dc_isar_feature(aa32_i8mm, s)) {
        return false;
    }
    return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
                        gen_helper_gvec_usmmla_b);
}

static bool trans_VMMLA_b16(DisasContext *s, arg_VMMLA_b16 *a)
{
    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }
    return do_neon_ddda_env(s, 7, a->vd, a->vn, a->vm, 0,
                            gen_helper_gvec_bfmmla);
}

static bool trans_VFMA_b16(DisasContext *s, arg_VFMA_b16 *a)
{
    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }
    return do_neon_ddda_fpst(s, 7, a->vd, a->vn, a->vm, a->q, FPST_STD,
                             gen_helper_gvec_bfmlal);
}

static bool trans_VFMA_b16_scal(DisasContext *s, arg_VFMA_b16_scal *a)
{
    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }
    return do_neon_ddda_fpst(s, 6, a->vd, a->vn, a->vm,
                             (a->index << 1) | a->q, FPST_STD,
                             gen_helper_gvec_bfmlal_idx);
}