/*
 * ARM translation: M-profile NOCP special-case instructions
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "translate.h"
#include "translate-a32.h"
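
/* Include the generated decoder */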
#include "decode-m-nocp.c.inc"

/*
 * Decoding VLLDM and VLSTM is nonstandard because:
 *  * if there is no FPU then these insns must NOP in
 *    Secure state and UNDEF in Nonsecure state
 *  * if there is an FPU then these insns do not have
 *    the usual behaviour that vfp_access_check() provides of
 *    being controlled by CPACR/NSACR enable bits or the
 *    lazy-stacking logic.
 */
static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a)
{
    TCGv_i32 fptr;

    if (!arm_dc_feature(s, ARM_FEATURE_M) ||
        !arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }

    if (a->op) {
        /*
         * T2 encoding ({D0-D31} reglist): v8.1M and up. We choose not
         * to take the IMPDEF option to make memory accesses to the stack
         * slots that correspond to the D16-D31 registers (discarding
         * read data and writing UNKNOWN values), so for us the T2
         * encoding behaves identically to the T1 encoding.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return false;
        }
    } else {
        /*
         * T1 encoding ({D0-D15} reglist); undef if we have 32 Dregs.
         * This is currently architecturally impossible, but we add the
         * check to stay in line with the pseudocode. Note that we must
         * emit code for the UNDEF so it takes precedence over the NOCP.
         */
        if (dc_isar_feature(aa32_simd_r32, s)) {
            unallocated_encoding(s);
            return true;
        }
    }

    /*
     * If not secure, UNDEF. We must emit code for this
     * rather than returning false so that this takes
     * precedence over the m-nocp.decode NOCP fallback.
     */
    if (!s->v8m_secure) {
        unallocated_encoding(s);
        return true;
    }

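    /*
     * These insns may validly be executed with a nonzero ECI/ICI value;
     * mark the ECI state as handled here so that the translator does
     * not treat a nonzero ECI value as an error.
     */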
    s->eci_handled = true;

    /* If no fpu, NOP. */
    if (!dc_isar_feature(aa32_vfp, s)) {
        clear_eci_state(s);
        return true;
    }

    fptr = load_reg(s, a->rn);
    if (a->l) {
        gen_helper_v7m_vlldm(cpu_env, fptr);
    } else {
        gen_helper_v7m_vlstm(cpu_env, fptr);
    }
    tcg_temp_free_i32(fptr);

    clear_eci_state(s);

    /*
     * End the TB, because we have updated FP control bits,
     * and possibly VPR or LTPSIZE.
     */
    s->base.is_jmp = DISAS_UPDATE_EXIT;
    return true;
}

static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a)
{
    int btmreg, topreg;
    TCGv_i64 zero;
    TCGv_i32 aspen, sfpa;

    if (!dc_isar_feature(aa32_m_sec_state, s)) {
        /* Before v8.1M, fall through in decode to the NOCP check */
        return false;
    }

    /* Explicitly UNDEF because this takes precedence over NOCP */
    if (!arm_dc_feature(s, ARM_FEATURE_M_MAIN) || !s->v8m_secure) {
        unallocated_encoding(s);
        return true;
    }

    s->eci_handled = true;

    if (!dc_isar_feature(aa32_vfp_simd, s)) {
        /* NOP if we have neither FP nor MVE */
        clear_eci_state(s);
        return true;
    }

    /*
     * If FPCCR.ASPEN != 0 && CONTROL_S.SFPA == 0 then there is no
     * active floating point context so we must NOP (without doing
     * any lazy state preservation or the NOCP check).
     */
    aspen = load_cpu_field(v7m.fpccr[M_REG_S]);
    sfpa = load_cpu_field(v7m.control[M_REG_S]);
    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_andi_i32(sfpa, sfpa, R_V7M_CONTROL_SFPA_MASK);
    tcg_gen_or_i32(sfpa, sfpa, aspen);
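    /*
     * After the and/xor sequence above, "aspen" is nonzero iff
     * FPCCR.ASPEN == 0, so sfpa | aspen is zero exactly in the
     * ASPEN == 1 && SFPA == 0 case, where we skip the insn body (NOP).
     */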
    arm_gen_condlabel(s);
    tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel);

    if (s->fp_excp_el != 0) {
        gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                           syn_uncategorized(), s->fp_excp_el);
        return true;
    }

    topreg = a->vd + a->imm - 1;
    btmreg = a->vd;

    /* Convert to Sreg numbers if the insn specified the registers as Dregs */
    if (a->size == 3) {
        topreg = topreg * 2 + 1;
        btmreg *= 2;
    }

    if (topreg > 63 || (topreg > 31 && !(topreg & 1))) {
        /* UNPREDICTABLE: we choose to undef */
        unallocated_encoding(s);
        return true;
    }

    /* Silently ignore requests to clear D16-D31 if they don't exist */
    if (topreg > 31 && !dc_isar_feature(aa32_simd_r32, s)) {
        topreg = 31;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Zero the Sregs from btmreg to topreg inclusive. */
    zero = tcg_const_i64(0);
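    /*
     * Zeroing proceeds in up to three steps: an initial odd-numbered
     * Sreg, then full Dregs (two Sregs at a time), then a final
     * leftover Sreg.
     */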
    if (btmreg & 1) {
        write_neon_element64(zero, btmreg >> 1, 1, MO_32);
        btmreg++;
    }
    for (; btmreg + 1 <= topreg; btmreg += 2) {
        write_neon_element64(zero, btmreg >> 1, 0, MO_64);
    }
    if (btmreg == topreg) {
        write_neon_element64(zero, btmreg >> 1, 0, MO_32);
        btmreg++;
    }
    assert(btmreg == topreg + 1);
    if (dc_isar_feature(aa32_mve, s)) {
        TCGv_i32 z32 = tcg_const_i32(0);
        store_cpu_field(z32, v7m.vpr);
    }

    clear_eci_state(s);
    return true;
}

/*
 * M-profile provides two different sets of instructions that can
 * access floating point system registers: VMSR/VMRS (which move
 * to/from a general purpose register) and VLDR/VSTR sysreg (which
 * move directly to/from memory). In some cases there are also side
 * effects which must happen after any write to memory (which could
 * cause an exception). So we implement the common logic for the
 * sysreg access in gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read(),
 * which take pointers to callback functions which will perform the
 * actual "read/write general purpose register" and "read/write
 * memory" operations.
 */

/*
 * Emit code to store the sysreg to its final destination; frees the
 * TCG temp 'value' it is passed. do_access is true to do the store,
 * and false to skip it and only perform side-effects like base
 * register writeback.
 */
typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value,
                               bool do_access);

/*
 * Emit code to load the value to be copied to the sysreg; returns
 * a new TCG temporary. do_access is true to do the load,
 * and false to skip it and only perform side-effects like base
 * register writeback.
 */
typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque,
                                  bool do_access);

/* Common decode/access checks for fp sysreg read/write */
typedef enum FPSysRegCheckResult {
    FPSysRegCheckFailed, /* caller should return false */
    FPSysRegCheckDone, /* caller should return true */
    FPSysRegCheckContinue, /* caller should continue generating code */
} FPSysRegCheckResult;

static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
{
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return FPSysRegCheckFailed;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
    case QEMU_VFP_FPSCR_NZCV:
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_FPCXT_S:
    case ARM_VFP_FPCXT_NS:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return FPSysRegCheckFailed;
        }
        if (!s->v8m_secure) {
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_VPR:
    case ARM_VFP_P0:
        if (!dc_isar_feature(aa32_mve, s)) {
            return FPSysRegCheckFailed;
        }
        break;
    default:
        return FPSysRegCheckFailed;
    }

    /*
     * FPCXT_NS is a special case: it has specific handling for
     * "current FP state is inactive", and must do the PreserveFPState()
     * but not the usual full set of actions done by ExecuteFPCheck().
     * So we don't call vfp_access_check() and the callers must handle this.
     */
    if (regno != ARM_VFP_FPCXT_NS && !vfp_access_check(s)) {
        return FPSysRegCheckDone;
    }
    return FPSysRegCheckContinue;
}

static void gen_branch_fpInactive(DisasContext *s, TCGCond cond,
                                  TCGLabel *label)
{
    /*
     * FPCXT_NS is a special case: it has specific handling for
     * "current FP state is inactive", and must do the PreserveFPState()
     * but not the usual full set of actions done by ExecuteFPCheck().
     * We don't have a TB flag that matches the fpInactive check, so we
     * do it at runtime as we don't expect FPCXT_NS accesses to be frequent.
     *
     * Emit code that checks fpInactive and does a conditional
     * branch to label based on it:
     *  if cond is TCG_COND_NE then branch if fpInactive != 0 (ie if inactive)
     *  if cond is TCG_COND_EQ then branch if fpInactive == 0 (ie if active)
     */
    assert(cond == TCG_COND_EQ || cond == TCG_COND_NE);

    /* fpInactive = FPCCR_NS.ASPEN == 1 && CONTROL.FPCA == 0 */
    TCGv_i32 aspen, fpca;
    aspen = load_cpu_field(v7m.fpccr[M_REG_NS]);
    fpca = load_cpu_field(v7m.control[M_REG_S]);
    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_andi_i32(fpca, fpca, R_V7M_CONTROL_FPCA_MASK);
    tcg_gen_or_i32(fpca, fpca, aspen);
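    /*
     * fpca is now nonzero exactly when the FP context is active, so we
     * branch on the inverted condition to get the sense the caller asked for.
     */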
    tcg_gen_brcondi_i32(tcg_invert_cond(cond), fpca, 0, label);
    tcg_temp_free_i32(aspen);
    tcg_temp_free_i32(fpca);
}

static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
                                  fp_sysreg_loadfn *loadfn,
                                  void *opaque)
{
    /* Do a write to an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = loadfn(s, opaque, true);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        break;
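    /*
     * FPSCR_NZCVQC: only the NZCV (and, for MVE, QC) bits are
     * written; the rest of FPSCR is left unchanged.
     */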
    case ARM_VFP_FPSCR_NZCVQC:
    {
        TCGv_i32 fpscr;
        tmp = loadfn(s, opaque, true);
        if (dc_isar_feature(aa32_mve, s)) {
            /* QC is only present for MVE; otherwise RES0 */
            TCGv_i32 qc = tcg_temp_new_i32();
            tcg_gen_andi_i32(qc, tmp, FPCR_QC);
            /*
             * The 4 vfp.qc[] fields need only be "zero" vs "non-zero";
             * here writing the same value into all elements is simplest.
             */
            tcg_gen_gvec_dup_i32(MO_32, offsetof(CPUARMState, vfp.qc),
                                 16, 16, qc);
        }
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
        tcg_gen_or_i32(fpscr, fpscr, tmp);
        store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
        tcg_temp_free_i32(tmp);
        break;
    }
    case ARM_VFP_FPCXT_NS:
    {
        TCGLabel *lab_active = gen_new_label();

        lab_end = gen_new_label();
        gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
        /*
         * fpInactive case: write is a NOP, so only do side effects
         * like register writeback before we branch to end
         */
        loadfn(s, opaque, false);
        tcg_gen_br(lab_end);

        gen_set_label(lab_active);
        /*
         * !fpInactive: if FPU disabled, take NOCP exception;
         * otherwise PreserveFPState(), and then FPCXT_NS writes
         * behave the same as FPCXT_S writes.
         */
        if (!vfp_access_check_m(s, true)) {
            /*
             * This was only a conditional exception, so override
             * gen_exception_insn()'s default to DISAS_NORETURN
             */
            s->base.is_jmp = DISAS_NEXT;
            break;
        }
    }
    /* fall through */
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 sfpa, control;
        /*
         * Set FPSCR and CONTROL.SFPA from value; the new FPSCR takes
         * bits [27:0] from value and zeroes bits [31:28].
         */
        tmp = loadfn(s, opaque, true);
        sfpa = tcg_temp_new_i32();
        tcg_gen_shri_i32(sfpa, tmp, 31);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_deposit_i32(control, control, sfpa,
                            R_V7M_CONTROL_SFPA_SHIFT, 1);
        store_cpu_field(control, v7m.control[M_REG_S]);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(sfpa);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            loadfn(s, opaque, false);
            break;
        }
        tmp = loadfn(s, opaque, true);
        store_cpu_field(tmp, v7m.vpr);
        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        break;
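    /*
     * A write to P0 updates only the P0 field of VPR, leaving the
     * other VPR bits unchanged.
     */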
    case ARM_VFP_P0:
    {
        TCGv_i32 vpr;
        tmp = loadfn(s, opaque, true);
        vpr = load_cpu_field(v7m.vpr);
        tcg_gen_deposit_i32(vpr, vpr, tmp,
                            R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        store_cpu_field(vpr, v7m.vpr);
        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        tcg_temp_free_i32(tmp);
        break;
    }
    default:
        g_assert_not_reached();
    }
    if (lab_end) {
        gen_set_label(lab_end);
    }
    return true;
}

static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
                                 fp_sysreg_storefn *storefn,
                                 void *opaque)
{
    /* Do a read from an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;
    bool lookup_tb = false;
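    /*
     * lookup_tb is set by the cases that change FP system state in a
     * way that requires us to end the TB and look up the next one anew.
     */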

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    if (regno == ARM_VFP_FPSCR_NZCVQC && !dc_isar_feature(aa32_mve, s)) {
        /* QC is RES0 without MVE, so NZCVQC simplifies to NZCV */
        regno = QEMU_VFP_FPSCR_NZCV;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCVQC_MASK);
        storefn(s, opaque, tmp, true);
        break;
    case QEMU_VFP_FPSCR_NZCV:
        /*
         * Read just NZCV; this is a special case to avoid the
         * helper call for the "VMRS to CPSR.NZCV" insn.
         */
        tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 control, sfpa, fpscr;
        /* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        tcg_temp_free_i32(sfpa);
        /*
         * Store result before updating FPSCR etc, in case
         * it is a memory write which causes an exception.
         */
        storefn(s, opaque, tmp, true);
        /*
         * Now we must reset FPSCR from FPDSCR_NS, and clear
         * CONTROL.SFPA; so we'll end the TB here.
         */
        tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
        store_cpu_field(control, v7m.control[M_REG_S]);
        fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(fpscr);
        lookup_tb = true;
        break;
    }
    case ARM_VFP_FPCXT_NS:
    {
        TCGv_i32 control, sfpa, fpscr, fpdscr, zero;
        TCGLabel *lab_active = gen_new_label();

        lookup_tb = true;

        gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
        /* fpInactive case: reads as FPDSCR_NS */
        TCGv_i32 tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        storefn(s, opaque, tmp, true);
        lab_end = gen_new_label();
        tcg_gen_br(lab_end);

        gen_set_label(lab_active);
        /*
         * !fpInactive: if FPU disabled, take NOCP exception;
         * otherwise PreserveFPState(), and then FPCXT_NS
         * reads the same as FPCXT_S.
         */
        if (!vfp_access_check_m(s, true)) {
            /*
             * This was only a conditional exception, so override
             * gen_exception_insn()'s default to DISAS_NORETURN
             */
            s->base.is_jmp = DISAS_NEXT;
            break;
        }
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        fpscr = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(fpscr, cpu_env);
        tcg_gen_andi_i32(tmp, fpscr, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        tcg_temp_free_i32(control);
        /* Store result before updating FPSCR, in case it faults */
        storefn(s, opaque, tmp, true);
        /* If SFPA is zero then set FPSCR from FPDSCR_NS */
        fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        zero = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, zero, fpdscr, fpscr);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(zero);
        tcg_temp_free_i32(sfpa);
        tcg_temp_free_i32(fpdscr);
        tcg_temp_free_i32(fpscr);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            storefn(s, opaque, NULL, false);
            break;
        }
        tmp = load_cpu_field(v7m.vpr);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_P0:
        tmp = load_cpu_field(v7m.vpr);
        tcg_gen_extract_i32(tmp, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        storefn(s, opaque, tmp, true);
        break;
    default:
        g_assert_not_reached();
    }

    if (lab_end) {
        gen_set_label(lab_end);
    }
    if (lookup_tb) {
        gen_lookup_tb(s);
    }
    return true;
}

static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value,
                             bool do_access)
{
    arg_VMSR_VMRS *a = opaque;

    if (!do_access) {
        return;
    }

    if (a->rt == 15) {
        /* Set the 4 flag bits in the CPSR */
        gen_set_nzcv(value);
        tcg_temp_free_i32(value);
    } else {
        store_reg(s, a->rt, value);
    }
}

static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque, bool do_access)
{
    arg_VMSR_VMRS *a = opaque;

    if (!do_access) {
        return NULL;
    }
    return load_reg(s, a->rt);
}

static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    /*
     * Accesses to R15 are UNPREDICTABLE; we choose to undef.
     * FPSCR -> r15 is a special case which writes to the PSR flags;
     * set a->reg to a special value to tell gen_M_fp_sysreg_read()
     * we only care about the top 4 bits of FPSCR there.
     */
    if (a->rt == 15) {
        if (a->l && a->reg == ARM_VFP_FPSCR) {
            a->reg = QEMU_VFP_FPSCR_NZCV;
        } else {
            return false;
        }
    }

    if (a->l) {
        /* VMRS, move FP system register to gp register */
        return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_gpr, a);
    } else {
        /* VMSR, move gp register to FP system register */
        return gen_M_fp_sysreg_write(s, a->reg, gpr_to_fp_sysreg, a);
    }
}

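/*
 * The two memory callbacks below implement the addressing modes of
 * VLDR/VSTR sysreg: a->a is the "add vs subtract offset" bit, a->p
 * selects pre-indexed addressing, and a->w requests base register
 * writeback.
 */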
static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value,
                                bool do_access)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;

    if (!a->a) {
        offset = -offset;
    }

    if (!do_access && !a->w) {
        return;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    if (do_access) {
        gen_aa32_st_i32(s, value, addr, get_mem_index(s),
                        MO_UL | MO_ALIGN | s->be_data);
        tcg_temp_free_i32(value);
    }

    if (a->w) {
        /* writeback */
        if (!a->p) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
}

static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque,
                                    bool do_access)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;
    TCGv_i32 value = NULL;

    if (!a->a) {
        offset = -offset;
    }

    if (!do_access && !a->w) {
        return NULL;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    if (do_access) {
        value = tcg_temp_new_i32();
        gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
                        MO_UL | MO_ALIGN | s->be_data);
    }

    if (a->w) {
        /* writeback */
        if (!a->p) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
    return value;
}

static bool trans_VLDR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    if (a->rn == 15) {
        return false;
    }
    return gen_M_fp_sysreg_write(s, a->reg, memory_to_fp_sysreg, a);
}

static bool trans_VSTR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    if (a->rn == 15) {
        return false;
    }
    return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_memory, a);
}

static bool trans_NOCP(DisasContext *s, arg_nocp *a)
{
    /*
     * Handle M-profile early check for disabled coprocessor:
     * all we need to do here is emit the NOCP exception if
     * the coprocessor is disabled. Otherwise we return false
     * and the real VFP/etc decode will handle the insn.
     */
    assert(arm_dc_feature(s, ARM_FEATURE_M));

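    /*
     * cp10 and cp11 both refer to the FP unit, so fold cp11 into
     * cp10 here to simplify the checks below.
     */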
    if (a->cp == 11) {
        a->cp = 10;
    }
    if (arm_dc_feature(s, ARM_FEATURE_V8_1M) &&
        (a->cp == 8 || a->cp == 9 || a->cp == 14 || a->cp == 15)) {
        /* In v8.1M, cp 8, 9, 14 and 15 are also governed by the cp10 enable */
        a->cp = 10;
    }

    if (a->cp != 10) {
        gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                           syn_uncategorized(), default_exception_el(s));
        return true;
    }

    if (s->fp_excp_el != 0) {
        gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                           syn_uncategorized(), s->fp_excp_el);
        return true;
    }

    return false;
}

static bool trans_NOCP_8_1(DisasContext *s, arg_nocp *a)
{
    /* This range needs a coprocessor check for v8.1M and later only */
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    return trans_NOCP(s, a);
}