/*
 * ARM hflags
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/helper-proto.h"
#include "cpregs.h"

static inline bool fgt_svc(CPUARMState *env, int el)
{
    /*
     * Assuming fine-grained-traps are active, return true if we
     * should be trapping on SVC instructions. Only AArch64 can
     * trap on an SVC at EL1, but we don't need to special-case this
     * because if this is AArch32 EL1 then arm_fgt_active() is false.
     * We also know el is 0 or 1.
     */
    return el == 0 ?
        FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL0) :
        FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL1);
}

/* Return true if memory alignment should be enforced. */
static bool aprofile_require_alignment(CPUARMState *env, int el, uint64_t sctlr)
{
#ifdef CONFIG_USER_ONLY
    return false;
#else
    /* Check the alignment enable bit. */
    if (sctlr & SCTLR_A) {
        return true;
    }

    /*
     * With PMSA, when the MPU is disabled, all memory types in the
     * default map are Normal, so don't need alignment enforcing.
     */
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        return false;
    }

    /*
     * With VMSA, if translation is disabled, then the default memory type
     * is Device(-nGnRnE) instead of Normal, which requires that alignment
     * be enforced. Since this affects all RAM, it is most efficient
     * to handle this during translation.
     */
    if (sctlr & SCTLR_M) {
        /* Translation enabled: memory type in PTE via MAIR_ELx. */
        return false;
    }
    if (el < 2 && (arm_hcr_el2_eff(env) & (HCR_DC | HCR_VM))) {
        /* Stage 2 translation enabled: memory type in PTE. */
        return false;
    }
    return true;
#endif
}

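/*
 * Fill in the TB flag bits that are common to all translation regimes:
 * the FP exception EL, the core MMU index, and whether single-step
 * debug is active.
 */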
static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
                                           ARMMMUIdx mmu_idx,
                                           CPUARMTBFlags flags)
{
    DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
    DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
    }

    return flags;
}

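/*
 * Add the TB flag bits common to all AArch32 regimes (SCTLR.B,
 * big-endian data, NS) on top of the generic flags above.
 */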
static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                              ARMMMUIdx mmu_idx,
                                              CPUARMTBFlags flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        DP_TBFLAG_A32(flags, SCTLR__B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }
    DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

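/*
 * Build the TB flags for M-profile cores: alignment trapping, Handler
 * mode, stack-limit checking and the Secure state.
 */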
static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    uint32_t ccr = env->v7m.ccr[env->v7m.secure];

    /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
    if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_v7m_is_handler_mode(env)) {
        DP_TBFLAG_M32(flags, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is negative.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        DP_TBFLAG_M32(flags, STACKCHECK, 1);
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY) && env->v7m.secure) {
        DP_TBFLAG_M32(flags, SECURE, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
static bool sme_fa64(CPUARMState *env, int el)
{
    if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
        return false;
    }

    if (el <= 1 && !el_is_in_host(env, el)) {
        if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
            return false;
        }
    }
    if (el <= 2 && arm_is_el2_enabled(env)) {
        if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
            return false;
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
            return false;
        }
    }

    return true;
}

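/* Build the TB flags for AArch32 A/R-profile cores. */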
static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    int el = arm_current_el(env);
    uint64_t sctlr = arm_sctlr(env, el);

    if (aprofile_require_alignment(env, el, sctlr)) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_el_is_aa64(env, 1)) {
        DP_TBFLAG_A32(flags, VFPEN, 1);
    }

    if (el < 2 && env->cp15.hstr_el2 && arm_is_el2_enabled(env) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
    }

    if (arm_fgt_active(env, el)) {
        DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
        if (fgt_svc(env, el)) {
            DP_TBFLAG_ANY(flags, FGT_SVC, 1);
        }
    }

    if (env->uncached_cpsr & CPSR_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    /*
     * The SME exception we are testing for is raised via
     * AArch64.CheckFPAdvSIMDEnabled(), as called from
     * AArch32.CheckAdvSIMDOrFPEnabled().
     */
    if (el == 0
        && FIELD_EX64(env->svcr, SVCR, SM)
        && (!arm_is_el2_enabled(env)
            || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
        && arm_el_is_aa64(env, 1)
        && !sme_fa64(env, el)) {
        DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
    }

    if (arm_aa32_secure_pl1_0(env)) {
        DP_TBFLAG_A32(flags, S_PL1_0, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

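/*
 * Build the TB flags for AArch64: TBI/TBID, SVE/SME vector lengths and
 * exception ELs, alignment, endianness, pauth, BTI, unprivileged-access
 * state, FEAT_NV and MTE.
 */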
static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
    uint64_t tcr = regime_tcr(env, mmu_idx);
    uint64_t hcr = arm_hcr_el2_eff(env);
    uint64_t sctlr;
    int tbii, tbid;

    DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);

    /* Get control bits for tagged addresses. */
    tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
    tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);

    DP_TBFLAG_A64(flags, TBII, tbii);
    DP_TBFLAG_A64(flags, TBID, tbid);

    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        int sve_el = sve_exception_el(env, el);

        /*
         * If either FP or SVE are disabled, translator does not need len.
         * If SVE EL > FP EL, FP exception has precedence, and translator
         * does not need SVE EL. Save potential re-translations by forcing
         * the unneeded data to zero.
         */
        if (fp_el != 0) {
            if (sve_el > fp_el) {
                sve_el = 0;
            }
        } else if (sve_el == 0) {
            DP_TBFLAG_A64(flags, VL, sve_vqm1_for_el(env, el));
        }
        DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
    }
    if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
        int sme_el = sme_exception_el(env, el);
        bool sm = FIELD_EX64(env->svcr, SVCR, SM);

        DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el);
        if (sme_el == 0) {
            /* Similarly, do not compute SVL if SME is disabled. */
            int svl = sve_vqm1_for_el_sm(env, el, true);
            DP_TBFLAG_A64(flags, SVL, svl);
            if (sm) {
                /* If SVE is disabled, we will not have set VL above. */
                DP_TBFLAG_A64(flags, VL, svl);
            }
        }
        if (sm) {
            DP_TBFLAG_A64(flags, PSTATE_SM, 1);
            DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
        }
        DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
    }

    sctlr = regime_sctlr(env, stage1);

    if (aprofile_require_alignment(env, el, sctlr)) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }

    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
        /*
         * In order to save space in flags, we record only whether
         * pauth is "inactive", meaning all insns are implemented as
         * a nop, or "active" when some action must be performed.
         * The decision of which action to take is left to a helper.
         */
        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
            DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
        }
    }

    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
            DP_TBFLAG_A64(flags, BT, 1);
        }
    }

    if (cpu_isar_feature(aa64_lse2, env_archcpu(env))) {
        if (sctlr & SCTLR_nAA) {
            DP_TBFLAG_A64(flags, NAA, 1);
        }
    }

    /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
    if (!(env->pstate & PSTATE_UAO)) {
        switch (mmu_idx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            /* FEAT_NV: NV,NV1 == 1,1 means we don't do UNPRIV accesses */
            if ((hcr & (HCR_NV | HCR_NV1)) != (HCR_NV | HCR_NV1)) {
                DP_TBFLAG_A64(flags, UNPRIV, 1);
            }
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            /*
             * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
             */
            if (env->cp15.hcr_el2 & HCR_TGE) {
                DP_TBFLAG_A64(flags, UNPRIV, 1);
            }
            break;
        default:
            break;
        }
    }

    if (env->pstate & PSTATE_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    if (arm_fgt_active(env, el)) {
        DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
        if (FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, ERET)) {
            DP_TBFLAG_A64(flags, TRAP_ERET, 1);
        }
        if (fgt_svc(env, el)) {
            DP_TBFLAG_ANY(flags, FGT_SVC, 1);
        }
    }

    /*
     * ERET can also be trapped for FEAT_NV. arm_hcr_el2_eff() takes care
     * of "is EL2 enabled" and the NV bit can only be set if FEAT_NV is present.
     */
    if (el == 1 && (hcr & HCR_NV)) {
        DP_TBFLAG_A64(flags, TRAP_ERET, 1);
        DP_TBFLAG_A64(flags, NV, 1);
        if (hcr & HCR_NV1) {
            DP_TBFLAG_A64(flags, NV1, 1);
        }
        if (hcr & HCR_NV2) {
            DP_TBFLAG_A64(flags, NV2, 1);
            if (hcr & HCR_E2H) {
                DP_TBFLAG_A64(flags, NV2_MEM_E20, 1);
            }
            if (env->cp15.sctlr_el[2] & SCTLR_EE) {
                DP_TBFLAG_A64(flags, NV2_MEM_BE, 1);
            }
        }
    }

    if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
        /*
         * Set MTE_ACTIVE if any access may be Checked, and leave clear
         * if all accesses must be Unchecked:
         * 1) If no TBI, then there are no tags in the address to check,
         * 2) If Tag Check Override, then all accesses are Unchecked,
         * 3) If Tag Check Fail == 0, then Checked accesses have no effect,
         * 4) If no Allocation Tag Access, then all accesses are Unchecked.
         */
        if (allocation_tag_access_enabled(env, el, sctlr)) {
            DP_TBFLAG_A64(flags, ATA, 1);
            if (tbid
                && !(env->pstate & PSTATE_TCO)
                && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
                DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
                if (!EX_TBFLAG_A64(flags, UNPRIV)) {
                    /*
                     * In non-unpriv contexts (eg EL0), unpriv load/stores
                     * act like normal ones; duplicate the MTE info to
                     * avoid translate-a64.c having to check UNPRIV to see
                     * whether it is OK to index into MTE_ACTIVE[].
                     */
                    DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
                }
            }
        }
        /* And again for unprivileged accesses, if required. */
        if (EX_TBFLAG_A64(flags, UNPRIV)
            && tbid
            && !(env->pstate & PSTATE_TCO)
            && (sctlr & SCTLR_TCF0)
            && allocation_tag_access_enabled(env, 0, sctlr)) {
            DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
        }
        /*
         * For unpriv tag-setting accesses we also need ATA0. Again, in
         * contexts where unpriv and normal insns are the same we
         * duplicate the ATA bit to save effort for translate-a64.c.
         */
        if (EX_TBFLAG_A64(flags, UNPRIV)) {
            if (allocation_tag_access_enabled(env, 0, sctlr)) {
                DP_TBFLAG_A64(flags, ATA0, 1);
            }
        } else {
            DP_TBFLAG_A64(flags, ATA0, EX_TBFLAG_A64(flags, ATA));
        }
        /* Cache TCMA as well as TBI. */
        DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
    }

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

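/*
 * Rebuild the flags for the current exception level, dispatching on
 * whether the CPU is in AArch64, M-profile or AArch32 A/R-profile state.
 */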
static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}

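/* Recompute env->hflags from scratch and cache the result. */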
void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us, we need to recompute.
 */
void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us, we need to recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}

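/*
 * Debug-only consistency check: the cached hflags must match a fresh
 * rebuild; a mismatch indicates a missed arm_rebuild_hflags() call.
 */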
void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    CPUARMTBFlags c = env->hflags;
    CPUARMTBFlags r = rebuild_hflags_internal(env);

    if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
        fprintf(stderr, "TCG hflags mismatch "
                        "(current:(0x%08x,0x" TARGET_FMT_lx ")"
                        " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
                c.flags, c.flags2, r.flags, r.flags2);
        abort();
    }
#endif
}