/*
 * ARM TLB (Translation lookaside buffer) helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

/*
 * Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);
    return regime_using_lpae_format(env, mmu_idx);
}
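
/*
 * Combine the data abort syndrome template built at translate time with
 * the runtime fault information in @fi to form the final syndrome value.
 */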
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            ARMMMUFaultInfo *fi,
                                            unsigned int target_el,
                                            bool same_el, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only set for stage-2 data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2
     * or for stage-1 faults.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     *
     * TODO: FEAT_LS64/FEAT_LS64_V/FEAT_LS64_ACCDATA: Translation,
     * Access Flag, and Permission faults caused by LD64B, ST64B,
     * ST64BV, or ST64BV0 insns report syndrome info even for stage-1
     * faults and regardless of the target EL.
     */
    if (template_syn & ARM_EL_VNCR) {
        /*
         * FEAT_NV2 faults on accesses via VNCR_EL2 are a special case:
         * they are always reported as "same EL", even though we are going
         * from EL1 to EL2.
         */
        syn = syn_data_abort_vncr(fi->ea, is_write, fsc);
    } else if (!(template_syn & ARM_EL_ISV) || target_el != 2
               || fi->s1ptw || !fi->stage2) {
        syn = syn_data_abort_no_iss(same_el, 0,
                                    fi->ea, 0, fi->s1ptw, is_write, fsc);
    } else {
        /*
         * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      fi->ea, 0, fi->s1ptw, is_write, fsc,
                                      true);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}
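
/*
 * Compute the FSR value to report for the fault described by @fi, and
 * return it; store in *ret_fsc the fault status code in the form needed
 * for the exception syndrome.
 */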
static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi,
                                int target_el, int mmu_idx, uint32_t *ret_fsc)
{
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
    uint32_t fsr, fsc;

    /*
     * For M-profile there is no guest-facing FSR. We compute a
     * short-form value for env->exception.fsr which we will then
     * examine in arm_v7m_cpu_do_interrupt(). In theory we could
     * use the LPAE format instead as long as both bits of code agree
     * (and arm_fi_to_lfsc() handled the M-profile specific
     * ARMFault_QEMU_NSCExec and ARMFault_QEMU_SFault cases).
     */
    if (!arm_feature(env, ARM_FEATURE_M) &&
        (target_el == 2 || arm_el_is_aa64(env, target_el) ||
         arm_s1_regime_using_lpae_format(env, arm_mmu_idx))) {
        /*
         * LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    *ret_fsc = fsc;
    return fsr;
}
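
/*
 * Return true if the fault described by @fi should be reported as a
 * Granule Protection Check exception rather than as an instruction or
 * data abort.
 */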
static bool report_as_gpc_exception(ARMCPU *cpu, int current_el,
                                    ARMMMUFaultInfo *fi)
{
    bool ret;

    switch (fi->gpcf) {
    case GPCF_None:
        return false;
    case GPCF_AddressSize:
    case GPCF_Walk:
    case GPCF_EABT:
        /* R_PYTGX: GPT faults are reported as GPC. */
        ret = true;
        break;
    case GPCF_Fail:
        /*
         * R_BLYPM: A GPF at EL3 is reported as insn or data abort.
         * R_VBZMW, R_LXHQR: A GPF at EL[0-2] is reported as a GPC
         * if SCR_EL3.GPF is set, otherwise an insn or data abort.
         */
        ret = (cpu->env.cp15.scr_el3 & SCR_GPF) && current_el != 3;
        break;
    default:
        g_assert_not_reached();
    }

    assert(cpu_isar_feature(aa64_rme, cpu));
    assert(fi->type == ARMFault_GPCFOnWalk ||
           fi->type == ARMFault_GPCFOnOutput);
    if (fi->gpcf == GPCF_AddressSize) {
        assert(fi->level == 0);
    } else {
        assert(fi->level >= 0 && fi->level <= 1);
    }

    return ret;
}
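
/*
 * Encode the GPCSC field of the GPC syndrome: the fault-type encoding
 * from the table below, ORed with the level at which the fault occurred.
 */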
static unsigned encode_gpcsc(ARMMMUFaultInfo *fi)
{
    static uint8_t const gpcsc[] = {
        [GPCF_AddressSize] = 0b000000,
        [GPCF_Walk]        = 0b000100,
        [GPCF_Fail]        = 0b001100,
        [GPCF_EABT]        = 0b010100,
    };

    /* Note that we've validated fi->gpcf and fi->level above. */
    return gpcsc[fi->gpcf] | fi->level;
}
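
/*
 * Deliver the fault described by @fi for the access to @addr, raising
 * either a GPC exception or an instruction or data abort at the
 * appropriate target exception level. Does not return.
 */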
static G_NORETURN
void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                       MMUAccessType access_type,
                       int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el = exception_target_el(env);
    int current_el = arm_current_el(env);
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    /*
     * We know this must be a data or insn abort, and that
     * env->exception.syndrome contains the template syndrome set
     * up at translate time. So we can check only the VNCR bit
     * (and indeed syndrome does not have the EC field in it,
     * because we masked that out in disas_set_insn_syndrome())
     */
    bool is_vncr = (access_type != MMU_INST_FETCH) &&
        (env->exception.syndrome & ARM_EL_VNCR);

    if (is_vncr) {
        /* FEAT_NV2 faults on accesses via VNCR_EL2 go to EL2 */
        target_el = 2;
    }

    if (report_as_gpc_exception(cpu, current_el, fi)) {
        target_el = 3;

        fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

        syn = syn_gpc(fi->stage2 && fi->type == ARMFault_GPCFOnWalk,
                      access_type == MMU_INST_FETCH,
                      encode_gpcsc(fi), is_vncr,
                      0, fi->s1ptw,
                      access_type == MMU_DATA_STORE, fsc);

        env->cp15.mfar_el3 = fi->paddr;
        switch (fi->paddr_space) {
        case ARMSS_Secure:
            break;
        case ARMSS_NonSecure:
            env->cp15.mfar_el3 |= R_MFAR_NS_MASK;
            break;
        case ARMSS_Root:
            env->cp15.mfar_el3 |= R_MFAR_NSE_MASK;
            break;
        case ARMSS_Realm:
            env->cp15.mfar_el3 |= R_MFAR_NSE_MASK | R_MFAR_NS_MASK;
            break;
        default:
            g_assert_not_reached();
        }

        exc = EXCP_GPC;
        goto do_raise;
    }

    /* If SCR_EL3.GPF is unset, GPF may still be routed to EL2. */
    if (fi->gpcf == GPCF_Fail && target_el < 2) {
        if (arm_hcr_el2_eff(env) & HCR_GPF) {
            target_el = 2;
        }
    }

    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
        if (arm_is_secure_below_el3(env) && fi->s1ns) {
            env->cp15.hpfar_el2 |= HPFAR_NS;
        }
    }

    same_el = current_el == target_el;
    fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, fi, target_el,
                                   same_el, access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

 do_raise:
    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}
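
/* Raise an exception for a PC with incorrect alignment. */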
void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
    int target_el = exception_target_el(env);
    int mmu_idx = arm_env_mmu_index(env);
    uint32_t fsc;

    env->exception.vaddress = pc;

    /*
     * Note that the fsc is not applicable to this exception,
     * since any syndrome is pcalignment not insn_abort.
     */
    env->exception.fsr = compute_fsr_fsc(env, &fi, target_el, mmu_idx, &fsc);
    raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);
}

#if !defined(CONFIG_USER_ONLY)

/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
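
/*
 * Handle a TLB miss: check the access alignment, translate the virtual
 * address, and either fill in *out and return true or deliver the
 * resulting fault (returning false only for a failed probe).
 */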
bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr address,
                            MMUAccessType access_type, int mmu_idx,
                            MemOp memop, int size, bool probe, uintptr_t ra)
{
    ARMCPU *cpu = ARM_CPU(cs);
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo local_fi, *fi;

    /*
     * Allow S1_ptw_translate to see any fault generated here.
     * Since this may recurse, read and clear.
     */
    fi = cpu->env.tlb_fi;
    if (fi) {
        cpu->env.tlb_fi = NULL;
    } else {
        fi = memset(&local_fi, 0, sizeof(local_fi));
    }

    /*
     * Per R_XCHFJ, alignment fault not due to memory type has
     * highest precedence. Otherwise, walk the page table and
     * collect the page description.
     */
    if (address & ((1 << memop_alignment_bits(memop)) - 1)) {
        fi->type = ARMFault_Alignment;
    } else if (!get_phys_addr(&cpu->env, address, access_type, memop,
                              core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                              &res, fi)) {
        res.f.extra.arm.pte_attrs = res.cacheattrs.attrs;
        res.f.extra.arm.shareability = res.cacheattrs.shareability;
        *out = res.f;
        return true;
    }
    if (probe) {
        return false;
    }

    /* Now we have a real cpu fault. */
    cpu_restore_state(cs, ra);
    arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
}
#else
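/*
 * User-only: record a fault for a memory access that failed, reporting it
 * as a translation fault (unmapped address) or a permission fault.
 */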
void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra)
{
    ARMMMUFaultInfo fi = {
        .type = maperr ? ARMFault_Translation : ARMFault_Permission,
        .level = 3,
    };
    ARMCPU *cpu = ARM_CPU(cs);

    /*
     * We report both ESR and FAR to signal handlers.
     * For now, it's easiest to deliver the fault normally.
     */
    cpu_restore_state(cs, ra);
    arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi);
}
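
/* User-only: report a misaligned access as a guest alignment fault. */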
void arm_cpu_record_sigbus(CPUState *cs, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra)
{
    arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra);
}
#endif /* !defined(CONFIG_USER_ONLY) */