/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "trace.h"
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}

#ifndef CONFIG_USER_ONLY
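/* Return the lowest-numbered local interrupt that is both pending and
 * enabled for the current privilege mode, or EXCP_NONE if there is none. */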
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
    target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
    target_ulong pending = atomic_read(&env->mip) & env->mie;
    target_ulong mie = env->priv < PRV_M || (env->priv == PRV_M && mstatus_mie);
    target_ulong sie = env->priv < PRV_S || (env->priv == PRV_S && mstatus_sie);
    target_ulong irqs = (pending & ~env->mideleg & -mie) |
                        (pending & env->mideleg & -sie);

    if (irqs) {
        return ctz64(irqs); /* since non-zero */
    } else {
        return EXCP_NONE; /* indicates no pending interrupt */
    }
}
#endif
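/* Called from the generic CPU loop: if a hard interrupt is pending and
 * enabled, raise it as an exception and dispatch it immediately. */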
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
#endif
    return false;
}
#if !defined(CONFIG_USER_ONLY)

/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        return true;
    }

    return false;
}
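/* Record a device's claim on a set of interrupt lines; fails if any of the
 * requested bits have already been claimed by another source. */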
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}
struct CpuAsyncInfo {
    uint32_t new_mip;
};

static void riscv_cpu_update_mip_irqs_async(CPUState *target_cpu_state,
                                            run_on_cpu_data data)
{
    struct CpuAsyncInfo *info = (struct CpuAsyncInfo *) data.host_ptr;

    if (info->new_mip) {
        cpu_interrupt(target_cpu_state, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(target_cpu_state, CPU_INTERRUPT_HARD);
    }

    g_free(info);
}
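/* Modify the masked bits of mip atomically and hand the resulting
 * interrupt-line change off to the CPU's own thread; returns the old mip. */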
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
{
    CPURISCVState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct CpuAsyncInfo *info;
    uint32_t old, new, cmp = atomic_read(&env->mip);

    do {
        old = cmp;
        new = (old & ~mask) | (value & mask);
        cmp = atomic_cmpxchg(&env->mip, old, new);
    } while (old != cmp);

    info = g_new(struct CpuAsyncInfo, 1);
    info->new_mip = new;

    async_run_on_cpu(cs, riscv_cpu_update_mip_irqs_async,
                     RUN_ON_CPU_HOST_PTR(info));

    return old;
}
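/* Move the hart to a new privilege mode, e.g. when a trap is taken in
 * riscv_cpu_do_interrupt() below. */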
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
    env->load_res = -1;
}
/* get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                int access_type, int mmu_idx)
{
    /* NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct */

    int mode = mmu_idx;
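    /* With mstatus.MPRV set, data accesses made from M-mode (but not
     * instruction fetches) are translated using the mode in mstatus.MPP. */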
    if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    hwaddr base;
    int levels, ptidxbits, ptesize, vm, sum;
    int mxr = get_field(env->mstatus, MSTATUS_MXR);
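    /* Decode the page-table geometry (levels, index bits per level and PTE
     * size): from satp for privileged spec >= 1.10, from mstatus.VM for the
     * legacy 1.9.1 layout. Bare modes translate one-to-one. */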
    if (env->priv_ver >= PRIV_VERSION_1_10_0) {
        base = (hwaddr)get_field(env->satp, SATP_PPN) << PGSHIFT;
        sum = get_field(env->mstatus, MSTATUS_SUM);
        vm = get_field(env->satp, SATP_MODE);
        switch (vm) {
        case VM_1_10_SV32:
            levels = 2; ptidxbits = 10; ptesize = 4; break;
        case VM_1_10_SV39:
            levels = 3; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_SV48:
            levels = 4; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_SV57:
            levels = 5; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_MBARE:
            *physical = addr;
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TRANSLATE_SUCCESS;
        default:
            g_assert_not_reached();
        }
    } else {
        base = (hwaddr)(env->sptbr) << PGSHIFT;
        sum = !get_field(env->mstatus, MSTATUS_PUM);
        vm = get_field(env->mstatus, MSTATUS_VM);
        switch (vm) {
        case VM_1_09_SV32:
            levels = 2; ptidxbits = 10; ptesize = 4; break;
        case VM_1_09_SV39:
            levels = 3; ptidxbits = 9; ptesize = 8; break;
        case VM_1_09_SV48:
            levels = 4; ptidxbits = 9; ptesize = 8; break;
        case VM_1_09_MBARE:
            *physical = addr;
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TRANSLATE_SUCCESS;
        default:
            g_assert_not_reached();
        }
    }
    CPUState *cs = env_cpu(env);
    int va_bits = PGSHIFT + levels * ptidxbits;
    target_ulong mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    target_ulong masked_msbs = (addr >> (va_bits - 1)) & mask;
    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }

    int ptshift = (levels - 1) * ptidxbits;
    int i;
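    /* Walk the page table one level per iteration, starting from the root.
     * The restart label is the target for re-walking when a racing update
     * invalidates the PTE we were about to modify. */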
#if !TCG_OVERSIZED_GUEST
restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << ptidxbits) - 1);
        /* check that physical address of PTE is legal */
        hwaddr pte_addr = base + idx * ptesize;

        if (riscv_feature(env, RISCV_FEATURE_PMP) &&
            !pmp_hart_has_privs(env, pte_addr, sizeof(target_ulong),
                1 << MMU_DATA_LOAD, PRV_S)) {
            return TRANSLATE_PMP_FAIL;
        }
#if defined(TARGET_RISCV32)
        target_ulong pte = ldl_phys(cs->as, pte_addr);
#elif defined(TARGET_RISCV64)
        target_ulong pte = ldq_phys(cs->as, pte_addr);
#endif
        hwaddr ppn = pte >> PTE_PPN_SHIFT;
        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /* User PTE flags when not U mode and mstatus.SUM is not set,
               or the access type is an instruction fetch */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);
            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be updated
                 *   and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr,
                    &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        atomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /* misconfigured PTE in ROM (AD bits are not preset) or
                     * PTE is in IO space and can't be updated atomically */
                    return TRANSLATE_FAIL;
                }
            }
            /* for superpage mappings, make a fake leaf PTE for the TLB's
               benefit. */
            target_ulong vpn = addr >> PGSHIFT;
            *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT;

            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /* add write permission on stores or if the page is already dirty,
               so that we TLB miss on later writes to update the dirty bit */
            if ((pte & PTE_W) &&
                    (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}
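/* Translation failed: convert the failure into the appropriate page-fault or
 * access-fault exception, depending on whether paging was in effect and
 * whether the failure was a PMP violation. */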
static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type, bool pmp_violation)
{
    CPUState *cs = env_cpu(env);
    int page_fault_exceptions =
        (env->priv_ver >= PRIV_VERSION_1_10_0) &&
        get_field(env->satp, SATP_MODE) != VM_1_10_MBARE &&
        !pmp_violation;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
}
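/* Debug (gdbstub/monitor) physical address lookup; returns -1 if the address
 * does not currently translate. */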
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, 0, mmu_idx)) {
        return -1;
    }
    return phys_addr;
}
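/* Handle a load or store that hit unassigned memory by raising the
 * corresponding access fault. */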
void riscv_cpu_unassigned_access(CPUState *cs, hwaddr addr, bool is_write,
                                 bool is_exec, int unused, unsigned size)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (is_write) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    }

    riscv_raise_exception(&cpu->env, cs->exception_index, GETPC());
}
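/* Raise the address-misaligned exception that matches the access type. */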
void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    riscv_raise_exception(env, cs->exception_index, retaddr);
}
#endif
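/* Fill a QEMU TLB entry for a guest virtual address, raising the guest MMU
 * exception on failure (or just reporting failure when probing). */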
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    hwaddr pa = 0;
    int prot;
    bool pmp_violation = false;
    int ret = TRANSLATE_FAIL;
    int mode = mmu_idx;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    ret = get_physical_address(env, &pa, &prot, address, access_type, mmu_idx);

    if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    qemu_log_mask(CPU_LOG_MMU,
                  "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
                  " prot %d\n", __func__, address, ret, pa, prot);

    if (riscv_feature(env, RISCV_FEATURE_PMP) &&
        (ret == TRANSLATE_SUCCESS) &&
        !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) {
        ret = TRANSLATE_PMP_FAIL;
    }
    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }
    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type, pmp_violation);
        riscv_raise_exception(env, cs->exception_index, retaddr);
    }
#else
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
        break;
    }
    cpu_loop_exit_restore(cs, retaddr);
#endif
}
/*
 * Adapted from Spike's processor_t::take_trap.
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide
     * so we mask off the MSB and separate into trap type and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    target_ulong deleg = async ? env->mideleg : env->medeleg;
    target_ulong tval = 0;
    static const int ecall_cause_map[] = {
        [PRV_U] = RISCV_EXCP_U_ECALL,
        [PRV_S] = RISCV_EXCP_S_ECALL,
        [PRV_H] = RISCV_EXCP_H_ECALL,
        [PRV_M] = RISCV_EXCP_M_ECALL
    };

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            tval = env->badaddr;
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);
            cause = ecall_cause_map[env->priv];
        }
    }
    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval, cause < 16 ?
        (async ? riscv_intr_names : riscv_excp_names)[cause] : "(unknown)");
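    /* A trap is handled in S-mode when it occurs in S or U mode and the
     * corresponding bit is set in mideleg/medeleg; otherwise it is taken
     * in M-mode. */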
    if (env->priv <= PRV_S &&
            cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        target_ulong s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
            get_field(s, MSTATUS_SIE) : get_field(s, MSTATUS_UIE << env->priv));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
        env->sepc = env->pc;
        env->sbadaddr = tval;
        env->pc = (env->stvec >> 2 << 2) +
            ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        target_ulong s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
            get_field(s, MSTATUS_MIE) : get_field(s, MSTATUS_UIE << env->priv));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mbadaddr = tval;
        env->pc = (env->mtvec >> 2 << 2) +
            ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }
    /* NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */
#endif
    cs->exception_index = EXCP_NONE; /* mark handled to qemu */
}