4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
23 #include "hw/core/cpu.h"
24 #include "hw/registerfields.h"
25 #include "hw/qdev-properties.h"
26 #include "exec/cpu-defs.h"
27 #include "exec/gdbstub.h"
28 #include "qemu/cpu-float.h"
29 #include "qom/object.h"
30 #include "qemu/int128.h"
33 #include "qapi/qapi-types-common.h"
36 typedef struct CPUArchState CPURISCVState
;
38 #define CPU_RESOLVING_TYPE TYPE_RISCV_CPU
40 #if defined(TARGET_RISCV32)
41 # define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE32
42 #elif defined(TARGET_RISCV64)
43 # define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE64
47 * RISC-V-specific extra insn start words:
48 * 1: Original instruction opcode
50 #define TARGET_INSN_START_EXTRA_WORDS 1
52 #define RV(x) ((target_ulong)1 << (x - 'A'))
55 * Update misa_bits[], misa_ext_info_arr[] and misa_ext_cfgs[]
56 * when adding new MISA bits here.
59 #define RVE RV('E') /* E and I are mutually exclusive */
/* Table of all single-letter MISA extension bits known to QEMU
 * (defined elsewhere — see misa_bits[] definition; TODO confirm location). */
73 extern const uint32_t misa_bits
[];
/* Return the short name string for one MISA extension bit. */
74 const char *riscv_get_misa_ext_name(uint32_t bit
);
/* Return a human-readable description string for one MISA extension bit. */
75 const char *riscv_get_misa_ext_description(uint32_t bit
);
77 #define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
79 typedef struct riscv_cpu_profile
{
80 struct riscv_cpu_profile
*parent
;
87 const int32_t ext_offsets
[];
90 #define RISCV_PROFILE_EXT_LIST_END -1
91 #define RISCV_PROFILE_ATTR_UNUSED -1
93 extern RISCVCPUProfile
*riscv_profiles
[];
95 /* Privileged specification version */
96 #define PRIV_VER_1_10_0_STR "v1.10.0"
97 #define PRIV_VER_1_11_0_STR "v1.11.0"
98 #define PRIV_VER_1_12_0_STR "v1.12.0"
99 #define PRIV_VER_1_13_0_STR "v1.13.0"
101 PRIV_VERSION_1_10_0
= 0,
106 PRIV_VERSION_LATEST
= PRIV_VERSION_1_13_0
,
109 #define VEXT_VERSION_1_00_0 0x00010000
110 #define VEXT_VER_1_00_0_STR "v1.0"
116 TRANSLATE_G_STAGE_FAIL
119 /* Extension context status */
121 EXT_STATUS_DISABLED
= 0,
127 typedef struct riscv_cpu_implied_exts_rule
{
128 #ifndef CONFIG_USER_ONLY
130 * Bitmask indicates the rule enabled status for the harts.
131 * This enhancement is only available in system-mode QEMU,
132 * as we don't have a good way (e.g. mhartid) to distinguish
133 * the SMP cores in user-mode QEMU.
135 unsigned long *enabled
;
137 /* True if this is a MISA implied rule. */
139 /* ext is MISA bit if is_misa flag is true, else multi extension offset. */
141 const uint32_t implied_misa_exts
;
142 const uint32_t implied_multi_exts
[];
143 } RISCVCPUImpliedExtsRule
;
145 extern RISCVCPUImpliedExtsRule
*riscv_misa_ext_implied_rules
[];
146 extern RISCVCPUImpliedExtsRule
*riscv_multi_ext_implied_rules
[];
148 #define RISCV_IMPLIED_EXTS_RULE_END -1
150 #define MMU_USER_IDX 3
152 #define MAX_RISCV_PMPS (16)
154 #if !defined(CONFIG_USER_ONLY)
159 #define RV_VLEN_MAX 1024
160 #define RV_MAX_MHPMEVENTS 32
161 #define RV_MAX_MHPMCOUNTERS 32
163 FIELD(VTYPE
, VLMUL
, 0, 3)
164 FIELD(VTYPE
, VSEW
, 3, 3)
165 FIELD(VTYPE
, VTA
, 6, 1)
166 FIELD(VTYPE
, VMA
, 7, 1)
167 FIELD(VTYPE
, VEDIV
, 8, 2)
168 FIELD(VTYPE
, RESERVED
, 10, sizeof(target_ulong
) * 8 - 11)
170 typedef struct PMUCTRState
{
171 /* Current value of a counter */
172 target_ulong mhpmcounter_val
;
173 /* Current value of a counter in RV32 */
174 target_ulong mhpmcounterh_val
;
175 /* Snapshot values of counter */
176 target_ulong mhpmcounter_prev
;
177 /* Snapshot value of a counter in RV32 */
178 target_ulong mhpmcounterh_prev
;
179 /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
180 target_ulong irq_overflow_left
;
183 typedef struct PMUFixedCtrState
{
184 /* Track cycle and icount for each privilege mode */
186 uint64_t counter_prev
[4];
187 /* Track cycle and icount for each privilege mode when V = 1 */
188 uint64_t counter_virt
[2];
189 uint64_t counter_virt_prev
[2];
192 struct CPUArchState
{
193 target_ulong gpr
[32];
194 target_ulong gprh
[32]; /* 64 top bits of the 128-bit registers */
196 /* vector coprocessor state. */
197 uint64_t vreg
[32 * RV_VLEN_MAX
/ 64] QEMU_ALIGNED(16);
206 target_ulong load_res
;
207 target_ulong load_val
;
209 /* Floating-Point state */
210 uint64_t fpr
[32]; /* assume both F and D extensions */
212 float_status fp_status
;
214 target_ulong badaddr
;
217 target_ulong guest_phys_fault_addr
;
219 target_ulong priv_ver
;
220 target_ulong vext_ver
;
222 /* RISCVMXL, but uint32_t for vmstate migration */
223 uint32_t misa_mxl
; /* current mxl */
224 uint32_t misa_ext
; /* current extensions */
225 uint32_t misa_ext_mask
; /* max ext for this cpu */
226 uint32_t xl
; /* current xlen */
228 /* 128-bit helpers upper part return value */
233 #ifdef CONFIG_USER_ONLY
237 #ifndef CONFIG_USER_ONLY
239 /* This contains QEMU specific information about the virt state. */
244 target_ulong mhartid
;
246 * For RV32 this is 32-bit mstatus and 32-bit mstatush.
247 * For RV64 this is a 64-bit mstatus.
253 * MIP contains the software writable version of SEIP ORed with the
254 * external interrupt value. The MIP register is always up-to-date.
255 * To keep track of the current source, we also save booleans of the values
267 * When mideleg[i]=0 and mvien[i]=1, sie[i] is no longer an
268 * alias of mie[i] and needs to be maintained separately.
273 * When hideleg[i]=0 and hvien[i]=1, vsie[i] is no longer an
274 * alias of sie[i] (mie[i]) and needs to be maintained separately.
278 target_ulong satp
; /* since: priv-1.10.0 */
280 target_ulong medeleg
;
289 target_ulong mtval
; /* since: priv-1.10.0 */
291 /* Machine and Supervisor interrupt priorities */
296 target_ulong miselect
;
297 target_ulong siselect
;
301 /* Hypervisor CSRs */
302 target_ulong hstatus
;
303 target_ulong hedeleg
;
315 * Bits VSSIP, VSTIP and VSEIP in hvip are maintained in mip. Other bits
316 * from 0:12 are reserved. Bits 13:63 are not aliased and must be separately
321 /* Hypervisor controlled virtual interrupt priorities */
325 /* Upper 64-bits of 128-bit CSRs */
331 * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
332 * For RV64 this is a 64-bit vsstatus.
336 target_ulong vsscratch
;
338 target_ulong vscause
;
342 /* AIA VS-mode CSRs */
343 target_ulong vsiselect
;
349 target_ulong stvec_hs
;
350 target_ulong sscratch_hs
;
351 target_ulong sepc_hs
;
352 target_ulong scause_hs
;
353 target_ulong stval_hs
;
354 target_ulong satp_hs
;
358 * Signals whether the current exception occurred with two-stage address
359 * translation active.
361 bool two_stage_lookup
;
363 * Signals whether the current exception occurred while doing two-stage
364 * address translation for the VS-stage page table walk.
366 bool two_stage_indirect_lookup
;
371 uint32_t mcountinhibit
;
373 /* PMU cycle & instret privilege mode filtering */
374 target_ulong mcyclecfg
;
375 target_ulong mcyclecfgh
;
376 target_ulong minstretcfg
;
377 target_ulong minstretcfgh
;
379 /* PMU counter state */
380 PMUCTRState pmu_ctrs
[RV_MAX_MHPMCOUNTERS
];
382 /* PMU event selector configured values. First three are unused */
383 target_ulong mhpmevent_val
[RV_MAX_MHPMEVENTS
];
385 /* PMU event selector configured values for RV32 */
386 target_ulong mhpmeventh_val
[RV_MAX_MHPMEVENTS
];
388 PMUFixedCtrState pmu_fixed_ctrs
[2];
390 target_ulong sscratch
;
391 target_ulong mscratch
;
398 /* physical memory protection */
399 pmp_table_t pmp_state
;
400 target_ulong mseccfg
;
403 target_ulong trigger_cur
;
404 target_ulong tdata1
[RV_MAX_TRIGGERS
];
405 target_ulong tdata2
[RV_MAX_TRIGGERS
];
406 target_ulong tdata3
[RV_MAX_TRIGGERS
];
407 target_ulong mcontext
;
408 struct CPUBreakpoint
*cpu_breakpoint
[RV_MAX_TRIGGERS
];
409 struct CPUWatchpoint
*cpu_watchpoint
[RV_MAX_TRIGGERS
];
410 QEMUTimer
*itrigger_timer
[RV_MAX_TRIGGERS
];
412 bool itrigger_enabled
;
414 /* machine specific rdtime callback */
415 uint64_t (*rdtime_fn
)(void *);
418 /* machine specific AIA ireg read-modify-write callback */
419 #define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
420 ((((__xlen) & 0xff) << 24) | \
421 (((__vgein) & 0x3f) << 20) | \
422 (((__virt) & 0x1) << 18) | \
423 (((__priv) & 0x3) << 16) | \
425 #define AIA_IREG_ISEL(__ireg) ((__ireg) & 0xffff)
426 #define AIA_IREG_PRIV(__ireg) (((__ireg) >> 16) & 0x3)
427 #define AIA_IREG_VIRT(__ireg) (((__ireg) >> 18) & 0x1)
428 #define AIA_IREG_VGEIN(__ireg) (((__ireg) >> 20) & 0x3f)
429 #define AIA_IREG_XLEN(__ireg) (((__ireg) >> 24) & 0xff)
430 int (*aia_ireg_rmw_fn
[4])(void *arg
, target_ulong reg
,
431 target_ulong
*val
, target_ulong new_val
, target_ulong write_mask
);
432 void *aia_ireg_rmw_fn_arg
[4];
434 /* True if in debugger mode. */
438 * CSRs for PointerMasking extension
441 target_ulong mpmmask
;
442 target_ulong mpmbase
;
443 target_ulong spmmask
;
444 target_ulong spmbase
;
445 target_ulong upmmask
;
446 target_ulong upmbase
;
448 /* CSRs for execution environment configuration */
450 uint64_t mstateen
[SMSTATEEN_MAX_COUNT
];
451 uint64_t hstateen
[SMSTATEEN_MAX_COUNT
];
452 uint64_t sstateen
[SMSTATEEN_MAX_COUNT
];
453 target_ulong senvcfg
;
456 target_ulong cur_pmmask
;
457 target_ulong cur_pmbase
;
459 /* Fields from here on are preserved across CPU reset. */
460 QEMUTimer
*stimer
; /* Internal timer for S-mode interrupt */
461 QEMUTimer
*vstimer
; /* Internal timer for VS-mode interrupt */
469 bool kvm_timer_dirty
;
470 uint64_t kvm_timer_time
;
471 uint64_t kvm_timer_compare
;
472 uint64_t kvm_timer_state
;
473 uint64_t kvm_timer_frequency
;
474 #endif /* CONFIG_KVM */
479 * @env: #CPURISCVState
488 GDBFeature dyn_csr_feature
;
489 GDBFeature dyn_vreg_feature
;
491 /* Configuration Settings */
494 QEMUTimer
*pmu_timer
;
495 /* A bitmask of Available programmable counters */
496 uint32_t pmu_avail_ctrs
;
497 /* Mapping of events to counters */
498 GHashTable
*pmu_event_ctr_map
;
499 const GPtrArray
*decoders
;
504 * @parent_realize: The parent class' realize handler.
505 * @parent_phases: The parent class' reset phase handlers.
509 struct RISCVCPUClass
{
510 CPUClass parent_class
;
512 DeviceRealize parent_realize
;
513 ResettablePhases parent_phases
;
514 uint32_t misa_mxl_max
; /* max mxl for this cpu */
517 static inline int riscv_has_ext(CPURISCVState
*env
, target_ulong ext
)
519 return (env
->misa_ext
& ext
) != 0;
522 #include "cpu_user.h"
524 extern const char * const riscv_int_regnames
[];
525 extern const char * const riscv_int_regnamesh
[];
526 extern const char * const riscv_fpr_regnames
[];
/* Return a printable name for a trap cause; 'async' selects the interrupt
 * (as opposed to exception) name space. */
528 const char *riscv_cpu_get_trap_name(target_ulong cause
, bool async
);
/* Core-dump note writers for 64-bit and 32-bit ELF dumps. */
529 int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f
, CPUState
*cs
,
530 int cpuid
, DumpState
*s
);
531 int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f
, CPUState
*cs
,
532 int cpuid
, DumpState
*s
);
/* gdbstub register accessors; 'reg' is the gdb register number. */
533 int riscv_cpu_gdb_read_register(CPUState
*cpu
, GByteArray
*buf
, int reg
);
534 int riscv_cpu_gdb_write_register(CPUState
*cpu
, uint8_t *buf
, int reg
);
/* Map an hviprio index to its IRQ number and read-as-zero flag (outputs
 * via out_irq/out_rdzero). */
535 int riscv_cpu_hviprio_index2irq(int index
, int *out_irq
, int *out_rdzero
);
/* Default priority for a given interrupt number. */
536 uint8_t riscv_cpu_default_priority(int irq
);
/* Pending-interrupt queries for M/S/VS modes. */
537 uint64_t riscv_cpu_all_pending(CPURISCVState
*env
);
538 int riscv_cpu_mirq_pending(CPURISCVState
*env
);
539 int riscv_cpu_sirq_pending(CPURISCVState
*env
);
540 int riscv_cpu_vsirq_pending(CPURISCVState
*env
);
/* Floating-point / vector unit enable state and guest-external-interrupt
 * (GEILEN) accessors. */
541 bool riscv_cpu_fp_enabled(CPURISCVState
*env
);
542 target_ulong
riscv_cpu_get_geilen(CPURISCVState
*env
);
543 void riscv_cpu_set_geilen(CPURISCVState
*env
, target_ulong geilen
);
544 bool riscv_cpu_vector_enabled(CPURISCVState
*env
);
545 void riscv_cpu_set_virt_enabled(CPURISCVState
*env
, bool enable
);
/* MMU index used for translation; 'ifetch' distinguishes instruction
 * fetches from data accesses. */
546 int riscv_env_mmu_index(CPURISCVState
*env
, bool ifetch
);
/* QEMU TCG MMU hooks: unaligned-access fault and TLB fill. */
547 G_NORETURN
void riscv_cpu_do_unaligned_access(CPUState
*cs
, vaddr addr
,
548 MMUAccessType access_type
,
549 int mmu_idx
, uintptr_t retaddr
);
550 bool riscv_cpu_tlb_fill(CPUState
*cs
, vaddr address
, int size
,
551 MMUAccessType access_type
, int mmu_idx
,
552 bool probe
, uintptr_t retaddr
);
/* Build the canonical ISA string for this cpu; caller presumably frees the
 * returned buffer — TODO confirm ownership at call sites. */
553 char *riscv_isa_string(RISCVCPU
*cpu
);
554 int riscv_cpu_max_xlen(RISCVCPUClass
*mcc
);
555 bool riscv_cpu_option_set(const char *optname
);
557 #ifndef CONFIG_USER_ONLY
558 void riscv_cpu_do_interrupt(CPUState
*cpu
);
559 void riscv_isa_write_fdt(RISCVCPU
*cpu
, void *fdt
, char *nodename
);
560 void riscv_cpu_do_transaction_failed(CPUState
*cs
, hwaddr physaddr
,
561 vaddr addr
, unsigned size
,
562 MMUAccessType access_type
,
563 int mmu_idx
, MemTxAttrs attrs
,
564 MemTxResult response
, uintptr_t retaddr
);
565 hwaddr
riscv_cpu_get_phys_page_debug(CPUState
*cpu
, vaddr addr
);
566 bool riscv_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
);
567 void riscv_cpu_swap_hypervisor_regs(CPURISCVState
*env
);
568 int riscv_cpu_claim_interrupts(RISCVCPU
*cpu
, uint64_t interrupts
);
569 uint64_t riscv_cpu_update_mip(CPURISCVState
*env
, uint64_t mask
,
571 void riscv_cpu_interrupt(CPURISCVState
*env
);
572 #define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
573 void riscv_cpu_set_rdtime_fn(CPURISCVState
*env
, uint64_t (*fn
)(void *),
575 void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState
*env
, uint32_t priv
,
576 int (*rmw_fn
)(void *arg
,
579 target_ulong new_val
,
580 target_ulong write_mask
),
583 RISCVException
smstateen_acc_ok(CPURISCVState
*env
, int index
, uint64_t bit
);
584 #endif /* !CONFIG_USER_ONLY */
/* Switch the cpu to privilege level 'newpriv'; 'virt_en' selects whether
 * virtualization (V) mode is active after the switch. */
586 void riscv_cpu_set_mode(CPURISCVState
*env
, target_ulong newpriv
, bool virt_en
);
/* One-time TCG translator initialization. */
588 void riscv_translate_init(void);
/* Raise a guest exception from translated code; does not return. */
589 G_NORETURN
void riscv_raise_exception(CPURISCVState
*env
,
590 uint32_t exception
, uintptr_t pc
);
/* Accessors for the accrued floating-point exception flags (fflags CSR). */
592 target_ulong
riscv_cpu_get_fflags(CPURISCVState
*env
);
593 void riscv_cpu_set_fflags(CPURISCVState
*env
, target_ulong
);
595 #include "exec/cpu-all.h"
597 FIELD(TB_FLAGS
, MEM_IDX
, 0, 3)
598 FIELD(TB_FLAGS
, FS
, 3, 2)
600 FIELD(TB_FLAGS
, VS
, 5, 2)
601 FIELD(TB_FLAGS
, LMUL
, 7, 3)
602 FIELD(TB_FLAGS
, SEW
, 10, 3)
603 FIELD(TB_FLAGS
, VL_EQ_VLMAX
, 13, 1)
604 FIELD(TB_FLAGS
, VILL
, 14, 1)
605 FIELD(TB_FLAGS
, VSTART_EQ_ZERO
, 15, 1)
606 /* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
607 FIELD(TB_FLAGS
, XL
, 16, 2)
608 /* If PointerMasking should be applied */
609 FIELD(TB_FLAGS
, PM_MASK_ENABLED
, 18, 1)
610 FIELD(TB_FLAGS
, PM_BASE_ENABLED
, 19, 1)
611 FIELD(TB_FLAGS
, VTA
, 20, 1)
612 FIELD(TB_FLAGS
, VMA
, 21, 1)
613 /* Native debug itrigger */
614 FIELD(TB_FLAGS
, ITRIGGER
, 22, 1)
615 /* Virtual mode enabled */
616 FIELD(TB_FLAGS
, VIRT_ENABLED
, 23, 1)
617 FIELD(TB_FLAGS
, PRIV
, 24, 2)
618 FIELD(TB_FLAGS
, AXL
, 26, 2)
620 #ifdef TARGET_RISCV32
621 #define riscv_cpu_mxl(env) ((void)(env), MXL_RV32)
623 static inline RISCVMXL
riscv_cpu_mxl(CPURISCVState
*env
)
625 return env
->misa_mxl
;
628 #define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))
630 static inline const RISCVCPUConfig
*riscv_cpu_cfg(CPURISCVState
*env
)
632 return &env_archcpu(env
)->cfg
;
635 #if !defined(CONFIG_USER_ONLY)
636 static inline int cpu_address_mode(CPURISCVState
*env
)
638 int mode
= env
->priv
;
640 if (mode
== PRV_M
&& get_field(env
->mstatus
, MSTATUS_MPRV
)) {
641 mode
= get_field(env
->mstatus
, MSTATUS_MPP
);
646 static inline RISCVMXL
cpu_get_xl(CPURISCVState
*env
, target_ulong mode
)
648 RISCVMXL xl
= env
->misa_mxl
;
650 * When emulating a 32-bit-only cpu, use RV32.
651 * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
652 * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
653 * back to RV64 for lower privs.
655 if (xl
!= MXL_RV32
) {
660 xl
= get_field(env
->mstatus
, MSTATUS64_UXL
);
663 xl
= get_field(env
->mstatus
, MSTATUS64_SXL
);
671 #if defined(TARGET_RISCV32)
672 #define cpu_recompute_xl(env) ((void)(env), MXL_RV32)
674 static inline RISCVMXL
cpu_recompute_xl(CPURISCVState
*env
)
676 #if !defined(CONFIG_USER_ONLY)
677 return cpu_get_xl(env
, env
->priv
);
679 return env
->misa_mxl
;
684 #if defined(TARGET_RISCV32)
685 #define cpu_address_xl(env) ((void)(env), MXL_RV32)
687 static inline RISCVMXL
cpu_address_xl(CPURISCVState
*env
)
689 #ifdef CONFIG_USER_ONLY
692 int mode
= cpu_address_mode(env
);
694 return cpu_get_xl(env
, mode
);
699 static inline int riscv_cpu_xlen(CPURISCVState
*env
)
701 return 16 << env
->xl
;
704 #ifdef TARGET_RISCV32
705 #define riscv_cpu_sxl(env) ((void)(env), MXL_RV32)
707 static inline RISCVMXL
riscv_cpu_sxl(CPURISCVState
*env
)
709 #ifdef CONFIG_USER_ONLY
710 return env
->misa_mxl
;
712 return get_field(env
->mstatus
, MSTATUS64_SXL
);
718 * Encode LMUL to lmul as follows:
729 * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
730 * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
731 * => VLMAX = vlen >> (1 + 3 - (-3))
735 static inline uint32_t vext_get_vlmax(uint32_t vlenb
, uint32_t vsew
,
738 uint32_t vlen
= vlenb
<< 3;
741 * We need to use 'vlen' instead of 'vlenb' to
742 * preserve the '+ 3' in the formula. Otherwise
743 * we risk a negative shift if vsew < lmul.
745 return vlen
>> (vsew
+ 3 - lmul
);
/* Compute pc/cs_base/flags for translation-block lookup. */
748 void cpu_get_tb_cpu_state(CPURISCVState
*env
, vaddr
*pc
,
749 uint64_t *cs_base
, uint32_t *pflags
);
/* Recompute the cached pointer-masking state. */
751 void riscv_cpu_update_mask(CPURISCVState
*env
);
752 bool riscv_cpu_is_32bit(RISCVCPU
*cpu
);
/* CSR read (no write) through the csr_ops dispatch table. */
754 RISCVException
riscv_csrr(CPURISCVState
*env
, int csrno
,
755 target_ulong
*ret_value
);
/* CSR read-modify-write: bits selected by write_mask take new_value;
 * the previous value is returned via ret_value when non-NULL. */
756 RISCVException
riscv_csrrw(CPURISCVState
*env
, int csrno
,
757 target_ulong
*ret_value
,
758 target_ulong new_value
, target_ulong write_mask
);
/* Same as riscv_csrrw but for debugger access (bypasses normal
 * privilege/predicate behavior — TODO confirm against csr.c). */
759 RISCVException
riscv_csrrw_debug(CPURISCVState
*env
, int csrno
,
760 target_ulong
*ret_value
,
761 target_ulong new_value
,
762 target_ulong write_mask
);
764 static inline void riscv_csr_write(CPURISCVState
*env
, int csrno
,
767 riscv_csrrw(env
, csrno
, NULL
, val
, MAKE_64BIT_MASK(0, TARGET_LONG_BITS
));
770 static inline target_ulong
riscv_csr_read(CPURISCVState
*env
, int csrno
)
772 target_ulong val
= 0;
773 riscv_csrrw(env
, csrno
, &val
, 0, 0);
777 typedef RISCVException (*riscv_csr_predicate_fn
)(CPURISCVState
*env
,
/* Per-CSR read callback: store the value via ret_value. */
779 typedef RISCVException (*riscv_csr_read_fn
)(CPURISCVState
*env
, int csrno
,
780 target_ulong
*ret_value
);
/* Per-CSR write callback. */
781 typedef RISCVException (*riscv_csr_write_fn
)(CPURISCVState
*env
, int csrno
,
782 target_ulong new_value
);
/* Combined read-modify-write callback; write_mask selects the bits
 * replaced by new_value. */
783 typedef RISCVException (*riscv_csr_op_fn
)(CPURISCVState
*env
, int csrno
,
784 target_ulong
*ret_value
,
785 target_ulong new_value
,
786 target_ulong write_mask
);
788 RISCVException
riscv_csrr_i128(CPURISCVState
*env
, int csrno
,
790 RISCVException
riscv_csrrw_i128(CPURISCVState
*env
, int csrno
,
792 Int128 new_value
, Int128 write_mask
);
794 typedef RISCVException (*riscv_csr_read128_fn
)(CPURISCVState
*env
, int csrno
,
796 typedef RISCVException (*riscv_csr_write128_fn
)(CPURISCVState
*env
, int csrno
,
801 riscv_csr_predicate_fn predicate
;
802 riscv_csr_read_fn read
;
803 riscv_csr_write_fn write
;
805 riscv_csr_read128_fn read128
;
806 riscv_csr_write128_fn write128
;
807 /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e 0) */
808 uint32_t min_priv_ver
;
809 } riscv_csr_operations
;
811 /* CSR function table constants */
813 CSR_TABLE_SIZE
= 0x1000
817 * The event id are encoded based on the encoding specified in the
818 * SBI specification v0.3
821 enum riscv_pmu_event_idx
{
822 RISCV_PMU_EVENT_HW_CPU_CYCLES
= 0x01,
823 RISCV_PMU_EVENT_HW_INSTRUCTIONS
= 0x02,
824 RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS
= 0x10019,
825 RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS
= 0x1001B,
826 RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS
= 0x10021,
829 /* used by tcg/tcg-cpu.c */
/* Enable/disable one multi-letter extension; ext_offset is an offset into
 * RISCVCPUConfig (see CPU_CFG_OFFSET). */
830 void isa_ext_update_enabled(RISCVCPU
*cpu
, uint32_t ext_offset
, bool en
);
831 bool isa_ext_is_enabled(RISCVCPU
*cpu
, uint32_t ext_offset
);
/* Set single-letter extensions in the env's MISA state. */
832 void riscv_cpu_set_misa_ext(CPURISCVState
*env
, uint32_t ext
);
/* True if this cpu object is a vendor (non-generic) cpu model. */
833 bool riscv_cpu_is_vendor(Object
*cpu_obj
);
835 typedef struct RISCVCPUMultiExtConfig
{
839 } RISCVCPUMultiExtConfig
;
/* Property tables for the multi-letter extension groups; each is a
 * NULL/sentinel-terminated array defined elsewhere — TODO confirm
 * terminator convention at the definitions. */
841 extern const RISCVCPUMultiExtConfig riscv_cpu_extensions
[];
842 extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts
[];
843 extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts
[];
844 extern const RISCVCPUMultiExtConfig riscv_cpu_named_features
[];
845 extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts
[];
847 typedef struct isa_ext_data
{
850 int ext_enable_offset
;
/* Table of ISA extension metadata (name <-> config offset). */
852 extern const RISCVIsaExtData isa_edata_arr
[];
/* Return the cpu model name; ownership of the returned string is with the
 * caller — TODO confirm at call sites. */
853 char *riscv_cpu_get_name(RISCVCPU
*cpu
);
/* Validate and finalize the configured feature set during realize. */
855 void riscv_cpu_finalize_features(RISCVCPU
*cpu
, Error
**errp
);
/* Register the satp-mode QOM properties on a cpu object. */
856 void riscv_add_satp_mode_properties(Object
*obj
);
/* True if this cpu model can run under the current accelerator. */
857 bool riscv_cpu_accelerator_compatible(RISCVCPU
*cpu
);
859 /* CSR function table */
/* Dispatch table indexed by CSR number (see CSR_TABLE_SIZE). */
860 extern riscv_csr_operations csr_ops
[CSR_TABLE_SIZE
];
/* Valid satp/address-translation modes for RV32 and RV64. */
862 extern const bool valid_vm_1_10_32
[], valid_vm_1_10_64
[];
/* Copy out / install the operations entry for one CSR number. */
864 void riscv_get_csr_ops(int csrno
, riscv_csr_operations
*ops
);
865 void riscv_set_csr_ops(int csrno
, riscv_csr_operations
*ops
);
/* Register feature-dependent (dynamic) gdb register sets. */
867 void riscv_cpu_register_gdb_regs_for_features(CPUState
*cs
);
/* Compute a new value for the seed CSR (entropy source). */
869 target_ulong
riscv_new_csr_seed(target_ulong new_value
,
870 target_ulong write_mask
);
/* Highest satp mode set in the bitmap 'map'. */
872 uint8_t satp_mode_max_from_map(uint32_t map
);
/* Printable name for a satp mode; is_32_bit selects the RV32 name space. */
873 const char *satp_mode_str(uint8_t satp_mode
, bool is_32_bit
);
875 /* Implemented in th_csr.c */
876 void th_register_custom_csrs(RISCVCPU
*cpu
);
/* Printable name for a privileged-spec version enum value. */
878 const char *priv_spec_to_str(int priv_version
);
879 #endif /* RISCV_CPU_H */