#define CPUArchState struct CPUMIPSState

#include "qemu-common.h"
#include "mips-defs.h"
#include "exec/cpu-defs.h"
#include "fpu/softfloat.h"

typedef struct r4k_tlb_t r4k_tlb_t;
#if !defined(CONFIG_USER_ONLY)
typedef struct CPUMIPSTLBContext CPUMIPSTLBContext;
struct CPUMIPSTLBContext {
    int (*map_address) (struct CPUMIPSState *env, hwaddr *physical, int *prot,
                        target_ulong address, int rw, int access_type);
    void (*helper_tlbwi)(struct CPUMIPSState *env);
    void (*helper_tlbwr)(struct CPUMIPSState *env);
    void (*helper_tlbp)(struct CPUMIPSState *env);
    void (*helper_tlbr)(struct CPUMIPSState *env);
    void (*helper_tlbinv)(struct CPUMIPSState *env);
    void (*helper_tlbinvf)(struct CPUMIPSState *env);
    r4k_tlb_t tlb[MIPS_TLB_MAX];
#define MSA_WRLEN (128)

enum CPUMIPSMSADataFormat {

typedef union wr_t wr_t;
union wr_t {
    int8_t  b[MSA_WRLEN / 8];
    int16_t h[MSA_WRLEN / 16];
    int32_t w[MSA_WRLEN / 32];
    int64_t d[MSA_WRLEN / 64];
};
typedef union fpr_t fpr_t;
union fpr_t {
    float64  fd;   /* ieee double precision */
    float32  fs[2];/* ieee single precision */
    uint64_t d;    /* binary double fixed-point */
    uint32_t w[2]; /* binary single fixed-point */
    /* FPU/MSA register mapping is not tested on big-endian hosts. */
    wr_t     wr;   /* vector data */
};
/* define FP_ENDIAN_IDX to access the same location
 * in the fpr_t union regardless of the host endianness
 */
#if defined(HOST_WORDS_BIGENDIAN)
# define FP_ENDIAN_IDX 1
#else
# define FP_ENDIAN_IDX 0
#endif
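/*
 * Example (sketch): FP_ENDIAN_IDX lets callers index the 32-bit halves of an
 * fpr_t the same way on little- and big-endian hosts, assuming the FPU
 * context exposes an fpr_t array (not shown in this excerpt):
 *
 *     fpr_t *fp = &env->active_fpu.fpr[0];
 *     float32 lo = fp->fs[FP_ENDIAN_IDX];   // aliases the low half of fp->fd
 *     float32 hi = fp->fs[!FP_ENDIAN_IDX];  // aliases the high half
 */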
typedef struct CPUMIPSFPUContext CPUMIPSFPUContext;
struct CPUMIPSFPUContext {
    /* Floating point registers */
    float_status fp_status;
    /* fpu implementation/revision register (fir) */
#define FCR0_HAS2008 23
    uint32_t fcr31_rw_bitmask;
#define FCR31_ABS2008 19
#define FCR31_NAN2008 18
#define SET_FP_COND(num, env) do { ((env).fcr31) |= ((num) ? (1 << ((num) + 24)) : (1 << 23)); } while (0)
#define CLEAR_FP_COND(num, env) do { ((env).fcr31) &= ~((num) ? (1 << ((num) + 24)) : (1 << 23)); } while (0)
#define GET_FP_COND(env) ((((env).fcr31 >> 24) & 0xfe) | (((env).fcr31 >> 23) & 0x1))
#define GET_FP_CAUSE(reg) (((reg) >> 12) & 0x3f)
#define GET_FP_ENABLE(reg) (((reg) >> 7) & 0x1f)
#define GET_FP_FLAGS(reg) (((reg) >> 2) & 0x1f)
#define SET_FP_CAUSE(reg, v) do { (reg) = ((reg) & ~(0x3f << 12)) | (((v) & 0x3f) << 12); } while (0)
#define SET_FP_ENABLE(reg, v) do { (reg) = ((reg) & ~(0x1f << 7)) | (((v) & 0x1f) << 7); } while (0)
#define SET_FP_FLAGS(reg, v) do { (reg) = ((reg) & ~(0x1f << 2)) | (((v) & 0x1f) << 2); } while (0)
#define UPDATE_FP_FLAGS(reg, v) do { (reg) |= (((v) & 0x1f) << 2); } while (0)

#define FP_UNDERFLOW 2
#define FP_OVERFLOW 4
#define FP_INVALID 16
#define FP_UNIMPLEMENTED 32
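/*
 * Example (sketch): how the cause/enable/flags accessors above are typically
 * combined after an FP operation; "flags" here is a hypothetical mask built
 * from the FP_* bits:
 *
 *     int flags = FP_OVERFLOW;
 *     SET_FP_CAUSE(env->active_fpu.fcr31, flags);
 *     if (GET_FP_ENABLE(env->active_fpu.fcr31) & flags) {
 *         // the exception is enabled, so raise it
 *     } else {
 *         UPDATE_FP_FLAGS(env->active_fpu.fcr31, flags);
 *     }
 */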
#define NB_MMU_MODES 4
#define TARGET_INSN_START_EXTRA_WORDS 2

typedef struct CPUMIPSMVPContext CPUMIPSMVPContext;
struct CPUMIPSMVPContext {
    int32_t CP0_MVPControl;
#define CP0MVPCo_CPA 3
#define CP0MVPCo_STLB 2
#define CP0MVPCo_VPC 1
#define CP0MVPCo_EVP 0
    int32_t CP0_MVPConf0;
#define CP0MVPC0_M 31
#define CP0MVPC0_TLBS 29
#define CP0MVPC0_GS 28
#define CP0MVPC0_PCP 27
#define CP0MVPC0_PTLBE 16
#define CP0MVPC0_TCA 15
#define CP0MVPC0_PVPE 10
#define CP0MVPC0_PTC 0
    int32_t CP0_MVPConf1;
#define CP0MVPC1_CIM 31
#define CP0MVPC1_CIF 30
#define CP0MVPC1_PCX 20
#define CP0MVPC1_PCP2 10
#define CP0MVPC1_PCP1 0

typedef struct mips_def_t mips_def_t;
#define MIPS_SHADOW_SET_MAX 16
#define MIPS_TC_MAX 5
#define MIPS_FPU_MAX 1
#define MIPS_DSP_ACC 4
#define MIPS_KSCRATCH_NUM 6
#define MIPS_MAAR_MAX 16 /* Must be an even number. */
typedef struct TCState TCState;
struct TCState {
    target_ulong gpr[32];
    target_ulong HI[MIPS_DSP_ACC];
    target_ulong LO[MIPS_DSP_ACC];
    target_ulong ACX[MIPS_DSP_ACC];
    target_ulong DSPControl;
    int32_t CP0_TCStatus;
#define CP0TCSt_TCU3 31
#define CP0TCSt_TCU2 30
#define CP0TCSt_TCU1 29
#define CP0TCSt_TCU0 28
#define CP0TCSt_TMX 27
#define CP0TCSt_RNST 23
#define CP0TCSt_TDS 21
#define CP0TCSt_DT 20
#define CP0TCSt_DA 15
#define CP0TCSt_TKSU 11
#define CP0TCSt_IXMT 10
#define CP0TCSt_TASID 0
#define CP0TCBd_CurTC 21
#define CP0TCBd_TBE 17
#define CP0TCBd_CurVPE 0
    target_ulong CP0_TCHalt;
    target_ulong CP0_TCContext;
    target_ulong CP0_TCSchedule;
    target_ulong CP0_TCScheFBack;
    int32_t CP0_Debug_tcstatus;
    target_ulong CP0_UserLocal;
#define MSACSR_FS_MASK (1 << MSACSR_FS)
#define MSACSR_NX_MASK (1 << MSACSR_NX)
#define MSACSR_CEF_MASK (0xffff << MSACSR_CEF)
#define MSACSR_RM_MASK (0x3 << MSACSR_RM)
#define MSACSR_MASK (MSACSR_RM_MASK | MSACSR_CEF_MASK | MSACSR_NX_MASK | \
                     MSACSR_FS_MASK)

    float_status msa_fp_status;
typedef struct CPUMIPSState CPUMIPSState;
struct CPUMIPSState {
    CPUMIPSFPUContext active_fpu;

    uint32_t current_fpu;

#if defined(TARGET_MIPS64)
# define PABITS_BASE 36
#else
# define PABITS_BASE 32
#endif
    target_ulong SEGMask;
#define PAMASK_BASE ((1ULL << PABITS_BASE) - 1)
#define MSAIR_ProcID 8

    /* CP0_MVP* are per MVP registers. */
    int32_t CP0_VPControl;
#define CP0VPCtl_DIS 0
    int32_t CP0_VPEControl;
#define CP0VPECo_YSI 21
#define CP0VPECo_GSI 20
#define CP0VPECo_EXCPT 16
#define CP0VPECo_TE 15
#define CP0VPECo_TargTC 0
    int32_t CP0_VPEConf0;
#define CP0VPEC0_M 31
#define CP0VPEC0_XTC 21
#define CP0VPEC0_TCS 19
#define CP0VPEC0_SCS 18
#define CP0VPEC0_DSC 17
#define CP0VPEC0_ICS 16
#define CP0VPEC0_MVP 1
#define CP0VPEC0_VPA 0
    int32_t CP0_VPEConf1;
#define CP0VPEC1_NCX 20
#define CP0VPEC1_NCP2 10
#define CP0VPEC1_NCP1 0
    target_ulong CP0_YQMask;
    target_ulong CP0_VPESchedule;
    target_ulong CP0_VPEScheFBack;
#define CP0VPEOpt_IWX7 15
#define CP0VPEOpt_IWX6 14
#define CP0VPEOpt_IWX5 13
#define CP0VPEOpt_IWX4 12
#define CP0VPEOpt_IWX3 11
#define CP0VPEOpt_IWX2 10
#define CP0VPEOpt_IWX1 9
#define CP0VPEOpt_IWX0 8
#define CP0VPEOpt_DWX7 7
#define CP0VPEOpt_DWX6 6
#define CP0VPEOpt_DWX5 5
#define CP0VPEOpt_DWX4 4
#define CP0VPEOpt_DWX3 3
#define CP0VPEOpt_DWX2 2
#define CP0VPEOpt_DWX1 1
#define CP0VPEOpt_DWX0 0
    uint64_t CP0_EntryLo0;
    uint64_t CP0_EntryLo1;
#if defined(TARGET_MIPS64)
# define CP0EnLo_RI 63
# define CP0EnLo_XI 62
#else
# define CP0EnLo_RI 31
# define CP0EnLo_XI 30
#endif
    int32_t CP0_GlobalNumber;
    target_ulong CP0_Context;
    target_ulong CP0_KScratch[MIPS_KSCRATCH_NUM];
    int32_t CP0_PageMask;
    int32_t CP0_PageGrain_rw_bitmask;
    int32_t CP0_PageGrain;
#define CP0PG_ELPA 29
    target_ulong CP0_SegCtl0;
    target_ulong CP0_SegCtl1;
    target_ulong CP0_SegCtl2;
#define CP0SC_PA_MASK (0x7FULL << CP0SC_PA)
#define CP0SC_PA_1GMASK (0x7EULL << CP0SC_PA)
#define CP0SC_AM_MASK (0x7ULL << CP0SC_AM)
#define CP0SC_AM_UK 0ULL
#define CP0SC_AM_MK 1ULL
#define CP0SC_AM_MSK 2ULL
#define CP0SC_AM_MUSK 3ULL
#define CP0SC_AM_MUSUK 4ULL
#define CP0SC_AM_USK 5ULL
#define CP0SC_AM_UUSK 7ULL
#define CP0SC_EU_MASK (1ULL << CP0SC_EU)
#define CP0SC_C_MASK (0x7ULL << CP0SC_C)
#define CP0SC_MASK (CP0SC_C_MASK | CP0SC_EU_MASK | CP0SC_AM_MASK | \
                    CP0SC_PA_MASK)
#define CP0SC_1GMASK (CP0SC_C_MASK | CP0SC_EU_MASK | CP0SC_AM_MASK | \
                      CP0SC_PA_1GMASK)
#define CP0SC0_MASK (CP0SC_MASK | (CP0SC_MASK << 16))
#define CP0SC1_XAM 59
#define CP0SC1_XAM_MASK (0x7ULL << CP0SC1_XAM)
#define CP0SC1_MASK (CP0SC_MASK | (CP0SC_MASK << 16) | CP0SC1_XAM_MASK)
#define CP0SC2_XR_MASK (0xFFULL << CP0SC2_XR)
#define CP0SC2_MASK (CP0SC_1GMASK | (CP0SC_1GMASK << 16) | CP0SC2_XR_MASK)
    int32_t CP0_SRSConf0_rw_bitmask;
    int32_t CP0_SRSConf0;
#define CP0SRSC0_M 31
#define CP0SRSC0_SRS3 20
#define CP0SRSC0_SRS2 10
#define CP0SRSC0_SRS1 0
    int32_t CP0_SRSConf1_rw_bitmask;
    int32_t CP0_SRSConf1;
#define CP0SRSC1_M 31
#define CP0SRSC1_SRS6 20
#define CP0SRSC1_SRS5 10
#define CP0SRSC1_SRS4 0
    int32_t CP0_SRSConf2_rw_bitmask;
    int32_t CP0_SRSConf2;
#define CP0SRSC2_M 31
#define CP0SRSC2_SRS9 20
#define CP0SRSC2_SRS8 10
#define CP0SRSC2_SRS7 0
    int32_t CP0_SRSConf3_rw_bitmask;
    int32_t CP0_SRSConf3;
#define CP0SRSC3_M 31
#define CP0SRSC3_SRS12 20
#define CP0SRSC3_SRS11 10
#define CP0SRSC3_SRS10 0
    int32_t CP0_SRSConf4_rw_bitmask;
    int32_t CP0_SRSConf4;
#define CP0SRSC4_SRS15 20
#define CP0SRSC4_SRS14 10
#define CP0SRSC4_SRS13 0
    target_ulong CP0_BadVAddr;
    uint32_t CP0_BadInstr;
    uint32_t CP0_BadInstrP;
    target_ulong CP0_EntryHi;
#define CP0EnHi_EHINV 10
    target_ulong CP0_EntryHi_ASID_mask;
#define CP0IntCtl_IPTI 29
#define CP0IntCtl_IPPCI 26
#define CP0IntCtl_VS 5
#define CP0SRSCtl_HSS 26
#define CP0SRSCtl_EICSS 18
#define CP0SRSCtl_ESS 12
#define CP0SRSCtl_PSS 6
#define CP0SRSCtl_CSS 0
#define CP0SRSMap_SSV7 28
#define CP0SRSMap_SSV6 24
#define CP0SRSMap_SSV5 20
#define CP0SRSMap_SSV4 16
#define CP0SRSMap_SSV3 12
#define CP0SRSMap_SSV2 8
#define CP0SRSMap_SSV1 4
#define CP0SRSMap_SSV0 0
#define CP0Ca_IP_mask 0x0000FF00
    target_ulong CP0_EPC;
    target_ulong CP0_EBase;
    target_ulong CP0_EBaseWG_rw_bitmask;
#define CP0EBase_WG 11
    target_ulong CP0_CMGCRBase;
#define CP0C3_CMGCR 29
#define CP0C3_MSAP 28
#define CP0C3_IPLW 21
#define CP0C3_MMAR 18
#define CP0C3_ISA_ON_EXC 16
#define CP0C3_ULRI 13
#define CP0C3_DSP2P 11
#define CP0C3_DSPP 10
    int32_t CP0_Config4_rw_bitmask;
#define CP0C4_KScrExist 16
#define CP0C4_MMUExtDef 14
#define CP0C4_FTLBPageSize 8
#define CP0C4_FTLBWays 4
#define CP0C4_FTLBSets 0
#define CP0C4_MMUSizeExt 0
    int32_t CP0_Config5_rw_bitmask;
#define CP0C5_MSAEn 27
#define CP0C5_NFExists 0
    uint64_t CP0_MAAR[MIPS_MAAR_MAX];
    /* XXX: Maybe make LLAddr per-TC? */
    target_ulong llnewval;
    uint64_t CP0_LLAddr_rw_bitmask;
    int CP0_LLAddr_shift;
    target_ulong CP0_WatchLo[8];
    int32_t CP0_WatchHi[8];
#define CP0WH_ASID 16
    target_ulong CP0_XContext;
    int32_t CP0_Framemask;
#define CP0DB_LSNM 28
#define CP0DB_Doze 27
#define CP0DB_Halt 26
#define CP0DB_IBEP 24
#define CP0DB_DBEP 21
#define CP0DB_IEXI 20
    target_ulong CP0_DEPC;
    int32_t CP0_Performance0;
    target_ulong CP0_ErrorEPC;
    /* We waste some space so we can handle shadow registers like TCs. */
    TCState tcs[MIPS_SHADOW_SET_MAX];
    CPUMIPSFPUContext fpus[MIPS_FPU_MAX];
#define EXCP_TLB_NOMATCH 0x1
#define EXCP_INST_NOTAVAIL 0x2 /* No valid instruction word for BadInstr */
    uint32_t hflags; /* CPU State */
    /* TMASK defines different execution modes */
#define MIPS_HFLAG_TMASK 0x1F5807FF
#define MIPS_HFLAG_MODE 0x00007 /* execution modes */
    /* The KSU flags must be the lowest bits in hflags. The flag order
       must be the same as defined for CP0 Status. This allows using
       the bits as the value of mmu_idx. */
#define MIPS_HFLAG_KSU 0x00003 /* kernel/supervisor/user mode mask */
#define MIPS_HFLAG_UM 0x00002 /* user mode flag */
#define MIPS_HFLAG_SM 0x00001 /* supervisor mode flag */
#define MIPS_HFLAG_KM 0x00000 /* kernel mode flag */
#define MIPS_HFLAG_DM 0x00004 /* Debug mode */
#define MIPS_HFLAG_64 0x00008 /* 64-bit instructions enabled */
#define MIPS_HFLAG_CP0 0x00010 /* CP0 enabled */
#define MIPS_HFLAG_FPU 0x00020 /* FPU enabled */
#define MIPS_HFLAG_F64 0x00040 /* 64-bit FPU enabled */
    /* True if the MIPS IV COP1X instructions can be used. This also
       controls the non-COP1X instructions RECIP.S, RECIP.D, RSQRT.S
       and RSQRT.D. */
#define MIPS_HFLAG_COP1X 0x00080 /* COP1X instructions enabled */
#define MIPS_HFLAG_RE 0x00100 /* Reversed endianness */
#define MIPS_HFLAG_AWRAP 0x00200 /* 32-bit compatibility address wrapping */
#define MIPS_HFLAG_M16 0x00400 /* MIPS16 mode flag */
#define MIPS_HFLAG_M16_SHIFT 10
    /* If translation is interrupted between the branch instruction and
     * the delay slot, record what type of branch it is so that we can
     * resume translation properly. It might be possible to reduce
     * this from three bits to two. */
#define MIPS_HFLAG_BMASK_BASE 0x803800
#define MIPS_HFLAG_B 0x00800 /* Unconditional branch */
#define MIPS_HFLAG_BC 0x01000 /* Conditional branch */
#define MIPS_HFLAG_BL 0x01800 /* Likely branch */
#define MIPS_HFLAG_BR 0x02000 /* branch to register (can't link TB) */
    /* Extra flags about the current pending branch. */
#define MIPS_HFLAG_BMASK_EXT 0x7C000
#define MIPS_HFLAG_B16 0x04000 /* branch instruction was 16 bits */
#define MIPS_HFLAG_BDS16 0x08000 /* branch requires 16-bit delay slot */
#define MIPS_HFLAG_BDS32 0x10000 /* branch requires 32-bit delay slot */
#define MIPS_HFLAG_BDS_STRICT 0x20000 /* Strict delay slot size */
#define MIPS_HFLAG_BX 0x40000 /* branch exchanges execution mode */
#define MIPS_HFLAG_BMASK (MIPS_HFLAG_BMASK_BASE | MIPS_HFLAG_BMASK_EXT)
    /* MIPS DSP resources access. */
#define MIPS_HFLAG_DSP 0x080000 /* Enable access to MIPS DSP resources. */
#define MIPS_HFLAG_DSPR2 0x100000 /* Enable access to MIPS DSPR2 resources. */
    /* Extra flag about HWREna register. */
#define MIPS_HFLAG_HWRENA_ULR 0x200000 /* ULR bit from HWREna is set. */
#define MIPS_HFLAG_SBRI 0x400000 /* R6 SDBBP causes RI exception in user mode */
#define MIPS_HFLAG_FBNSLOT 0x800000 /* Forbidden slot */
#define MIPS_HFLAG_MSA 0x1000000
#define MIPS_HFLAG_FRE 0x2000000 /* FRE enabled */
#define MIPS_HFLAG_ELPA 0x4000000
#define MIPS_HFLAG_ITC_CACHE 0x8000000 /* CACHE instr. operates on ITC tag */
#define MIPS_HFLAG_ERL 0x10000000 /* error level flag */
    target_ulong btarget; /* Jump / branch target */
    target_ulong bcond;   /* Branch condition (if needed) */

    int SYNCI_Step; /* Address step size for SYNCI */
    int CCRes; /* Cycle count resolution/divisor */
    uint32_t CP0_Status_rw_bitmask; /* Read/write bits in CP0_Status */
    uint32_t CP0_TCStatus_rw_bitmask; /* Read/write bits in CP0_TCStatus */
    int insn_flags; /* Supported instruction set */

    /* Fields up to this point are cleared by a CPU reset */
    struct {} end_reset_fields;
    /* Fields from here on are preserved across CPU reset. */
    CPUMIPSMVPContext *mvp;
#if !defined(CONFIG_USER_ONLY)
    CPUMIPSTLBContext *tlb;
#endif

    const mips_def_t *cpu_model;
    QEMUTimer *timer; /* Internal timer */
    MemoryRegion *itc_tag; /* ITC Configuration Tags */
    target_ulong exception_base; /* ExceptionBase input to the core */
/**
 * @env: #CPUMIPSState
 */
static inline MIPSCPU *mips_env_get_cpu(CPUMIPSState *env)
{
    return container_of(env, MIPSCPU, env);
}

#define ENV_GET_CPU(e) CPU(mips_env_get_cpu(e))

#define ENV_OFFSET offsetof(MIPSCPU, env)
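/*
 * Example (sketch): code holding only a CPUMIPSState pointer can get back to
 * the QOM objects through the helpers above:
 *
 *     MIPSCPU *cpu = mips_env_get_cpu(env);
 *     CPUState *cs = CPU(cpu);              // equivalently: ENV_GET_CPU(env)
 */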
#ifndef CONFIG_USER_ONLY
extern const struct VMStateDescription vmstate_mips_cpu;
#endif

void mips_cpu_do_interrupt(CPUState *cpu);
bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req);
void mips_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                         int flags);
hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int mips_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int mips_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
                                  MMUAccessType access_type,
                                  int mmu_idx, uintptr_t retaddr);
#if !defined(CONFIG_USER_ONLY)
int no_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                        target_ulong address, int rw, int access_type);
int fixed_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                           target_ulong address, int rw, int access_type);
int r4k_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                     target_ulong address, int rw, int access_type);
void r4k_helper_tlbwi(CPUMIPSState *env);
void r4k_helper_tlbwr(CPUMIPSState *env);
void r4k_helper_tlbp(CPUMIPSState *env);
void r4k_helper_tlbr(CPUMIPSState *env);
void r4k_helper_tlbinv(CPUMIPSState *env);
void r4k_helper_tlbinvf(CPUMIPSState *env);
void mips_cpu_unassigned_access(CPUState *cpu, hwaddr addr,
                                bool is_write, bool is_exec, int unused,
                                unsigned size);
void mips_cpu_list (FILE *f, fprintf_function cpu_fprintf);
#define cpu_signal_handler cpu_mips_signal_handler
#define cpu_list mips_cpu_list

extern void cpu_wrdsp(uint32_t rs, uint32_t mask_num, CPUMIPSState *env);
extern uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env);
/* MMU modes definitions. We carefully match the indices with our
   hflags layout. */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _super
#define MMU_MODE2_SUFFIX _user
#define MMU_MODE3_SUFFIX _error
#define MMU_USER_IDX 2

static inline int hflags_mmu_index(uint32_t hflags)
{
    if (hflags & MIPS_HFLAG_ERL) {
        return 3; /* ERL */
    } else {
        return hflags & MIPS_HFLAG_KSU;
    }
}

static inline int cpu_mmu_index (CPUMIPSState *env, bool ifetch)
{
    return hflags_mmu_index(env->hflags);
}
static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env)
{
    return (env->CP0_Status & (1 << CP0St_IE)) &&
        !(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM) &&
        /* Note that the TCStatus IXMT field is initialized to zero,
           and only MT capable cores can set it to one. So we don't
           need to check for MT capabilities here. */
        !(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_IXMT));
}
/* Check if there is a pending and not masked out interrupt */
static inline bool cpu_mips_hw_interrupts_pending(CPUMIPSState *env)
{
    uint32_t pending;
    uint32_t status;
    bool r;

    pending = env->CP0_Cause & CP0Ca_IP_mask;
    status = env->CP0_Status & CP0Ca_IP_mask;

    if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
        /* A MIPS configured with a vectorizing external interrupt controller
           will feed a vector into the Cause pending lines. The core treats
           the status lines as a vector level, not as individual masks. */
        r = pending > status;
    } else {
        /* A MIPS configured with compatibility or VInt (Vectored Interrupts)
           treats the pending lines as individual interrupt lines; the status
           lines are individual masks. */
        r = (pending & status) != 0;
    }

    return r;
}
#include "exec/cpu-all.h"

/* Memory access type:
 * may be needed for precise access rights control and precise exceptions.
 */
enum {
    /* 1 bit to define user level / supervisor access */
    /* 1 bit to indicate direction */
    /* Type of instruction that generated the access */
    ACCESS_CODE = 0x10,  /* Code fetch access */
    ACCESS_INT = 0x20,   /* Integer load/store access */
    ACCESS_FLOAT = 0x30, /* floating point load/store access */
};
    EXCP_EXT_INTERRUPT, /* 8 */
    EXCP_DWATCH, /* 24 */
    EXCP_LAST = EXCP_TLBRI,
};

/* Dummy exception for conditional stores. */
#define EXCP_SC 0x100

/*
 * This is an internally generated WAKE request line.
 * It is driven by the CPU itself. Raised when the MT
 * block wants to wake a VPE from an inactive state and
 * cleared when the VPE goes from active to inactive.
 */
#define CPU_INTERRUPT_WAKE CPU_INTERRUPT_TGT_INT_0
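/*
 * Example (sketch): the MT wake/sleep code would assert and clear this line
 * through the generic interrupt API, with "cs" being the CPUState of the
 * target VPE:
 *
 *     cpu_interrupt(cs, CPU_INTERRUPT_WAKE);        // request a wake-up
 *     cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);  // VPE went inactive again
 */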
void mips_tcg_init(void);
MIPSCPU *cpu_mips_init(const char *cpu_model);
int cpu_mips_signal_handler(int host_signum, void *pinfo, void *puc);

#define cpu_init(cpu_model) CPU(cpu_mips_init(cpu_model))
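/*
 * Example (sketch): board code usually instantiates a CPU through this
 * macro; "24Kf" is just one example model name, and the MIPS_CPU() QOM cast
 * is assumed to be in scope:
 *
 *     CPUState *cs = cpu_init("24Kf");
 *     CPUMIPSState *env = &MIPS_CPU(cs)->env;
 */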
bool cpu_supports_cps_smp(const char *cpu_model);
bool cpu_supports_isa(const char *cpu_model, unsigned int isa);
void cpu_set_exception_base(int vp_index, target_ulong address);

/* TODO QOM'ify CPU reset and remove */
void cpu_state_reset(CPUMIPSState *s);
uint32_t cpu_mips_get_random (CPUMIPSState *env);
uint32_t cpu_mips_get_count (CPUMIPSState *env);
void cpu_mips_store_count (CPUMIPSState *env, uint32_t value);
void cpu_mips_store_compare (CPUMIPSState *env, uint32_t value);
void cpu_mips_start_count(CPUMIPSState *env);
void cpu_mips_stop_count(CPUMIPSState *env);

void cpu_mips_soft_irq(CPUMIPSState *env, int irq, int level);
int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
                              int mmu_idx);
uint32_t float_class_s(uint32_t arg, float_status *fst);
uint64_t float_class_d(uint64_t arg, float_status *fst);

#if !defined(CONFIG_USER_ONLY)
void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra);
hwaddr cpu_mips_translate_address (CPUMIPSState *env, target_ulong address,
                                   int rw);
target_ulong exception_resume_pc (CPUMIPSState *env);
#endif

extern unsigned int ieee_rm[];
int ieee_ex_to_mips(int xcpt);
static inline void restore_rounding_mode(CPUMIPSState *env)
{
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3],
                            &env->active_fpu.fp_status);
}

static inline void restore_flush_mode(CPUMIPSState *env)
{
    set_flush_to_zero((env->active_fpu.fcr31 & (1 << FCR31_FS)) != 0,
                      &env->active_fpu.fp_status);
}

static inline void restore_snan_bit_mode(CPUMIPSState *env)
{
    set_snan_bit_is_one((env->active_fpu.fcr31 & (1 << FCR31_NAN2008)) == 0,
                        &env->active_fpu.fp_status);
}

static inline void restore_fp_status(CPUMIPSState *env)
{
    restore_rounding_mode(env);
    restore_flush_mode(env);
    restore_snan_bit_mode(env);
}
static inline void restore_msa_fp_status(CPUMIPSState *env)
{
    float_status *status = &env->active_tc.msa_fp_status;
    int rounding_mode = (env->active_tc.msacsr & MSACSR_RM_MASK) >> MSACSR_RM;
    bool flush_to_zero = (env->active_tc.msacsr & MSACSR_FS_MASK) != 0;

    set_float_rounding_mode(ieee_rm[rounding_mode], status);
    set_flush_to_zero(flush_to_zero, status);
    set_flush_inputs_to_zero(flush_to_zero, status);
}
static inline void restore_pamask(CPUMIPSState *env)
{
    if (env->hflags & MIPS_HFLAG_ELPA) {
        env->PAMask = (1ULL << env->PABITS) - 1;
    } else {
        env->PAMask = PAMASK_BASE;
    }
}
static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, target_ulong *pc,
                                        target_ulong *cs_base, uint32_t *flags)
{
    *pc = env->active_tc.PC;
    *cs_base = 0;
    *flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK |
                            MIPS_HFLAG_HWRENA_ULR);
}
static inline int mips_vpe_active(CPUMIPSState *env)
{
    int active = 1;

    /* Check that the VPE is enabled. */
    if (!(env->mvp->CP0_MVPControl & (1 << CP0MVPCo_EVP))) {
        active = 0;
    }
    /* Check that the VPE is activated. */
    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))) {
        active = 0;
    }

    /* Now verify that there are active thread contexts in the VPE.

       This assumes the CPU model will internally reschedule threads
       if the active one goes to sleep. If there are no threads available
       the active one will be in a sleeping state, and we can turn off
       the entire VPE. */
    if (!(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_A))) {
        /* TC is not activated. */
        active = 0;
    }
    if (env->active_tc.CP0_TCHalt & 1) {
        /* TC is in halt state. */
        active = 0;
    }

    return active;
}
static inline int mips_vp_active(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;

    /* Check if the VP disabled other VPs (which means the VP is enabled) */
    if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
        return 1;
    }

    /* Check if the virtual processor is disabled due to a DVP */
    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        if ((&other_cpu->env != env) &&
            ((other_cpu->env.CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
            return 0;
        }
    }
    return 1;
}
static inline void compute_hflags(CPUMIPSState *env)
{
    env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
                     MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
                     MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSPR2 |
                     MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA | MIPS_HFLAG_FRE |
                     MIPS_HFLAG_ELPA | MIPS_HFLAG_ERL);
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        env->hflags |= MIPS_HFLAG_ERL;
    }
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM)) {
        env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
    }
#if defined(TARGET_MIPS64)
    if ((env->insn_flags & ISA_MIPS3) &&
        (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
         (env->CP0_Status & (1 << CP0St_PX)) ||
         (env->CP0_Status & (1 << CP0St_UX)))) {
        env->hflags |= MIPS_HFLAG_64;
    }

    if (!(env->insn_flags & ISA_MIPS3)) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) &&
               !(env->CP0_Status & (1 << CP0St_UX))) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (env->insn_flags & ISA_MIPS64R6) {
        /* Address wrapping for Supervisor and Kernel is specified in R6 */
        if ((((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_SM) &&
             !(env->CP0_Status & (1 << CP0St_SX))) ||
            (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_KM) &&
             !(env->CP0_Status & (1 << CP0St_KX)))) {
            env->hflags |= MIPS_HFLAG_AWRAP;
        }
    }
#endif
    if (((env->CP0_Status & (1 << CP0St_CU0)) &&
         !(env->insn_flags & ISA_MIPS32R6)) ||
        !(env->hflags & MIPS_HFLAG_KSU)) {
        env->hflags |= MIPS_HFLAG_CP0;
    }
    if (env->CP0_Status & (1 << CP0St_CU1)) {
        env->hflags |= MIPS_HFLAG_FPU;
    }
    if (env->CP0_Status & (1 << CP0St_FR)) {
        env->hflags |= MIPS_HFLAG_F64;
    }
    if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_KM) &&
        (env->CP0_Config5 & (1 << CP0C5_SBRI))) {
        env->hflags |= MIPS_HFLAG_SBRI;
    }
    if (env->insn_flags & ASE_DSPR2) {
        /* The CPU implements the DSP R2 ASE, so grant access to the
           DSP and DSP R2 resources when Status.MX is set. */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSPR2;
        }
    } else if (env->insn_flags & ASE_DSP) {
        /* The CPU implements the DSP ASE, so grant access to the
           DSP resources when Status.MX is set. */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP;
        }
    }
    if (env->insn_flags & ISA_MIPS32R2) {
        if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS32) {
        if (env->hflags & MIPS_HFLAG_64) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS4) {
        /* All supported MIPS IV CPUs use the XX (CU3) bit to enable
           and disable the MIPS IV extensions to the MIPS III ISA.
           Some other MIPS IV CPUs ignore the bit, so the check here
           would be too restrictive for them. */
        if (env->CP0_Status & (1U << CP0St_CU3)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    }
    if (env->insn_flags & ASE_MSA) {
        if (env->CP0_Config5 & (1 << CP0C5_MSAEn)) {
            env->hflags |= MIPS_HFLAG_MSA;
        }
    }
    if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
        if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
            env->hflags |= MIPS_HFLAG_FRE;
        }
    }
    if (env->CP0_Config3 & (1 << CP0C3_LPA)) {
        if (env->CP0_PageGrain & (1 << CP0PG_ELPA)) {
            env->hflags |= MIPS_HFLAG_ELPA;
        }
    }
}
void cpu_mips_tlb_flush(CPUMIPSState *env);
void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc);
void cpu_mips_store_status(CPUMIPSState *env, target_ulong val);
void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val);

void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, uint32_t exception,
                                          int error_code, uintptr_t pc);

static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
                                                    uint32_t exception,
                                                    uintptr_t pc)
{
    do_raise_exception_err(env, exception, 0, pc);
}

#endif /* MIPS_CPU_H */