 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "trace.h"
#include "exec/address-spaces.h"
/* Sparc MMU emulation */
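/*
 * With CONFIG_USER_ONLY there is no MMU model at all: any fault that
 * reaches this handler is simply reported back to the caller as an
 * instruction (TT_TFAULT) or data (TT_DFAULT) access exception.
 */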
#if defined(CONFIG_USER_ONLY)

int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
                               int mmu_idx)
{
    if (rw & 2) {
        cs->exception_index = TT_TFAULT;
    } else {
        cs->exception_index = TT_DFAULT;
    }
    return 1;
}

#else

#ifndef TARGET_SPARC64
/*
 * Sparc V8 Reference MMU (SRMMU)
 */
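/*
 * access_table[access_index][ACC]: the row is the attempted access (see the
 * access_index encoding in get_physical_address below), the column is the
 * ACC field of the PTE.  Zero means the access is allowed; the non-zero
 * values appear to be pre-shifted fault-type codes for the fault status
 * register (8 ~ protection error, 12 ~ privilege violation).
 */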
static const int access_table[8][8] = {
    { 0, 0, 0, 0, 8, 0, 12, 12 },
    { 0, 0, 0, 0, 8, 0, 0, 0 },
    { 8, 8, 0, 0, 0, 8, 12, 12 },
    { 8, 8, 0, 0, 0, 8, 0, 0 },
    { 8, 0, 8, 0, 8, 8, 12, 12 },
    { 8, 0, 8, 0, 8, 0, 8, 0 },
    { 8, 8, 8, 0, 8, 8, 12, 12 },
    { 8, 8, 8, 0, 8, 8, 8, 0 }
};
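/*
 * perm_table[is_user][ACC]: translation of the SRMMU ACC field into QEMU
 * page protection bits; row 0 is used for supervisor accesses, row 1 for
 * user accesses (ACC values 6 and 7 are supervisor-only, hence 0).
 */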
static const int perm_table[2][8] = {
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC
    },
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ,
        0,
        0,
    }
};
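/*
 * Walk the SRMMU page tables for 'address'.  On success 0 is returned and
 * *physical, *prot and *page_size describe the mapping; otherwise the
 * return value is the fault code fragment that the caller merges into the
 * MMU fault status register.
 */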
static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
                                int *prot, int *access_index,
                                target_ulong address, int rw, int mmu_idx,
                                target_ulong *page_size)
{
    int access_perms = 0;
    hwaddr pde_ptr;
    uint32_t pde;
    int error_code = 0, is_dirty, is_user;
    unsigned long page_offset;
    CPUState *cs = CPU(sparc_env_get_cpu(env));

    is_user = mmu_idx == MMU_USER_IDX;
    if (mmu_idx == MMU_PHYS_IDX) {
        *page_size = TARGET_PAGE_SIZE;
        /* Boot mode: instruction fetches are taken from PROM */
        if (rw == 2 && (env->mmuregs[0] & env->def->mmu_bm)) {
            *physical = env->prom_addr | (address & 0x7ffffULL);
            *prot = PAGE_READ | PAGE_EXEC;
        } else {
            *physical = address;
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        }
        return 0;
    }

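    /*
     * access_index selects a row of access_table: bit 2 is set for stores,
     * bit 1 for instruction fetches and bit 0 for supervisor accesses.
     */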
    *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1);
    *physical = 0xffffffffffff0000ULL;

    /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
    /* Context base + context number */
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = ldl_phys(cs->as, pde_ptr);

    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
        return 1 << 2;
    case 2: /* L0 PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 4 << 2;
    case 1: /* L0 PDE */
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = ldl_phys(cs->as, pde_ptr);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
            return (1 << 8) | (1 << 2);
        case 3: /* Reserved */
            return (1 << 8) | (4 << 2);
        case 1: /* L1 PDE */
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = ldl_phys(cs->as, pde_ptr);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
                return (2 << 8) | (1 << 2);
            case 3: /* Reserved */
                return (2 << 8) | (4 << 2);
            case 1: /* L2 PDE */
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = ldl_phys(cs->as, pde_ptr);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                    return (3 << 8) | (1 << 2);
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return (3 << 8) | (4 << 2);
                case 2: /* L3 PTE */
                    page_offset = 0;
                }
                *page_size = TARGET_PAGE_SIZE;
                break;
            case 2: /* L2 PTE */
                page_offset = address & 0x3f000;
                *page_size = 0x40000;
            }
            break;
        case 2: /* L1 PTE */
            page_offset = address & 0xfff000;
            *page_size = 0x1000000;
        }
    }

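    /*
     * Check the attempted access against the ACC field of the PTE.  A
     * non-zero error_code is reported back to the caller unless the MMU is
     * in no-fault mode and this is a user access.
     */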
    access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
    error_code = access_table[*access_index][access_perms];
    if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) {
        return error_code;
    }

    /* update page modified and dirty bits */
    is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
    if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
        pde |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pde |= PG_MODIFIED_MASK;
        }
        stl_phys_notdirty(cs->as, pde_ptr, pde);
    }

    /* the page can be put in the TLB */
    *prot = perm_table[is_user][access_perms];
    if (!(pde & PG_MODIFIED_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        *prot &= ~PAGE_WRITE;
    }

    /* Even if large ptes, we map only one 4KB page in the cache to
       avoid filling it too fast */
    *physical = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset;
    return error_code;
}

/* Perform address translation */
int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
                               int mmu_idx)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    hwaddr paddr;
    target_ulong vaddr;
    target_ulong page_size;
    int error_code = 0, prot, access_index;

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &paddr, &prot, &access_index,
                                      address, rw, mmu_idx, &page_size);
    vaddr = address;
    if (error_code == 0) {
        qemu_log_mask(CPU_LOG_MMU,
                      "Translate at %" VADDR_PRIx " -> " TARGET_FMT_plx ", vaddr "
                      TARGET_FMT_lx "\n", address, paddr, vaddr);
        tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
        return 0;
    }

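    /*
     * Translation failed: update the SRMMU fault status (mmuregs[3]) and
     * fault address (mmuregs[4]) registers.  Following the SPARC V8 FSR
     * layout, the access type is placed in bits 7:5, error_code carries the
     * fault-type (and, for table-walk errors, level) bits, bit 1 flags the
     * fault address as valid and bit 0 is the overwrite indication set when
     * a previous fault had not been read yet.
     */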
    if (env->mmuregs[3]) { /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    }
    env->mmuregs[3] |= (access_index << 5) | error_code | 2;
    env->mmuregs[4] = address; /* Fault address register */

    if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) {
        /* No fault mode: if a mapping is available, just override
           permissions. If no mapping is available, redirect accesses to
           neverland. Fake/overridden mappings will be flushed when
           switching to normal mode. */
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
        return 0;
    } else {
        if (rw & 2) {
            cs->exception_index = TT_TFAULT;
        } else {
            cs->exception_index = TT_DFAULT;
        }
        return 1;
    }
}

target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    hwaddr pde_ptr;
    uint32_t pde;

    /* Context base + context number */
    pde_ptr = (hwaddr)(env->mmuregs[1] << 4) +
        (env->mmuregs[2] << 2);
    pde = ldl_phys(cs->as, pde_ptr);
    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
    case 2: /* PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 0;
    case 1: /* L1 PDE */
        if (mmulev == 3) {
            return pde;
        }
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = ldl_phys(cs->as, pde_ptr);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
        case 3: /* Reserved */
            return 0;
        case 2: /* L1 PTE */
            return pde;
        case 1: /* L2 PDE */
            if (mmulev == 2) {
                return pde;
            }
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = ldl_phys(cs->as, pde_ptr);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
            case 3: /* Reserved */
                return 0;
            case 2: /* L2 PTE */
                return pde;
            case 1: /* L3 PDE */
                if (mmulev == 1) {
                    return pde;
                }
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = ldl_phys(cs->as, pde_ptr);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return 0;
                case 2: /* L3 PTE */
                    return pde;
                }
            }
        }
    }
    return 0;
}

void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    target_ulong va, va1, va2;
    unsigned int n, m, o;
    hwaddr pde_ptr, pa;
    uint32_t pde;

    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = ldl_phys(cs->as, pde_ptr);
    (*cpu_fprintf)(f, "Root ptr: " TARGET_FMT_plx ", ctx: %d\n",
                   (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]);
    for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
        pde = mmu_probe(env, va, 2);
        if (pde) {
            pa = cpu_get_phys_page_debug(cs, va);
            (*cpu_fprintf)(f, "VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx
                           " PDE: " TARGET_FMT_lx "\n", va, pa, pde);
            for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
                pde = mmu_probe(env, va1, 1);
                if (pde) {
                    pa = cpu_get_phys_page_debug(cs, va1);
                    (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: "
                                   TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n",
                                   va1, pa, pde);
                    for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
                        pde = mmu_probe(env, va2, 0);
                        if (pde) {
                            pa = cpu_get_phys_page_debug(cs, va2);
                            (*cpu_fprintf)(f, "  VA: " TARGET_FMT_lx ", PA: "
                                           TARGET_FMT_plx " PTE: "
                                           TARGET_FMT_lx "\n",
                                           va2, pa, pde);
                        }
                    }
                }
            }
        }
    }
}

/* Gdb expects all register windows to be flushed in RAM. This function handles
 * reads (and only reads) in stack frames as if windows were flushed. We assume
 * that the sparc ABI is followed.
 */
int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address,
                              uint8_t *buf, int len, bool is_write)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong addr = address;
    int i;
    int len1;
    int cwp = env->cwp;

    if (!is_write) {
        for (i = 0; i < env->nwindows; i++) {
            int off;
            target_ulong fp = env->regbase[cwp * 16 + 22];

            /* Assume fp == 0 means end of frame. */
            if (fp == 0) {
                break;
            }

            cwp = cpu_cwp_inc(env, cwp + 1);

            /* Invalid window ? */
            if (env->wim & (1 << cwp)) {
                break;
            }

            /* According to the ABI, the stack is growing downward. */
            if (addr + len < fp) {
                break;
            }

            /* Not in this frame. */
            if (addr > fp + 64) {
                continue;
            }

            /* Handle access before this window. */
            if (addr < fp) {
                len1 = fp - addr;
                if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) {
                    return -1;
                }
                addr += len1;
                len -= len1;
                buf += len1;
            }

            /* Access byte per byte to registers. Not very efficient but speed
             * is not critical.
             */
            off = addr - fp;
            len1 = 64 - off;

            if (len1 > len) {
                len1 = len;
            }

            for (; len1; len1--) {
                int reg = cwp * 16 + 8 + (off >> 2);
                union {
                    uint32_t v;
                    uint8_t c[4];
                } u;
                u.v = cpu_to_be32(env->regbase[reg]);
                *buf++ = u.c[off & 3];
                addr++;
                len--;
                off++;
            }

            if (len == 0) {
                return 0;
            }
        }
    }
    return cpu_memory_rw_debug(cs, addr, buf, len, is_write);
}

#else /* !TARGET_SPARC64 */

/* 41 bit physical address space */
static inline hwaddr ultrasparc_truncate_physical(uint64_t x)
{
    return x & 0x1ffffffffffULL;
}

/*
 * UltraSparc IIi I/DMMUs
 */

/* Returns true if the TTE tag is valid and matches the virtual address
   value in context; requires the virtual address mask value calculated
   from the TTE entry size */
static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
                                       uint64_t address, uint64_t context,
                                       hwaddr *physical)
{
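    /*
     * TTE_PGSIZE() is the 2-bit page size field, so the mask spans
     * 8K << (3 * size) bytes: 8K, 64K, 512K or 4M pages.
     */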
    uint64_t mask = -(8192ULL << 3 * TTE_PGSIZE(tlb->tte));

    /* valid, context match, virtual address match? */
    if (TTE_IS_VALID(tlb->tte) &&
        (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context))
        && compare_masked(address, tlb->tag, mask)) {
        /* decode physical address */
        *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL;
        return 1;
    }

    return 0;
}

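/*
 * D-MMU lookup.  The 'rw' convention used here: 0 is a data load, 1 a data
 * store and 4 appears to denote a non-faulting access (it sets the SFSR NF
 * bit and is checked against pages with side effects).  The MMU index
 * selects which context register the TLB tag is compared against.
 */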
static int get_physical_address_data(CPUSPARCState *env,
                                     hwaddr *physical, int *prot,
                                     target_ulong address, int rw, int mmu_idx)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    unsigned int i;
    uint64_t context;
    uint64_t sfsr = 0;
    bool is_user = false;

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
        g_assert_not_reached();
    case MMU_USER_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        sfsr |= SFSR_CT_PRIMARY;
        break;
    case MMU_USER_SECONDARY_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_SECONDARY_IDX:
        context = env->dmmu.mmu_secondary_context & 0x1fff;
        sfsr |= SFSR_CT_SECONDARY;
        break;
    case MMU_NUCLEUS_IDX:
        sfsr |= SFSR_CT_NUCLEUS;
        context = 0;
        break;
    default:
        g_assert_not_reached();
    }

    if (rw == 1) {
        sfsr |= SFSR_WRITE_BIT;
    } else if (rw == 4) {
        sfsr |= SFSR_NF_BIT;
    }

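    /*
     * Linear scan of the 64-entry software D-TLB.  On a hit the entry is
     * checked for privilege, side-effect and NFO violations before the
     * protection bits are derived from the writable flag.
     */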
    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) {
            int do_fault = 0;

            /* multiple bits in SFSR.FT may be set on TT_DFAULT */
            if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) {
                do_fault = 1;
                sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */
                trace_mmu_helper_dfault(address, context, mmu_idx, env->tl);
            }
            if (rw == 4) {
                if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NF_E_BIT;
                }
            } else {
                if (TTE_IS_NFO(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NFO_BIT;
                }
            }

            if (do_fault) {
                /* faults above are reported with TT_DFAULT. */
                cs->exception_index = TT_DFAULT;
            } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) {
                do_fault = 1;
                cs->exception_index = TT_DPROT;

                trace_mmu_helper_dprot(address, context, mmu_idx, env->tl);
            }

            if (!do_fault) {
                *prot = PAGE_READ;
                if (TTE_IS_W_OK(env->dtlb[i].tte)) {
                    *prot |= PAGE_WRITE;
                }

                TTE_SET_USED(env->dtlb[i].tte);

                return 0;
            }

            if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
                sfsr |= SFSR_OW_BIT; /* overflow (not read before
                                        another fault) */
            }

            if (env->pstate & PS_PRIV) {
                sfsr |= SFSR_PR_BIT;
            }

            /* FIXME: ASI field in SFSR must be set */
            env->dmmu.sfsr = sfsr | SFSR_VALID_BIT;

            env->dmmu.sfar = address; /* Fault address register */

            env->dmmu.tag_access = (address & ~0x1fffULL) | context;

            return 1;
        }
    }

    trace_mmu_helper_dmiss(address, context);

    /*
     * On MMU misses:
     * - UltraSPARC IIi: SFSR and SFAR unmodified
     * - JPS1: SFAR updated and some fields of SFSR updated
     */
    env->dmmu.tag_access = (address & ~0x1fffULL) | context;
    cs->exception_index = TT_DMISS;
    return 1;
}

static int get_physical_address_code(CPUSPARCState *env,
                                     hwaddr *physical, int *prot,
                                     target_ulong address, int mmu_idx)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    unsigned int i;
    uint64_t context;
    bool is_user = false;

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
    case MMU_USER_SECONDARY_IDX:
    case MMU_KERNEL_SECONDARY_IDX:
        g_assert_not_reached();
    case MMU_USER_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        break;
    default:
        context = 0;
        break;
    }

    if (env->tl == 0) {
        /* PRIMARY context */
        context = env->dmmu.mmu_primary_context & 0x1fff;
    } else {
        /* NUCLEUS context */
        context = 0;
    }

    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->itlb[i],
                                 address, context, physical)) {
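            /*
             * For instruction fetches only the privileged bit is relevant:
             * a user-mode fetch from a privileged page raises TT_TFAULT and
             * records the fault in the I-MMU SFSR and tag access registers.
             */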
            if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) {
                /* Fault status register */
                if (env->immu.sfsr & SFSR_VALID_BIT) {
                    env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before
                                                     another fault) */
                } else {
                    env->immu.sfsr = 0;
                }
                if (env->pstate & PS_PRIV) {
                    env->immu.sfsr |= SFSR_PR_BIT;
                }
                if (env->tl > 0) {
                    env->immu.sfsr |= SFSR_CT_NUCLEUS;
                }

                /* FIXME: ASI field in SFSR must be set */
                env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT;
                cs->exception_index = TT_TFAULT;

                env->immu.tag_access = (address & ~0x1fffULL) | context;

                trace_mmu_helper_tfault(address, context);

                return 1;
            }
            TTE_SET_USED(env->itlb[i].tte);
            *prot = PAGE_EXEC;

            return 0;
        }
    }

    trace_mmu_helper_tmiss(address, context);
    /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */
    env->immu.tag_access = (address & ~0x1fffULL) | context;
    cs->exception_index = TT_TMISS;
    return 1;
}

static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
                                int *prot, int *access_index,
                                target_ulong address, int rw, int mmu_idx,
                                target_ulong *page_size)
{
    /* ??? We treat everything as a small page, then explicitly flush
       everything when an entry is evicted.  */
    *page_size = TARGET_PAGE_SIZE;

    /* safety net to catch wrong softmmu index use from dynamic code */
    if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) {
        if (rw == 2) {
            trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        } else {
            trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        }
    }

    if (mmu_idx == MMU_PHYS_IDX) {
        *physical = ultrasparc_truncate_physical(address);
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    if (rw == 2) {
        return get_physical_address_code(env, physical, prot, address,
                                         mmu_idx);
    } else {
        return get_physical_address_data(env, physical, prot, address, rw,
                                         mmu_idx);
    }
}

/* Perform address translation */
int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
                               int mmu_idx)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong vaddr;
    hwaddr paddr;
    target_ulong page_size;
    int error_code = 0, prot, access_index;

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &paddr, &prot, &access_index,
                                      address, rw, mmu_idx, &page_size);
    if (error_code == 0) {
        vaddr = address;

        trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl,
                                   env->dmmu.mmu_primary_context,
                                   env->dmmu.mmu_secondary_context);

        tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
        return 0;
    }
    return 1;
}

void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env)
{
    unsigned int i;
    const char *mask;

    (*cpu_fprintf)(f, "MMU contexts: Primary: %" PRId64 ", Secondary: %"
                   PRId64 "\n",
                   env->dmmu.mmu_primary_context,
                   env->dmmu.mmu_secondary_context);
    (*cpu_fprintf)(f, "DMMU Tag Access: %" PRIx64 ", TSB Tag Target: %" PRIx64
                   "\n", env->dmmu.tag_access, env->dmmu.tsb_tag_target);
    if ((env->lsu & DMMU_E) == 0) {
        (*cpu_fprintf)(f, "DMMU disabled\n");
    } else {
        (*cpu_fprintf)(f, "DMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->dtlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->dtlb[i].tte)) {
                (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx"
                               ", %s, %s, %s, %s, ctx %" PRId64 " %s\n",
                               i,
                               env->dtlb[i].tag & (uint64_t)~0x1fffULL,
                               TTE_PA(env->dtlb[i].tte),
                               mask,
                               TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user",
                               TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO",
                               TTE_IS_LOCKED(env->dtlb[i].tte) ?
                               "locked" : "unlocked",
                               env->dtlb[i].tag & (uint64_t)0x1fffULL,
                               TTE_IS_GLOBAL(env->dtlb[i].tte) ?
                               "global" : "local");
            }
        }
    }
    if ((env->lsu & IMMU_E) == 0) {
        (*cpu_fprintf)(f, "IMMU disabled\n");
    } else {
        (*cpu_fprintf)(f, "IMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->itlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->itlb[i].tte)) {
                (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx"
                               ", %s, %s, %s, ctx %" PRId64 " %s\n",
                               i,
                               env->itlb[i].tag & (uint64_t)~0x1fffULL,
                               TTE_PA(env->itlb[i].tte),
                               mask,
                               TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user",
                               TTE_IS_LOCKED(env->itlb[i].tte) ?
                               "locked" : "unlocked",
                               env->itlb[i].tag & (uint64_t)0x1fffULL,
                               TTE_IS_GLOBAL(env->itlb[i].tte) ?
                               "global" : "local");
            }
        }
    }
}

#endif /* TARGET_SPARC64 */

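/*
 * Translation helper shared by both MMU models for debug/probe accesses:
 * it returns 0 with the physical address in *phys when a mapping exists,
 * non-zero otherwise, leaving exception delivery to the normal fault path.
 */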
static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys,
                                   target_ulong addr, int rw, int mmu_idx)
{
    target_ulong page_size;
    int prot, access_index;

    return get_physical_address(env, phys, &prot, &access_index, addr, rw,
                                mmu_idx, &page_size);
}

#if defined(TARGET_SPARC64)
hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr,
                                 int mmu_idx)
{
    hwaddr phys_addr;

    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) {
        return -1;
    }
    return phys_addr;
}
#endif

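/*
 * Debug lookup used by gdbstub/monitor: try the address as an instruction
 * fetch (rw == 2) first, fall back to a data read (rw == 0), then check
 * that the resulting physical address is backed by a memory region.
 */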
hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    hwaddr phys_addr;
    int mmu_idx = cpu_mmu_index(env, false);
    MemoryRegionSection section;

    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
        if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) {
            return -1;
        }
    }
    section = memory_region_find(get_system_memory(), phys_addr, 1);
    memory_region_unref(section.mr);
    if (!int128_nz(section.size)) {