/*
 *  Sparc MMU helpers
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "qemu/qemu-print.h"
#include "trace.h"

/* Sparc MMU emulation */

#ifndef TARGET_SPARC64
/*
 * Sparc V8 Reference MMU (SRMMU)
 */
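/*
 * access_table is indexed by the access type (the FSR.AT encoding computed
 * in get_physical_address below) and the PTE ACC field.  Non-zero entries
 * are fault-type codes already shifted into FSR bits [4:2]:
 * 8 = protection error (FT = 2), 12 = privilege violation (FT = 3).
 */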
static const int access_table[8][8] = {
    { 0, 0, 0, 0, 8, 0, 12, 12 },
    { 0, 0, 0, 0, 8, 0, 0, 0 },
    { 8, 8, 0, 0, 0, 8, 12, 12 },
    { 8, 8, 0, 0, 0, 8, 0, 0 },
    { 8, 0, 8, 0, 8, 8, 12, 12 },
    { 8, 0, 8, 0, 8, 0, 8, 0 },
    { 8, 8, 8, 0, 8, 8, 12, 12 },
    { 8, 8, 8, 0, 8, 8, 8, 0 }
};

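/*
 * perm_table maps the PTE ACC field to QEMU page protection bits;
 * row 0 is used for supervisor accesses, row 1 for user accesses
 * (indexed by is_user below).
 */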
static const int perm_table[2][8] = {
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC
    },
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ,
        0,
        0,
    }
};

static int get_physical_address(CPUSPARCState *env, CPUTLBEntryFull *full,
                                int *access_index, target_ulong address,
                                int rw, int mmu_idx)
{
    int access_perms = 0;
    hwaddr pde_ptr;
    uint32_t pde;
    int error_code = 0, is_dirty, is_user;
    unsigned long page_offset;
    CPUState *cs = env_cpu(env);
    MemTxResult result;

    is_user = mmu_idx == MMU_USER_IDX;

    if (mmu_idx == MMU_PHYS_IDX) {
        full->lg_page_size = TARGET_PAGE_BITS;
        /* Boot mode: instruction fetches are taken from PROM */
        if (rw == 2 && (env->mmuregs[0] & env->def.mmu_bm)) {
            full->phys_addr = env->prom_addr | (address & 0x7ffffULL);
            full->prot = PAGE_READ | PAGE_EXEC;
            return 0;
        }
        full->phys_addr = address;
        full->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

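    /*
     * Encode the access type as in the FSR.AT field:
     * bit 2 = store, bit 1 = instruction access, bit 0 = supervisor.
     */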
    *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1);
    full->phys_addr = 0xffffffffffff0000ULL;

    /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
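    /* mmuregs[1] is the Context Table Pointer register (table base >> 4),
       mmuregs[2] the Context register. */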
    /* Context base + context number */
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        return 4 << 2; /* Translation fault, L = 0 */
    }

    /* Ctx pde */
    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
        return 1 << 2;
    case 2: /* L0 PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 4 << 2;
    case 1: /* L0 PDE */
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = address_space_ldl(cs->as, pde_ptr,
                                MEMTXATTRS_UNSPECIFIED, &result);
        if (result != MEMTX_OK) {
            return (1 << 8) | (4 << 2); /* Translation fault, L = 1 */
        }

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
            return (1 << 8) | (1 << 2);
        case 3: /* Reserved */
            return (1 << 8) | (4 << 2);
        case 1: /* L1 PDE */
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = address_space_ldl(cs->as, pde_ptr,
                                    MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                return (2 << 8) | (4 << 2); /* Translation fault, L = 2 */
            }

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
                return (2 << 8) | (1 << 2);
            case 3: /* Reserved */
                return (2 << 8) | (4 << 2);
            case 1: /* L2 PDE */
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = address_space_ldl(cs->as, pde_ptr,
                                        MEMTXATTRS_UNSPECIFIED, &result);
                if (result != MEMTX_OK) {
                    return (3 << 8) | (4 << 2); /* Translation fault, L = 3 */
                }

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                    return (3 << 8) | (1 << 2);
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return (3 << 8) | (4 << 2);
                case 2: /* L3 PTE */
                    page_offset = 0;
                }
                full->lg_page_size = TARGET_PAGE_BITS;
                break;
            case 2: /* L2 PTE */
                page_offset = address & 0x3f000;
                full->lg_page_size = 18;
            }
            break;
        case 2: /* L1 PTE */
            page_offset = address & 0xfff000;
            full->lg_page_size = 24;
            break;
        }
    }

    /* check access */
    access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
    error_code = access_table[*access_index][access_perms];
    if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) {
        return error_code;
    }

    /* update page modified and dirty bits */
    is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
    if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
        pde |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pde |= PG_MODIFIED_MASK;
        }
        stl_phys_notdirty(cs->as, pde_ptr, pde);
    }

    /* the page can be put in the TLB */
    full->prot = perm_table[is_user][access_perms];
    if (!(pde & PG_MODIFIED_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        full->prot &= ~PAGE_WRITE;
    }

    /* Even if large ptes, we map only one 4KB page in the cache to
       avoid filling it too fast */
    full->phys_addr = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset;
    return error_code;
}

/* Perform address translation */
bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    CPUSPARCState *env = cpu_env(cs);
    CPUTLBEntryFull full = {};
    target_ulong vaddr;
    int error_code = 0, access_index;

    /*
     * TODO: If we ever need tlb_vaddr_to_host for this target,
     * then we must figure out how to manipulate FSR and FAR
     * when both MMU_NF and probe are set.  In the meantime,
     * do not support this use case.
     */
    assert(!probe);

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &full, &access_index,
                                      address, access_type, mmu_idx);
    vaddr = address;
    if (likely(error_code == 0)) {
        qemu_log_mask(CPU_LOG_MMU,
                      "Translate at %" VADDR_PRIx " -> "
                      HWADDR_FMT_plx ", vaddr " TARGET_FMT_lx "\n",
                      address, full.phys_addr, vaddr);
        tlb_set_page_full(cs, mmu_idx, vaddr, &full);
        return true;
    }

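    /*
     * Record the fault in the FSR (mmuregs[3]) and FAR (mmuregs[4]):
     * the access type lands in bits [7:5], the walk's error code carries
     * the level and fault type, and bit 1 flags the fault address valid.
     */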
    if (env->mmuregs[3]) { /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    }
    env->mmuregs[3] |= (access_index << 5) | error_code | 2;
    env->mmuregs[4] = address; /* Fault address register */

    if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) {
        /* No fault mode: if a mapping is available, just override
           permissions. If no mapping is available, redirect accesses to
           neverland. Fake/overridden mappings will be flushed when
           switching to normal mode. */
        full.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        tlb_set_page_full(cs, mmu_idx, vaddr, &full);
        return true;
    } else {
        if (access_type == MMU_INST_FETCH) {
            cs->exception_index = TT_TFAULT;
        } else {
            cs->exception_index = TT_DFAULT;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }
}

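/*
 * Walk the tables as the SRMMU probe ASI would: mmulev 0/1/2 stop at the
 * page/segment/region level entry, mmulev 3 at the context table entry.
 */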
target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev)
{
    CPUState *cs = env_cpu(env);
    hwaddr pde_ptr;
    uint32_t pde;
    MemTxResult result;

    /*
     * TODO: MMU probe operations are supposed to set the fault
     * status registers, but we don't do this.
     */

    /* Context base + context number */
    pde_ptr = (hwaddr)(env->mmuregs[1] << 4) +
        (env->mmuregs[2] << 2);
    pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        return 0;
    }

    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
    case 2: /* PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 0;
    case 1: /* L1 PDE */
        if (mmulev == 3) {
            return pde;
        }
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = address_space_ldl(cs->as, pde_ptr,
                                MEMTXATTRS_UNSPECIFIED, &result);
        if (result != MEMTX_OK) {
            return 0;
        }

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
        case 3: /* Reserved */
            return 0;
        case 2: /* L1 PTE */
            return pde;
        case 1: /* L2 PDE */
            if (mmulev == 2) {
                return pde;
            }
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = address_space_ldl(cs->as, pde_ptr,
                                    MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                return 0;
            }

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
            case 3: /* Reserved */
                return 0;
            case 2: /* L2 PTE */
                return pde;
            case 1: /* L3 PDE */
                if (mmulev == 1) {
                    return pde;
                }
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = address_space_ldl(cs->as, pde_ptr,
                                        MEMTXATTRS_UNSPECIFIED, &result);
                if (result != MEMTX_OK) {
                    return 0;
                }

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return 0;
                case 2: /* L3 PTE */
                    return pde;
                }
            }
        }
    }
    return 0;
}

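/*
 * Dump the 256 16MB regions of the current context, descending into the
 * 64 256KB segments and 64 4KB pages that are actually mapped.
 */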
void dump_mmu(CPUSPARCState *env)
{
    CPUState *cs = env_cpu(env);
    target_ulong va, va1, va2;
    unsigned int n, m, o;
    hwaddr pa;
    uint32_t pde;

    qemu_printf("Root ptr: " HWADDR_FMT_plx ", ctx: %d\n",
                (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]);
    for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
        pde = mmu_probe(env, va, 2);
        if (pde) {
            pa = cpu_get_phys_page_debug(cs, va);
            qemu_printf("VA: " TARGET_FMT_lx ", PA: " HWADDR_FMT_plx
                        " PDE: " TARGET_FMT_lx "\n", va, pa, pde);
            for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
                pde = mmu_probe(env, va1, 1);
                if (pde) {
                    pa = cpu_get_phys_page_debug(cs, va1);
                    qemu_printf(" VA: " TARGET_FMT_lx ", PA: "
                                HWADDR_FMT_plx " PDE: " TARGET_FMT_lx "\n",
                                va1, pa, pde);
                    for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
                        pde = mmu_probe(env, va2, 0);
                        if (pde) {
                            pa = cpu_get_phys_page_debug(cs, va2);
                            qemu_printf("  VA: " TARGET_FMT_lx ", PA: "
                                        HWADDR_FMT_plx " PTE: "
                                        TARGET_FMT_lx "\n",
                                        va2, pa, pde);
                        }
                    }
                }
            }
        }
    }
}

/* GDB expects all register windows to be flushed to RAM. This function
 * handles reads (and only reads) in stack frames as if the windows were
 * flushed. We assume that the sparc ABI is followed.
 */
int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address,
                              uint8_t *buf, int len, bool is_write)
{
    CPUSPARCState *env = cpu_env(cs);
    target_ulong addr = address;
    int i;
    int len1;
    int cwp = env->cwp;

    if (!is_write) {
        for (i = 0; i < env->nwindows; i++) {
            int off;
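            /* Each window occupies 16 slots of regbase (%l0-%l7, %i0-%i7);
               slot 22 = 16 + 6 is this window's %i6, i.e. its %fp. */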
            target_ulong fp = env->regbase[cwp * 16 + 22];

            /* Assume fp == 0 means end of frame.  */
            if (fp == 0) {
                break;
            }

            cwp = cpu_cwp_inc(env, cwp + 1);

            /* Invalid window ? */
            if (env->wim & (1 << cwp)) {
                break;
            }

            /* According to the ABI, the stack is growing downward. */
            if (addr + len < fp) {
                break;
            }

            /* Not in this frame. */
            if (addr > fp + 64) {
                continue;
            }

            /* Handle access before this window.  */
            if (addr < fp) {
                len1 = fp - addr;
                if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) {
                    return -1;
                }
                addr += len1;
                len -= len1;
                buf += len1;
            }

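            /* The 64-byte save area at %fp holds the sixteen 32-bit words
               %l0-%l7 and %i0-%i7 that a window flush would have written. */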
            /* Access the registers byte by byte. Not very efficient,
             * but speed is not critical here.
             */
            off = addr - fp;
            len1 = 64 - off;

            if (len1 > len) {
                len1 = len;
            }

            for (; len1; len1--) {
                int reg = cwp * 16 + 8 + (off >> 2);
                union {
                    uint32_t v;
                    uint8_t c[4];
                } u;
                u.v = cpu_to_be32(env->regbase[reg]);
                *buf++ = u.c[off & 3];
                addr++;
                len--;
                off++;
            }

            if (len == 0) {
                return 0;
            }
        }
    }
    return cpu_memory_rw_debug(cs, addr, buf, len, is_write);
}

#else /* !TARGET_SPARC64 */

/* 41 bit physical address space */
static inline hwaddr ultrasparc_truncate_physical(uint64_t x)
{
    return x & 0x1ffffffffffULL;
}

/*
 * UltraSparc IIi I/DMMUs
 */

/* Returns true if the TTE tag is valid and matches the virtual address in
   the given context; the comparison mask is derived from the page size
   encoded in the TTE. */
static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
                                       uint64_t address, uint64_t context,
                                       hwaddr *physical)
{
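    /* TTE_PGSIZE is 0..3 for 8K/64K/512K/4M pages; negating the page size
       yields the mask that selects the bits to compare and keep. */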
    uint64_t mask = -(8192ULL << 3 * TTE_PGSIZE(tlb->tte));

    /* valid, context match, virtual address match? */
    if (TTE_IS_VALID(tlb->tte) &&
        (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context))
        && compare_masked(address, tlb->tag, mask)) {
        /* decode physical address */
        *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL;
        return 1;
    }

    return 0;
}

static uint64_t build_sfsr(CPUSPARCState *env, int mmu_idx, int rw)
{
    uint64_t sfsr = SFSR_VALID_BIT;

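    /* The CT field records which translating context the access used. */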
    switch (mmu_idx) {
    case MMU_PHYS_IDX:
        sfsr |= SFSR_CT_NOTRANS;
        break;
    case MMU_USER_IDX:
    case MMU_KERNEL_IDX:
        sfsr |= SFSR_CT_PRIMARY;
        break;
    case MMU_USER_SECONDARY_IDX:
    case MMU_KERNEL_SECONDARY_IDX:
        sfsr |= SFSR_CT_SECONDARY;
        break;
    case MMU_NUCLEUS_IDX:
        sfsr |= SFSR_CT_NUCLEUS;
        break;
    default:
        g_assert_not_reached();
    }

    if (rw == 1) {
        sfsr |= SFSR_WRITE_BIT;
    } else if (rw == 4) {
        sfsr |= SFSR_NF_BIT;
    }

    if (env->pstate & PS_PRIV) {
        sfsr |= SFSR_PR_BIT;
    }

    if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
        sfsr |= SFSR_OW_BIT; /* overflow (not read before another fault) */
    }

    /* FIXME: ASI field in SFSR must be set */

    return sfsr;
}

static int get_physical_address_data(CPUSPARCState *env, CPUTLBEntryFull *full,
                                     target_ulong address, int rw, int mmu_idx)
{
    CPUState *cs = env_cpu(env);
    unsigned int i;
    uint64_t sfsr;
    uint64_t context;
    bool is_user = false;

    sfsr = build_sfsr(env, mmu_idx, rw);

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
        g_assert_not_reached();
    case MMU_USER_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        break;
    case MMU_USER_SECONDARY_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_SECONDARY_IDX:
        context = env->dmmu.mmu_secondary_context & 0x1fff;
        break;
    default:
        context = 0;
        break;
    }

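    /* The D-TLB is modelled as 64 fully associative entries;
       search them linearly for a matching tag. */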
    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->dtlb[i], address, context,
                                 &full->phys_addr)) {
            int do_fault = 0;

            if (TTE_IS_IE(env->dtlb[i].tte)) {
                full->tlb_fill_flags |= TLB_BSWAP;
            }

            /* access ok? */
            /* multiple bits in SFSR.FT may be set on TT_DFAULT */
            if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) {
                do_fault = 1;
                sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */
                trace_mmu_helper_dfault(address, context, mmu_idx, env->tl);
            }

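            /* rw == 4 denotes a no-fault (ASI *_NO_FAULT) access. */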
            if (rw == 4) {
                if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NF_E_BIT;
                }
            } else {
                if (TTE_IS_NFO(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NFO_BIT;
                }
            }

            if (do_fault) {
                /* faults above are reported with TT_DFAULT. */
                cs->exception_index = TT_DFAULT;
            } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) {
                do_fault = 1;
                cs->exception_index = TT_DPROT;

                trace_mmu_helper_dprot(address, context, mmu_idx, env->tl);
            }

            if (!do_fault) {
                full->prot = PAGE_READ;
                if (TTE_IS_W_OK(env->dtlb[i].tte)) {
                    full->prot |= PAGE_WRITE;
                }

                TTE_SET_USED(env->dtlb[i].tte);

                return 0;
            }

            env->dmmu.sfsr = sfsr;
            env->dmmu.sfar = address; /* Fault address register */
            env->dmmu.tag_access = (address & ~0x1fffULL) | context;
            return 1;
        }
    }

    trace_mmu_helper_dmiss(address, context);

    /*
     * On MMU misses:
     * - UltraSPARC IIi: SFSR and SFAR unmodified
     * - JPS1: SFAR updated and some fields of SFSR updated
     */
    env->dmmu.tag_access = (address & ~0x1fffULL) | context;
    cs->exception_index = TT_DMISS;
    return 1;
}

static int get_physical_address_code(CPUSPARCState *env, CPUTLBEntryFull *full,
                                     target_ulong address, int mmu_idx)
{
    CPUState *cs = env_cpu(env);
    unsigned int i;
    uint64_t context;
    bool is_user = false;

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
    case MMU_USER_SECONDARY_IDX:
    case MMU_KERNEL_SECONDARY_IDX:
        g_assert_not_reached();
    case MMU_USER_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        break;
    default:
        context = 0;
        break;
    }

    if (env->tl == 0) {
        /* PRIMARY context */
        context = env->dmmu.mmu_primary_context & 0x1fff;
    } else {
        /* NUCLEUS context */
        context = 0;
    }

    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->itlb[i],
                                 address, context, &full->phys_addr)) {
            /* access ok? */
            if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) {
                /* Fault status register */
                if (env->immu.sfsr & SFSR_VALID_BIT) {
                    env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before
                                                     another fault) */
                } else {
                    env->immu.sfsr = 0;
                }

                if (env->pstate & PS_PRIV) {
                    env->immu.sfsr |= SFSR_PR_BIT;
                }
                if (env->tl > 0) {
                    env->immu.sfsr |= SFSR_CT_NUCLEUS;
                }

                /* FIXME: ASI field in SFSR must be set */
                env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT;
                cs->exception_index = TT_TFAULT;

                env->immu.tag_access = (address & ~0x1fffULL) | context;

                trace_mmu_helper_tfault(address, context);

                return 1;
            }
            full->prot = PAGE_EXEC;
            TTE_SET_USED(env->itlb[i].tte);
            return 0;
        }
    }

    trace_mmu_helper_tmiss(address, context);

    /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */
    env->immu.tag_access = (address & ~0x1fffULL) | context;
    cs->exception_index = TT_TMISS;
    return 1;
}

static int get_physical_address(CPUSPARCState *env, CPUTLBEntryFull *full,
                                int *access_index, target_ulong address,
                                int rw, int mmu_idx)
{
    /* ??? We treat everything as a small page, then explicitly flush
       everything when an entry is evicted.  */
    full->lg_page_size = TARGET_PAGE_BITS;

    /* safety net to catch wrong softmmu index use from dynamic code */
    if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) {
        if (rw == 2) {
            trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        } else {
            trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        }
    }

    if (mmu_idx == MMU_PHYS_IDX) {
        full->phys_addr = ultrasparc_truncate_physical(address);
        full->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    if (rw == 2) {
        return get_physical_address_code(env, full, address, mmu_idx);
    } else {
        return get_physical_address_data(env, full, address, rw, mmu_idx);
    }
}

/* Perform address translation */
bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    CPUSPARCState *env = cpu_env(cs);
    CPUTLBEntryFull full = {};
    int error_code = 0, access_index;

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &full, &access_index,
                                      address, access_type, mmu_idx);
    if (likely(error_code == 0)) {
        trace_mmu_helper_mmu_fault(address, full.phys_addr, mmu_idx, env->tl,
                                   env->dmmu.mmu_primary_context,
                                   env->dmmu.mmu_secondary_context);
        tlb_set_page_full(cs, mmu_idx, address, &full);
        return true;
    }
    if (probe) {
        return false;
    }
    cpu_loop_exit_restore(cs, retaddr);
}

void dump_mmu(CPUSPARCState *env)
{
    unsigned int i;
    const char *mask;

    qemu_printf("MMU contexts: Primary: %" PRId64 ", Secondary: %"
                PRId64 "\n",
                env->dmmu.mmu_primary_context,
                env->dmmu.mmu_secondary_context);
    qemu_printf("DMMU Tag Access: %" PRIx64 ", TSB Tag Target: %" PRIx64
                "\n", env->dmmu.tag_access, env->dmmu.tsb_tag_target);
    if ((env->lsu & DMMU_E) == 0) {
        qemu_printf("DMMU disabled\n");
    } else {
        qemu_printf("DMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->dtlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->dtlb[i].tte)) {
                qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx"
                            ", %s, %s, %s, %s, ie %s, ctx %" PRId64 " %s\n",
                            i,
                            env->dtlb[i].tag & (uint64_t)~0x1fffULL,
                            TTE_PA(env->dtlb[i].tte),
                            mask,
                            TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user",
                            TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO",
                            TTE_IS_LOCKED(env->dtlb[i].tte) ?
                            "locked" : "unlocked",
                            TTE_IS_IE(env->dtlb[i].tte) ?
                            "yes" : "no",
                            env->dtlb[i].tag & (uint64_t)0x1fffULL,
                            TTE_IS_GLOBAL(env->dtlb[i].tte) ?
                            "global" : "local");
            }
        }
    }
    if ((env->lsu & IMMU_E) == 0) {
        qemu_printf("IMMU disabled\n");
    } else {
        qemu_printf("IMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->itlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->itlb[i].tte)) {
                qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx"
                            ", %s, %s, %s, ctx %" PRId64 " %s\n",
                            i,
                            env->itlb[i].tag & (uint64_t)~0x1fffULL,
                            TTE_PA(env->itlb[i].tte),
                            mask,
                            TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user",
                            TTE_IS_LOCKED(env->itlb[i].tte) ?
                            "locked" : "unlocked",
                            env->itlb[i].tag & (uint64_t)0x1fffULL,
                            TTE_IS_GLOBAL(env->itlb[i].tte) ?
                            "global" : "local");
            }
        }
    }
}

#endif /* TARGET_SPARC64 */

static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys,
                                   target_ulong addr, int rw, int mmu_idx)
{
    CPUTLBEntryFull full = {};
    int access_index, ret;

    ret = get_physical_address(env, &full, &access_index, addr, rw, mmu_idx);
    if (ret == 0) {
        *phys = full.phys_addr;
    }
    return ret;
}

#if defined(TARGET_SPARC64)
hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr,
                                 int mmu_idx)
{
    hwaddr phys_addr;

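    /* rw = 4 requests the no-fault lookup handled in
       get_physical_address_data(). */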
    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) {
        return -1;
    }
    return phys_addr;
}
#endif

hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    CPUSPARCState *env = cpu_env(cs);
    hwaddr phys_addr;
    int mmu_idx = cpu_mmu_index(cs, false);

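    /* Try the translation first as an instruction fetch (rw = 2),
       then as a data load (rw = 0). */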
    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
        if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) {
            return -1;
        }
    }
    return phys_addr;
}

G_NORETURN void sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                              MMUAccessType access_type,
                                              int mmu_idx,
                                              uintptr_t retaddr)
{
    CPUSPARCState *env = cpu_env(cs);

#ifdef TARGET_SPARC64
    env->dmmu.sfsr = build_sfsr(env, mmu_idx, access_type);
    env->dmmu.sfar = addr;
#else
    env->mmuregs[4] = addr;
#endif

    cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
}