/*
 *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "sysemu/kvm.h"
#include "mmu-hash64.h"
#include "mmu-hash32.h"
#include "exec/exec-all.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"

/* #define DUMP_PAGE_TABLES */
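
/*
 * SDR1 tells the hash MMU where its hashed page table lives. On 64-bit
 * hash MMUs the value carries the HTABORG (table origin) and HTABSIZE
 * fields, which are sanity-checked below before the SPR is updated;
 * 32-bit models currently store the value as-is (see the FIXME below).
 */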
void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
    PowerPCCPU *cpu = env_archcpu(env);
    qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
    assert(!cpu->env.has_hv_mode || !cpu->vhyp);
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE;
        target_ulong htabsize = value & SDR_64_HTABSIZE;

        if (value & ~sdr_mask) {
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid bits 0x"TARGET_FMT_lx
                          " set in SDR1", value & ~sdr_mask);

            qemu_log_mask(LOG_GUEST_ERROR, "Invalid HTABSIZE 0x" TARGET_FMT_lx
                          " stored in SDR1", htabsize);
#endif /* defined(TARGET_PPC64) */
    /* FIXME: Should check for valid HTABMASK values in 32-bit case */
    env->spr[SPR_SDR1] = value;

/*****************************************************************************/
/* PowerPC MMU emulation */
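
/*
 * pp_check() computes the allowed access rights from the 32-bit hash PTE
 * protection model: "key" is the Ks/Kp selection made from the segment
 * register and MSR[PR], "pp" holds the two PP bits from the PTE, and
 * "nx" removes execute permission for no-execute segments.
 */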
static int pp_check(int key, int pp, int nx)
    /* Compute access rights */
            access = PAGE_READ | PAGE_WRITE;

static int check_prot(int prot, MMUAccessType access_type)
    return prot & prot_for_access_type(access_type) ? 0 : -2;
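
/*
 * The 6xx software TLB is a flat array organised in ways: the entry used
 * for an address is selected by low EPN bits within each way, and on CPUs
 * with split instruction/data TLBs (id_tlbs == 1) the instruction entries
 * are kept separate from the data entries.
 */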
int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
                      int way, int is_code)
    /* Select TLB num in a way from address */
    nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
    nr += env->tlb_per_way * way;
    /* 6xx have separate TLBs for instructions and data */
    if (is_code && env->id_tlbs == 1) {
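
/*
 * ppc6xx_tlb_pte_check() compares one candidate PTE against the current
 * lookup context: the PTE must be valid, its H (secondary hash) bit must
 * match the hash function being tried, and the VSID/API tag (ptem) must
 * match. On a match the PP bits are turned into access rights via
 * pp_check() and the requested access is validated with check_prot().
 */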
static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
                                target_ulong pte1, int h,
                                MMUAccessType access_type)
    target_ulong ptem, mmask;
    int access, ret, pteh, ptev, pp;

    /* Check validity and table match */
    ptev = pte_is_valid(pte0);
    pteh = (pte0 >> 6) & 1;
    if (ptev && h == pteh) {
        /* Check vsid & api */
        ptem = pte0 & PTE_PTEM_MASK;
        mmask = PTE_CHECK_MASK;
        pp = pte1 & 0x00000003;
        if (ptem == ctx->ptem) {
            if (ctx->raddr != (hwaddr)-1ULL) {
                /* all matches should have equal RPN, WIMG & PP */
                if ((ctx->raddr & mmask) != (pte1 & mmask)) {
                    qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n");

            /* Compute access rights */
            access = pp_check(ctx->key, pp, ctx->nx);
            /* Keep the matching PTE information */

            ret = check_prot(ctx->prot, access_type);

                qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

                /* Access right violation */
                qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
                            int ret, MMUAccessType access_type)
    /* Update page flags */
    if (!(*pte1p & 0x00000100)) {
        /* Update accessed flag */
        *pte1p |= 0x00000100;

    if (!(*pte1p & 0x00000080)) {
        if (access_type == MMU_DATA_STORE && ret == 0) {
            /* Update changed flag */
            *pte1p |= 0x00000080;

            /* Force page fault for first write access */
            ctx->prot &= ~PAGE_WRITE;

/* Software driven TLB helpers */
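
/*
 * ppc6xx_tlb_check() walks every way of the software TLB for the given
 * effective address, emulating the PTE index match a hardware TLB would
 * do, remembers the best matching entry and finally updates its R/C bits
 * through pte_update_flags().
 */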
static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
                            target_ulong eaddr, MMUAccessType access_type)
    ret = -1; /* No TLB found */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH);
        tlb = &env->tlb.tlb6[nr];
        /* This test "emulates" the PTE index match for hardware TLBs */
        if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
            qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s [" TARGET_FMT_lx
                          " " TARGET_FMT_lx "] <> " TARGET_FMT_lx "\n",
                          pte_is_valid(tlb->pte0) ? "valid" : "inval",
                          tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);

        qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s " TARGET_FMT_lx " <> "
                      TARGET_FMT_lx " " TARGET_FMT_lx " %c %c\n",
                      pte_is_valid(tlb->pte0) ? "valid" : "inval",
                      tlb->EPN, eaddr, tlb->pte1,
                      access_type == MMU_DATA_STORE ? 'S' : 'L',
                      access_type == MMU_INST_FETCH ? 'I' : 'D');
        switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
            /* TLB inconsistency */
            /* Access violation */
            /*
             * XXX: we should go on looping to check all TLBs
             * consistency but we can speed-up the whole thing as
             * the result would be undefined if TLBs are not
             */

    qemu_log_mask(CPU_LOG_MMU, "found TLB at addr " HWADDR_FMT_plx
                  " prot=%01x ret=%d\n",
                  ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
    /* Update page flags */
    pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type);

/* Perform BAT hit & translation */
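
/*
 * Block Address Translation maps large contiguous regions: BATu carries
 * the block effective page index (BEPI), the block length mask (BL) and
 * the Vs/Vp valid bits, while BATl carries the physical base and the PP
 * protection bits. Whether an entry is valid for the current access
 * depends on MSR[PR], as decoded by bat_size_prot() below.
 */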
static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
                                 int *validp, int *protp, target_ulong *BATu,
    bl = (*BATu & 0x00001FFC) << 15;
    if ((!FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000002)) ||
        (FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000001))) {
        pp = *BATl & 0x00000003;
            prot = PAGE_READ | PAGE_EXEC;
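
/*
 * get_bat_6xx_tlb() scans the (I or D) BAT array: an entry hits when the
 * top nibble of the effective address equals BEPI's and the next 11 bits
 * match BEPI under the BL mask; the real address is then the block's
 * physical base combined with the BL-selected address bits and the page
 * offset.
 */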
static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
                           target_ulong virtual, MMUAccessType access_type)
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;
    bool ifetch = access_type == MMU_INST_FETCH;

    qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
                  ifetch ? 'I' : 'D', virtual);
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];

        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];

    for (i = 0; i < env->nb_BATs; i++) {
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
        qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " TARGET_FMT_lx " BATu "
                      TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__,
                      ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl);
        if ((virtual & 0xF0000000) == BEPIu &&
            ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
            /* Get physical address */
            ctx->raddr = (*BATl & 0xF0000000) |
                ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
                (virtual & 0x0001F000);
            /* Compute access rights */
            ret = check_prot(ctx->prot, access_type);
            qemu_log_mask(CPU_LOG_MMU, "BAT %d match: r " HWADDR_FMT_plx
                          " prot=%c%c\n", i, ctx->raddr,
                          ctx->prot & PAGE_READ ? 'R' : '-',
                          ctx->prot & PAGE_WRITE ? 'W' : '-');

    if (qemu_log_enabled()) {
        qemu_log_mask(CPU_LOG_MMU, "no BAT match for "
                      TARGET_FMT_lx ":\n", virtual);
        for (i = 0; i < 4; i++) {
            BEPIu = *BATu & 0xF0000000;
            BEPIl = *BATu & 0x0FFE0000;
            bl = (*BATu & 0x00001FFC) << 15;
            qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v "
                          TARGET_FMT_lx " BATu " TARGET_FMT_lx
                          " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                          TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                          __func__, ifetch ? 'I' : 'D', i, virtual,
                          *BATu, *BATl, BEPIu, BEPIl, bl);

/* Perform segment based translation */
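
/*
 * For ordinary (T=0) segments the segment register supplies the Ks/Kp
 * protection keys, the no-execute bit and a 24-bit VSID; the VSID and the
 * page index form both the hash into the page table and the PTE tag (ptem)
 * checked above. Direct-store (T=1) segments are only partially handled.
 */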
static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
                               target_ulong eaddr, MMUAccessType access_type,
    PowerPCCPU *cpu = env_archcpu(env);
    int ds, target_page_bits;
    target_ulong sr, pgidx;

    pr = FIELD_EX64(env->msr, MSR, PR);
    sr = env->sr[eaddr >> 28];
    ctx->key = (((sr & 0x20000000) && pr) ||
                ((sr & 0x40000000) && !pr)) ? 1 : 0;
    ds = sr & 0x80000000 ? 1 : 0;
    ctx->nx = sr & 0x10000000 ? 1 : 0;
    vsid = sr & 0x00FFFFFF;
    target_page_bits = TARGET_PAGE_BITS;
    qemu_log_mask(CPU_LOG_MMU,
                  "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
                  " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
                  " ir=%d dr=%d pr=%d %d t=%d\n",
                  eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr,
                  (int)FIELD_EX64(env->msr, MSR, IR),
                  (int)FIELD_EX64(env->msr, MSR, DR), pr ? 1 : 0,
                  access_type == MMU_DATA_STORE, type);
    pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
    ctx->ptem = (vsid << 7) | (pgidx >> 10);

    qemu_log_mask(CPU_LOG_MMU,
                  "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
                  ctx->key, ds, ctx->nx, vsid);

    /* Check if instruction fetch is allowed, if needed */
    if (type != ACCESS_CODE || ctx->nx == 0) {
        /* Page address translation */
        qemu_log_mask(CPU_LOG_MMU, "htab_base " HWADDR_FMT_plx
                      " htab_mask " HWADDR_FMT_plx
                      " hash " HWADDR_FMT_plx "\n",
                      ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);
        ctx->hash[1] = ~hash;

        /* Initialize real address with an invalid value */
        ctx->raddr = (hwaddr)-1ULL;
        /* Software TLB search */
        ret = ppc6xx_tlb_check(env, ctx, eaddr, access_type);
#if defined(DUMP_PAGE_TABLES)
        if (qemu_loglevel_mask(CPU_LOG_MMU)) {
            CPUState *cs = env_cpu(env);
            uint32_t a0, a1, a2, a3;

            qemu_log("Page table: " HWADDR_FMT_plx " len " HWADDR_FMT_plx
                     "\n", ppc_hash32_hpt_base(cpu),
                     ppc_hash32_hpt_mask(cpu) + 0x80);
            for (curaddr = ppc_hash32_hpt_base(cpu);
                 curaddr < (ppc_hash32_hpt_base(cpu)
                            + ppc_hash32_hpt_mask(cpu) + 0x80);
                a0 = ldl_phys(cs->as, curaddr);
                a1 = ldl_phys(cs->as, curaddr + 4);
                a2 = ldl_phys(cs->as, curaddr + 8);
                a3 = ldl_phys(cs->as, curaddr + 12);
                if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
                    qemu_log(HWADDR_FMT_plx ": %08x %08x %08x %08x\n",
                             curaddr, a0, a1, a2, a3);

        qemu_log_mask(CPU_LOG_MMU, "No access allowed\n");

        qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
        /* Direct-store segment : absolutely *BUGGY* for now */
            /* Integer load/store : only access allowed */
            /* No code fetch is allowed in direct-store areas */
            /* Floating point load/store */
            /* lwarx, ldarx or stwcx. */
            /*
             * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
             *
             * Should make the instruction do no-op. As it already does
             * no-op, it's quite easy :-)
             */
            qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need "
                          "address translation\n");
            if ((access_type == MMU_DATA_STORE || ctx->key != 1) &&
                (access_type == MMU_DATA_LOAD || ctx->key != 0)) {

/* Generic TLB check function for embedded PowerPC implementations */
int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
                     target_ulong address, uint32_t pid, int ext,
    /* Check valid flag */
    if (!(tlb->prot & PAGE_VALID)) {

    mask = ~(tlb->size - 1);
    qemu_log_mask(CPU_LOG_MMU, "%s: TLB %d address " TARGET_FMT_lx
                  " PID %u <=> " TARGET_FMT_lx " " TARGET_FMT_lx " %u %x\n",
                  __func__, i, address, pid, tlb->EPN,
                  mask, (uint32_t)tlb->PID, tlb->prot);
    if (tlb->PID != 0 && tlb->PID != pid) {

    /* Check effective address */
    if ((address & mask) != tlb->EPN) {

    *raddrp = (tlb->RPN & mask) | (address & ~mask);
        /* Extend the physical address to 36 bits */
        *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32;
static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                       target_ulong address,
                                       MMUAccessType access_type)
    int i, ret, zsel, zpr, pr;

    raddr = (hwaddr)-1ULL;
    pr = FIELD_EX64(env->msr, MSR, PR);
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (ppcemb_tlb_check(env, tlb, &raddr, address,
                             env->spr[SPR_40x_PID], 0, i) < 0) {

        zsel = (tlb->attr >> 4) & 0xF;
        zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: TLB %d zsel %d zpr %d ty %d attr %08x\n",
                      __func__, i, zsel, zpr, access_type, tlb->attr);
        /* Check execute enable bit */
            /* All accesses granted */
            ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

            /* Raise Zone protection fault. */
            env->spr[SPR_40x_ESR] = 1 << 22;

            /* Check from TLB entry */
            ctx->prot = tlb->prot;
            ret = check_prot(ctx->prot, access_type);
                env->spr[SPR_40x_ESR] = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
                  " => " HWADDR_FMT_plx
                  " %d %d\n", __func__, address, ctx->raddr, ctx->prot,

    qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
                  " => " HWADDR_FMT_plx
                  " %d %d\n", __func__, address, raddr, ctx->prot, ret);
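
/*
 * BookE lookups try the entry against PID0, then PID1 and PID2 when those
 * registers are non-zero, pick user or supervisor permissions based on
 * MSR[PR], and finally require the entry's address-space bit to match
 * MSR[IR] or MSR[DR] for the access type.
 */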
static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
                              hwaddr *raddr, int *prot, target_ulong address,
                              MMUAccessType access_type, int i)
    if (ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID],
                         !env->nb_pids, i) >= 0) {

    if (env->spr[SPR_BOOKE_PID1] &&
        ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID1], 0, i) >= 0) {

    if (env->spr[SPR_BOOKE_PID2] &&
        ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID2], 0, i) >= 0) {

    qemu_log_mask(CPU_LOG_MMU, "%s: TLB entry not found\n", __func__);

    if (FIELD_EX64(env->msr, MSR, PR)) {
        prot2 = tlb->prot & 0xF;

        prot2 = (tlb->prot >> 4) & 0xF;

    /* Check the address space */
    if ((access_type == MMU_INST_FETCH ?
         FIELD_EX64(env->msr, MSR, IR) :
         FIELD_EX64(env->msr, MSR, DR)) != (tlb->attr & 1)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__);

    if (prot2 & prot_for_access_type(access_type)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__);

    qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2);
    return access_type == MMU_INST_FETCH ? -3 : -2;

static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                         target_ulong address,
                                         MMUAccessType access_type)
    raddr = (hwaddr)-1ULL;
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address,

    qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
                  " => " HWADDR_FMT_plx " %d %d\n", __func__,
                  address, ctx->raddr, ctx->prot, ret);

    qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
                  " => " HWADDR_FMT_plx " %d %d\n", __func__,
                  address, raddr, ctx->prot, ret);
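
/*
 * For MAS-based (BookE 2.06) TLBs the page size is encoded in MAS1.TSIZE;
 * as modelled here an entry covers 1 KiB shifted left by TSIZE, so the
 * address mask used for matching is derived from that size.
 */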
hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
    tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;

    return 1024ULL << tlbm_size;

/* TLB check function for MAS based SoftTLBs */

int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
                     hwaddr *raddrp, target_ulong address,
    if (!FIELD_EX64(env->msr, MSR, CM)) {
        /* In 32bit mode we can only address 32bit EAs */
        address = (uint32_t)address;

    /* Check valid flag */
    if (!(tlb->mas1 & MAS1_VALID)) {

    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    qemu_log_mask(CPU_LOG_MMU, "%s: TLB ADDR=0x" TARGET_FMT_lx
                  " PID=0x%x MAS1=0x%x MAS2=0x%" PRIx64 " mask=0x%"
                  HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" PRIx32 "\n",
                  __func__, address, pid, tlb->mas1, tlb->mas2, mask,
                  tlb->mas7_3, tlb->mas8);

    tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
    if (tlb_pid != 0 && tlb_pid != pid) {

    /* Check effective address */
    if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) {

    *raddrp = (tlb->mas7_3 & mask) | (address & ~mask);
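
/*
 * External-PID accesses (the EPID load/store MMU indexes) translate with
 * the AS, PR and PID taken from the EPLC/EPSC registers rather than from
 * the current MSR and PID registers; mmubooke206_get_as() below selects
 * the right source.
 */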
static bool is_epid_mmu(int mmu_idx)
    return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD;

static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type)
    if (access_type == MMU_DATA_STORE) {
    if (is_epid_mmu(mmu_idx)) {

/*
 * Get EPID register given the mmu_idx. If this is regular load,
 * construct the EPID access bits from current processor state
 *
 * Get the effective AS and PR bits and the PID. The PID is returned
 * only if EPID load is requested, otherwise the caller must detect
 * the correct EPID. Return true if valid EPID is returned.
 */
static bool mmubooke206_get_as(CPUPPCState *env,
                               int mmu_idx, uint32_t *epid_out,
                               bool *as_out, bool *pr_out)
    if (is_epid_mmu(mmu_idx)) {
        if (mmu_idx == PPC_TLB_EPID_STORE) {
            epidr = env->spr[SPR_BOOKE_EPSC];

            epidr = env->spr[SPR_BOOKE_EPLC];

        *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT;
        *as_out = !!(epidr & EPID_EAS);
        *pr_out = !!(epidr & EPID_EPR);

    *as_out = FIELD_EX64(env->msr, MSR, DS);
    *pr_out = FIELD_EX64(env->msr, MSR, PR);

/* Check if the tlb found by hashing really matches */
static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb,
                                 hwaddr *raddr, int *prot,
                                 target_ulong address,
                                 MMUAccessType access_type, int mmu_idx)
    bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);

    if (ppcmas_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID]) >= 0) {

    if (env->spr[SPR_BOOKE_PID1] &&
        ppcmas_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID1]) >= 0) {

    if (env->spr[SPR_BOOKE_PID2] &&
        ppcmas_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID2]) >= 0) {

    if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) {

    qemu_log_mask(CPU_LOG_MMU, "%s: No TLB entry found for effective address "
                  "0x" TARGET_FMT_lx "\n", __func__, address);

    if (tlb->mas7_3 & MAS3_UR) {
    if (tlb->mas7_3 & MAS3_UW) {
    if (tlb->mas7_3 & MAS3_UX) {
    if (tlb->mas7_3 & MAS3_SR) {
    if (tlb->mas7_3 & MAS3_SW) {
    if (tlb->mas7_3 & MAS3_SX) {

    /* Check the address space and permissions */
    if (access_type == MMU_INST_FETCH) {
        /* There is no way to fetch code using epid load */
        as = FIELD_EX64(env->msr, MSR, IR);

    if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__);

    if (prot2 & prot_for_access_type(access_type)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__);

    qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2);
    return access_type == MMU_INST_FETCH ? -3 : -2;

static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                            target_ulong address,
                                            MMUAccessType access_type,
    raddr = (hwaddr)-1ULL;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address,
                                        access_type, mmu_idx);

    qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
                  " => " HWADDR_FMT_plx " %d %d\n", __func__, address,
                  ctx->raddr, ctx->prot, ret);

    qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
                  " => " HWADDR_FMT_plx " %d %d\n", __func__, address,
                  raddr, ctx->prot, ret);

static const char *book3e_tsize_to_str[32] = {
    "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
    "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
    "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",

static void mmubooke_dump_mmu(CPUPPCState *env)
    if (kvm_enabled() && !env->kvm_sw_tlb) {
        qemu_printf("Cannot access KVM TLB\n");

    qemu_printf("\nTLB:\n");
    qemu_printf("Effective Physical Size PID Prot "

    entry = &env->tlb.tlbe[0];
    for (i = 0; i < env->nb_tlb; i++, entry++) {
        uint64_t size = (uint64_t)entry->size;

        /* Check valid flag */
        if (!(entry->prot & PAGE_VALID)) {

        mask = ~(entry->size - 1);
        ea = entry->EPN & mask;
        pa = entry->RPN & mask;
        /* Extend the physical address to 36 bits */
        pa |= (hwaddr)(entry->RPN & 0xF) << 32;
        if (size >= 1 * MiB) {
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB);

            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB);

        qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
                    (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
                    entry->prot, entry->attr);

static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset,
    qemu_printf("\nTLB%d:\n", tlbn);
    qemu_printf("Effective Physical Size TID TS SRWX"
                " URWX WIMGE U0123\n");

    entry = &env->tlb.tlbm[offset];
    for (i = 0; i < tlbsize; i++, entry++) {
        if (!(entry->mas1 & MAS1_VALID)) {

        tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
        size = 1024ULL << tsize;
        ea = entry->mas2 & ~(size - 1);
        pa = entry->mas7_3 & ~(size - 1);

        qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c"
                    " U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
                    (uint64_t)ea, (uint64_t)pa,
                    book3e_tsize_to_str[tsize],
                    (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT,
                    (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT,
                    entry->mas7_3 & MAS3_SR ? 'R' : '-',
                    entry->mas7_3 & MAS3_SW ? 'W' : '-',
                    entry->mas7_3 & MAS3_SX ? 'X' : '-',
                    entry->mas7_3 & MAS3_UR ? 'R' : '-',
                    entry->mas7_3 & MAS3_UW ? 'W' : '-',
                    entry->mas7_3 & MAS3_UX ? 'X' : '-',
                    entry->mas2 & MAS2_W ? 'W' : '-',
                    entry->mas2 & MAS2_I ? 'I' : '-',
                    entry->mas2 & MAS2_M ? 'M' : '-',
                    entry->mas2 & MAS2_G ? 'G' : '-',
                    entry->mas2 & MAS2_E ? 'E' : '-',
                    entry->mas7_3 & MAS3_U0 ? '0' : '-',
                    entry->mas7_3 & MAS3_U1 ? '1' : '-',
                    entry->mas7_3 & MAS3_U2 ? '2' : '-',
                    entry->mas7_3 & MAS3_U3 ? '3' : '-');

static void mmubooke206_dump_mmu(CPUPPCState *env)
    if (kvm_enabled() && !env->kvm_sw_tlb) {
        qemu_printf("Cannot access KVM TLB\n");

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int size = booke206_tlb_size(env, i);

        mmubooke206_dump_one_tlb(env, i, offset, size);

static void mmu6xx_dump_BATs(CPUPPCState *env, int type)
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;

        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];

        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];

    for (i = 0; i < env->nb_BATs; i++) {
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        bl = (*BATu & 0x00001FFC) << 15;
        qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
                    " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                    TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                    type == ACCESS_CODE ? "code" : "data", i,
                    *BATu, *BATl, BEPIu, BEPIl, bl);

static void mmu6xx_dump_mmu(CPUPPCState *env)
    PowerPCCPU *cpu = env_archcpu(env);
    int type, way, entry, i;

    qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu));
    qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu));

    qemu_printf("\nSegment registers:\n");
    for (i = 0; i < 32; i++) {
        if (sr & 0x80000000) {
            qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
                        "CNTLR_SPEC=0x%05x\n", i,
                        sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
                        sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF),
                        (uint32_t)(sr & 0xFFFFF));

            qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i,
                        sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
                        sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0,
                        (uint32_t)(sr & 0x00FFFFFF));

    qemu_printf("\nBATs:\n");
    mmu6xx_dump_BATs(env, ACCESS_INT);
    mmu6xx_dump_BATs(env, ACCESS_CODE);

    if (env->id_tlbs != 1) {
        qemu_printf("ERROR: 6xx MMU should have separated TLB"
                    " for code and data\n");

    qemu_printf("\nTLBs [EPN EPN + SIZE]\n");

    for (type = 0; type < 2; type++) {
        for (way = 0; way < env->nb_ways; way++) {
            for (entry = env->nb_tlb * type + env->tlb_per_way * way;
                 entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1));
                tlb = &env->tlb.tlb6[entry];
                qemu_printf("%s TLB %02d/%02d way:%d %s ["
                            TARGET_FMT_lx " " TARGET_FMT_lx "]\n",
                            type ? "code" : "data", entry % env->nb_tlb,
                            pte_is_valid(tlb->pte0) ? "valid" : "inval",
                            tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE);

void dump_mmu(CPUPPCState *env)
    switch (env->mmu_model) {
    case POWERPC_MMU_BOOKE:
        mmubooke_dump_mmu(env);
    case POWERPC_MMU_BOOKE206:
        mmubooke206_dump_mmu(env);
    case POWERPC_MMU_SOFT_6xx:
        mmu6xx_dump_mmu(env);
#if defined(TARGET_PPC64)
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_03:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_07:
        dump_slb(env_archcpu(env));
    case POWERPC_MMU_3_00:
        if (ppc64_v3_radix(env_archcpu(env))) {
            qemu_log_mask(LOG_UNIMP, "%s: the PPC64 MMU is unsupported\n",

            dump_slb(env_archcpu(env));

        qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__);
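
/*
 * check_physical() handles accesses made with translation disabled: the
 * effective address is used as the real address, read/execute is always
 * granted and PAGE_WRITE is added for the MMU models that can legitimately
 * reach this path.
 */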
static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr,
                          MMUAccessType access_type)
    ctx->prot = PAGE_READ | PAGE_EXEC;

    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_REAL:
    case POWERPC_MMU_BOOKE:
        ctx->prot |= PAGE_WRITE;

        /* Caller's checks mean we should never get here for other models */
        g_assert_not_reached();

int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx,
                              MMUAccessType access_type, int type,
    bool real_mode = (type == ACCESS_CODE && !FIELD_EX64(env->msr, MSR, IR)) ||
                     (type != ACCESS_CODE && !FIELD_EX64(env->msr, MSR, DR));

    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
            ret = check_physical(env, ctx, eaddr, access_type);

            /* Try to find a BAT */
            if (env->nb_BATs != 0) {
                ret = get_bat_6xx_tlb(env, ctx, eaddr, access_type);

                /* We didn't match any BAT entry or don't have BATs */
                ret = get_segment_6xx_tlb(env, ctx, eaddr, access_type, type);

    case POWERPC_MMU_SOFT_4xx:
            ret = check_physical(env, ctx, eaddr, access_type);

            ret = mmu40x_get_physical_address(env, ctx, eaddr, access_type);

    case POWERPC_MMU_BOOKE:
        ret = mmubooke_get_physical_address(env, ctx, eaddr, access_type);
    case POWERPC_MMU_BOOKE206:
        ret = mmubooke206_get_physical_address(env, ctx, eaddr, access_type,
    case POWERPC_MMU_MPC8xx:
        cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
    case POWERPC_MMU_REAL:
            ret = check_physical(env, ctx, eaddr, access_type);

            cpu_abort(env_cpu(env),
                      "PowerPC in real mode do not do any translation\n");

        cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n");
static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
                                         MMUAccessType access_type, int mmu_idx)
    uint32_t missed_tid = 0;
    bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);

    if (access_type == MMU_INST_FETCH) {
        as = FIELD_EX64(env->msr, MSR, IR);

    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS6] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
        env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS;

    env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID;
    env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK;

        switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) {
        case MAS4_TIDSELD_PID0:
            missed_tid = env->spr[SPR_BOOKE_PID];
        case MAS4_TIDSELD_PID1:
            missed_tid = env->spr[SPR_BOOKE_PID1];
        case MAS4_TIDSELD_PID2:
            missed_tid = env->spr[SPR_BOOKE_PID2];

        env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16;

        env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16;

    env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT);

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
/* Perform address translation */
/* TODO: Split this by mmu_model. */
static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr,
                            MMUAccessType access_type,
                            hwaddr *raddrp, int *psizep, int *protp,
                            int mmu_idx, bool guest_visible)
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    if (access_type == MMU_INST_FETCH) {
    } else if (guest_visible) {
        type = env->access_type;

    ret = get_physical_address_wtlb(env, &ctx, eaddr, access_type,
        *raddrp = ctx.raddr;
        *psizep = TARGET_PAGE_BITS;

    if (guest_visible) {
        log_cpu_state_mask(CPU_LOG_MMU, cs, 0);
        if (type == ACCESS_CODE) {
                /* No matches in page tables or TLB */
                switch (env->mmu_model) {
                case POWERPC_MMU_SOFT_6xx:
                    cs->exception_index = POWERPC_EXCP_IFTLB;
                    env->error_code = 1 << 18;
                    env->spr[SPR_IMISS] = eaddr;
                    env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
                case POWERPC_MMU_SOFT_4xx:
                    cs->exception_index = POWERPC_EXCP_ITLB;
                    env->error_code = 0;
                    env->spr[SPR_40x_DEAR] = eaddr;
                    env->spr[SPR_40x_ESR] = 0x00000000;
                case POWERPC_MMU_BOOKE206:
                    booke206_update_mas_tlb_miss(env, eaddr, 2, mmu_idx);
                case POWERPC_MMU_BOOKE:
                    cs->exception_index = POWERPC_EXCP_ITLB;
                    env->error_code = 0;
                    env->spr[SPR_BOOKE_DEAR] = eaddr;
                    env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, MMU_DATA_LOAD);
                case POWERPC_MMU_MPC8xx:
                    cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
                case POWERPC_MMU_REAL:
                    cpu_abort(cs, "PowerPC in real mode should never raise "
                              "any MMU exceptions\n");

                    cpu_abort(cs, "Unknown or invalid MMU model\n");

                /* Access rights violation */
                cs->exception_index = POWERPC_EXCP_ISI;
                if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                    (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->error_code = 0;

                    env->error_code = 0x08000000;

                /* No execute protection violation */
                if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                    (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->spr[SPR_BOOKE_ESR] = 0x00000000;
                    env->error_code = 0;

                    env->error_code = 0x10000000;

                cs->exception_index = POWERPC_EXCP_ISI;

                /* Direct store exception */
                /* No code fetch is allowed in direct-store areas */
                cs->exception_index = POWERPC_EXCP_ISI;
                if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                    (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->error_code = 0;

                    env->error_code = 0x10000000;

                /* No matches in page tables or TLB */
                switch (env->mmu_model) {
                case POWERPC_MMU_SOFT_6xx:
                    if (access_type == MMU_DATA_STORE) {
                        cs->exception_index = POWERPC_EXCP_DSTLB;
                        env->error_code = 1 << 16;

                        cs->exception_index = POWERPC_EXCP_DLTLB;
                        env->error_code = 0;

                    env->spr[SPR_DMISS] = eaddr;
                    env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
                    env->error_code |= ctx.key << 19;
                    env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) +
                                          get_pteg_offset32(cpu, ctx.hash[0]);
                    env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) +
                                          get_pteg_offset32(cpu, ctx.hash[1]);
                case POWERPC_MMU_SOFT_4xx:
                    cs->exception_index = POWERPC_EXCP_DTLB;
                    env->error_code = 0;
                    env->spr[SPR_40x_DEAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_40x_ESR] = 0x00800000;

                        env->spr[SPR_40x_ESR] = 0x00000000;

                case POWERPC_MMU_MPC8xx:
                    cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
                case POWERPC_MMU_BOOKE206:
                    booke206_update_mas_tlb_miss(env, eaddr, access_type, mmu_idx);
                case POWERPC_MMU_BOOKE:
                    cs->exception_index = POWERPC_EXCP_DTLB;
                    env->error_code = 0;
                    env->spr[SPR_BOOKE_DEAR] = eaddr;
                    env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type);
                case POWERPC_MMU_REAL:
                    cpu_abort(cs, "PowerPC in real mode should never raise "
                              "any MMU exceptions\n");

                    cpu_abort(cs, "Unknown or invalid MMU model\n");

                /* Access rights violation */
                cs->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                if (env->mmu_model == POWERPC_MMU_SOFT_4xx) {
                    env->spr[SPR_40x_DEAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_40x_ESR] |= 0x00800000;

                } else if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                           (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->spr[SPR_BOOKE_DEAR] = eaddr;
                    env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type);

                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x0A000000;

                        env->spr[SPR_DSISR] = 0x08000000;

                /* Direct store exception */
                    /* Floating point load/store */
                    cs->exception_index = POWERPC_EXCP_ALIGN;
                    env->error_code = POWERPC_EXCP_ALIGN_FP;
                    env->spr[SPR_DAR] = eaddr;

                    /* lwarx, ldarx or stwcx. */
                    cs->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x06000000;

                        env->spr[SPR_DSISR] = 0x04000000;

                    /* eciwx or ecowx */
                    cs->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x06100000;

                        env->spr[SPR_DSISR] = 0x04100000;

                    printf("DSI: invalid exception (%d)\n", ret);
                    cs->exception_index = POWERPC_EXCP_PROGRAM;
                        POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
                    env->spr[SPR_DAR] = eaddr;

/*****************************************************************************/
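
/*
 * ppc_xlate() is the common entry point: Book3S-64 models dispatch to the
 * radix or hash64 translators, 32-bit hash MMUs go to ppc_hash32_xlate(),
 * and everything else falls back to ppc_jumbo_xlate() above.
 */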
bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
               hwaddr *raddrp, int *psizep, int *protp,
               int mmu_idx, bool guest_visible)
    switch (cpu->env.mmu_model) {
#if defined(TARGET_PPC64)
    case POWERPC_MMU_3_00:
        if (ppc64_v3_radix(cpu)) {
            return ppc_radix64_xlate(cpu, eaddr, access_type, raddrp,
                                     psizep, protp, mmu_idx, guest_visible);
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_03:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_07:
        return ppc_hash64_xlate(cpu, eaddr, access_type,
                                raddrp, psizep, protp, mmu_idx, guest_visible);

    case POWERPC_MMU_32B:
        return ppc_hash32_xlate(cpu, eaddr, access_type, raddrp,
                                psizep, protp, mmu_idx, guest_visible);

        return ppc_jumbo_xlate(cpu, eaddr, access_type, raddrp,
                               psizep, protp, mmu_idx, guest_visible);

hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
    PowerPCCPU *cpu = POWERPC_CPU(cs);

    /*
     * Some MMUs have separate TLBs for code and data. If we only
     * try an MMU_DATA_LOAD, we may not be able to read instructions
     * mapped by code TLBs, so we also try a MMU_INST_FETCH.
     */
    if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p,
                  cpu_mmu_index(&cpu->env, false), false) ||
        ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p,
                  cpu_mmu_index(&cpu->env, true), false)) {
        return raddr & TARGET_PAGE_MASK;