/*
 *  PowerPC emulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec-all.h"
#include "helper_regs.h"
#include "qemu-common.h"

//#define DEBUG_MMU
//#define DEBUG_BATS
//#define DEBUG_SLB
//#define DEBUG_SOFTWARE_TLB
//#define DUMP_PAGE_TABLES
//#define DEBUG_EXCEPTIONS
//#define FLUSH_ALL_TLBS
#ifdef DEBUG_MMU
#  define LOG_MMU(...) qemu_log(__VA_ARGS__)
#  define LOG_MMU_STATE(env) log_cpu_state((env), 0)
#else
#  define LOG_MMU(...) do { } while (0)
#  define LOG_MMU_STATE(...) do { } while (0)
#endif

#ifdef DEBUG_SOFTWARE_TLB
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SWTLB(...) do { } while (0)
#endif

#ifdef DEBUG_BATS
#  define LOG_BATS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_BATS(...) do { } while (0)
#endif

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif

#ifdef DEBUG_EXCEPTIONS
#  define LOG_EXCP(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_EXCP(...) do { } while (0)
#endif
/*****************************************************************************/
/* PowerPC MMU emulation */
#if defined(CONFIG_USER_ONLY)
int cpu_ppc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                              int mmu_idx, int is_softmmu)
{
    int exception, error_code;

    if (rw == 2) {
        exception = POWERPC_EXCP_ISI;
        error_code = 0x40000000;
    } else {
        exception = POWERPC_EXCP_DSI;
        error_code = 0x40000000;
        if (rw)
            error_code |= 0x02000000;
        env->spr[SPR_DAR] = address;
        env->spr[SPR_DSISR] = error_code;
    }
    env->exception_index = exception;
    env->error_code = error_code;

    return 1;
}
target_phys_addr_t cpu_get_phys_page_debug (CPUState *env, target_ulong addr)
{
    return addr;
}

#else
/* Common routines used by software and hardware TLBs emulation */
static inline int pte_is_valid(target_ulong pte0)
{
    return pte0 & 0x80000000 ? 1 : 0;
}

static inline void pte_invalidate(target_ulong *pte0)
{
    *pte0 &= ~0x80000000;
}

#if defined(TARGET_PPC64)
static inline int pte64_is_valid(target_ulong pte0)
{
    return pte0 & 0x0000000000000001ULL ? 1 : 0;
}

static inline void pte64_invalidate(target_ulong *pte0)
{
    *pte0 &= ~0x0000000000000001ULL;
}
#endif
#define PTE_PTEM_MASK 0x7FFFFFBF
#define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
#if defined(TARGET_PPC64)
#define PTE64_PTEM_MASK 0xFFFFFFFFFFFFFF80ULL
#define PTE64_CHECK_MASK (TARGET_PAGE_MASK | 0x7F)
#endif
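
/* In the 32-bit hashed page table format, the first PTE word carries the
 * valid bit (0x80000000), the VSID, the secondary-hash bit H (0x40) and the
 * abbreviated page index (API); the second word carries the RPN, the R
 * (0x00000100) and C (0x00000080) reference/change bits, the WIMG attributes
 * (0x78) and the PP protection bits (0x3).  The PTEM/CHECK masks above pick
 * out the "match" and "must be consistent" parts of those two words, and the
 * 64-bit variants do the same for the 64-bit hashed page table layout.
 */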
static inline int pp_check(int key, int pp, int nx)
{
    int access;

    /* Compute access rights */
    /* When pp is 3/7, the result is undefined. Set it to noaccess */
    access = 0;
    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            access |= PAGE_WRITE;
            /* No break here */
        case 0x3:
        case 0x6:
            access |= PAGE_READ;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            access = 0;
            break;
        case 0x1:
        case 0x3:
            access = PAGE_READ;
            break;
        case 0x2:
            access = PAGE_READ | PAGE_WRITE;
            break;
        }
    }
    if (nx == 0)
        access |= PAGE_EXEC;

    return access;
}
static inline int check_prot(int prot, int rw, int access_type)
{
    int ret;

    if (access_type == ACCESS_CODE) {
        if (prot & PAGE_EXEC)
            ret = 0;
        else
            ret = -2;
    } else if (rw) {
        if (prot & PAGE_WRITE)
            ret = 0;
        else
            ret = -2;
    } else {
        if (prot & PAGE_READ)
            ret = 0;
        else
            ret = -2;
    }

    return ret;
}
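
/* _pte_check() compares one PTE against the context built by the segment
 * lookup: the PTE must be valid, its H bit must match the hash (primary or
 * secondary) used to reach it, and its VSID/API field must equal ctx->ptem.
 * On a match it records the real address and the access rights derived from
 * the key/PP bits and returns the check_prot() verdict.
 */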
static inline int _pte_check(mmu_ctx_t *ctx, int is_64b, target_ulong pte0,
                             target_ulong pte1, int h, int rw, int type)
{
    target_ulong ptem, mmask;
    int access, ret, pteh, ptev, pp;

    access = 0;
    ret = -1;
    /* Check validity and table match */
#if defined(TARGET_PPC64)
    if (is_64b) {
        ptev = pte64_is_valid(pte0);
        pteh = (pte0 >> 1) & 1;
    } else
#endif
    {
        ptev = pte_is_valid(pte0);
        pteh = (pte0 >> 6) & 1;
    }
    if (ptev && h == pteh) {
        /* Check vsid & api */
#if defined(TARGET_PPC64)
        if (is_64b) {
            ptem = pte0 & PTE64_PTEM_MASK;
            mmask = PTE64_CHECK_MASK;
            pp = (pte1 & 0x00000003) | ((pte1 >> 61) & 0x00000004);
            ctx->nx  = (pte1 >> 2) & 1; /* No execute bit */
            ctx->nx |= (pte1 >> 3) & 1; /* Guarded bit    */
        } else
#endif
        {
            ptem = pte0 & PTE_PTEM_MASK;
            mmask = PTE_CHECK_MASK;
            pp = pte1 & 0x00000003;
        }
        if (ptem == ctx->ptem) {
            if (ctx->raddr != (target_phys_addr_t)-1ULL) {
                /* all matches should have equal RPN, WIMG & PP */
                if ((ctx->raddr & mmask) != (pte1 & mmask)) {
                    qemu_log("Bad RPN/WIMG/PP\n");
                    return -3;
                }
            }
            /* Compute access rights */
            access = pp_check(ctx->key, pp, ctx->nx);
            /* Keep the matching PTE information */
            ctx->raddr = pte1;
            ctx->prot = access;
            ret = check_prot(ctx->prot, rw, type);
            if (ret == 0) {
                /* Access granted */
                LOG_MMU("PTE access granted !\n");
            } else {
                /* Access right violation */
                LOG_MMU("PTE access rejected\n");
            }
        }
    }

    return ret;
}
static inline int pte32_check(mmu_ctx_t *ctx, target_ulong pte0,
                              target_ulong pte1, int h, int rw, int type)
{
    return _pte_check(ctx, 0, pte0, pte1, h, rw, type);
}

#if defined(TARGET_PPC64)
static inline int pte64_check(mmu_ctx_t *ctx, target_ulong pte0,
                              target_ulong pte1, int h, int rw, int type)
{
    return _pte_check(ctx, 1, pte0, pte1, h, rw, type);
}
#endif
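
/* The R (referenced, 0x100) and C (changed, 0x80) bits of a matching PTE are
 * maintained in software: R is set on any access, C only when a permitted
 * store hits the page.  While C is still clear, PAGE_WRITE is stripped from
 * the mapping so that the first store faults and comes back here to set the
 * changed bit.
 */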
static inline int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
                                   int ret, int rw)
{
    int store = 0;

    /* Update page flags */
    if (!(*pte1p & 0x00000100)) {
        /* Update accessed flag */
        *pte1p |= 0x00000100;
        store = 1;
    }
    if (!(*pte1p & 0x00000080)) {
        if (rw == 1 && ret == 0) {
            /* Update changed flag */
            *pte1p |= 0x00000080;
            store = 1;
        } else {
            /* Force page fault for first write access */
            ctx->prot &= ~PAGE_WRITE;
        }
    }

    return store;
}
/* Software driven TLB helpers */
static inline int ppc6xx_tlb_getnum(CPUState *env, target_ulong eaddr, int way,
                                    int is_code)
{
    int nr;

    /* Select TLB num in a way from address */
    nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
    /* Select TLB way */
    nr += env->tlb_per_way * way;
    /* 6xx have separate TLBs for instructions and data */
    if (is_code && env->id_tlbs == 1)
        nr += env->nb_tlb;

    return nr;
}
static inline void ppc6xx_tlb_invalidate_all(CPUState *env)
{
    ppc6xx_tlb_t *tlb;
    int nr, max;

    //LOG_SWTLB("Invalidate all TLBs\n");
    /* Invalidate all defined software TLB */
    max = env->nb_tlb;
    if (env->id_tlbs == 1)
        max *= 2;
    for (nr = 0; nr < max; nr++) {
        tlb = &env->tlb[nr].tlb6;
        pte_invalidate(&tlb->pte0);
    }
    tlb_flush(env, 1);
}
static inline void __ppc6xx_tlb_invalidate_virt(CPUState *env,
                                                target_ulong eaddr,
                                                int is_code, int match_epn)
{
#if !defined(FLUSH_ALL_TLBS)
    ppc6xx_tlb_t *tlb;
    int way, nr;

    /* Invalidate ITLB + DTLB, all ways */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
        tlb = &env->tlb[nr].tlb6;
        if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
            LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr,
                      env->nb_tlb, eaddr);
            pte_invalidate(&tlb->pte0);
            tlb_flush_page(env, tlb->EPN);
        }
    }
#else
    /* XXX: PowerPC specification says this is valid as well */
    ppc6xx_tlb_invalidate_all(env);
#endif
}

static inline void ppc6xx_tlb_invalidate_virt(CPUState *env,
                                              target_ulong eaddr, int is_code)
{
    __ppc6xx_tlb_invalidate_virt(env, eaddr, is_code, 0);
}
void ppc6xx_tlb_store (CPUState *env, target_ulong EPN, int way, int is_code,
                       target_ulong pte0, target_ulong pte1)
{
    ppc6xx_tlb_t *tlb;
    int nr;

    nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
    tlb = &env->tlb[nr].tlb6;
    LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1);
    /* Invalidate any pending reference in Qemu for this virtual address */
    __ppc6xx_tlb_invalidate_virt(env, EPN, is_code, 1);
    tlb->pte0 = pte0;
    tlb->pte1 = pte1;
    tlb->EPN = EPN;
    /* Store last way for LRU mechanism */
    env->last_way = way;
}
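
/* ppc6xx_tlb_check() emulates the 6xx/74xx software-managed TLBs: every way
 * of the instruction or data TLB selected by the effective address is probed
 * with pte32_check(), exactly as a hardware PTE comparison would be done
 * during a table walk.  A miss returns -1 so the caller can raise the
 * software TLB-miss exception and let the guest reload the entry itself.
 */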
static inline int ppc6xx_tlb_check(CPUState *env, mmu_ctx_t *ctx,
                                   target_ulong eaddr, int rw, int access_type)
{
    ppc6xx_tlb_t *tlb;
    int nr, best, way;
    int ret;

    best = -1;
    ret = -1; /* No TLB found */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way,
                               access_type == ACCESS_CODE ? 1 : 0);
        tlb = &env->tlb[nr].tlb6;
        /* This test "emulates" the PTE index match for hardware TLBs */
        if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
            LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx
                      "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb,
                      pte_is_valid(tlb->pte0) ? "valid" : "inval",
                      tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
            continue;
        }
        LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " "
                  TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb,
                  pte_is_valid(tlb->pte0) ? "valid" : "inval",
                  tlb->EPN, eaddr, tlb->pte1,
                  rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D');
        switch (pte32_check(ctx, tlb->pte0, tlb->pte1, 0, rw, access_type)) {
        case -3:
            /* TLB inconsistency */
            return -1;
        case -2:
            /* Access violation */
            ret = -2;
            best = nr;
            break;
        case -1:
        default:
            /* No match */
            break;
        case 0:
            /* access granted */
            /* XXX: we should go on looping to check all TLBs consistency
             *      but we can speed-up the whole thing as the
             *      result would be undefined if TLBs are not consistent.
             */
            ret = 0;
            best = nr;
            goto done;
        }
    }
    if (best != -1) {
    done:
        LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n",
                  ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
        /* Update page flags */
        pte_update_flags(ctx, &env->tlb[best].tlb6.pte1, ret, rw);
    }

    return ret;
}
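
/* Each BAT register pair (BATu/BATl) maps one naturally aligned block of at
 * least 128kB.  BATu holds the block effective page index (BEPI), the block
 * length and the Vs/Vp valid bits, BATl the block real page number and the
 * PP protection bits; the 601 keeps its block length and valid bit in the
 * lower register instead, which is why it gets its own size/prot helper.
 */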
/* Perform BAT hit & translation */
static inline void bat_size_prot(CPUState *env, target_ulong *blp, int *validp,
                                 int *protp, target_ulong *BATu,
                                 target_ulong *BATl)
{
    target_ulong bl;
    int pp, valid, prot;

    bl = (*BATu & 0x00001FFC) << 15;
    valid = 0;
    prot = 0;
    if (((msr_pr == 0) && (*BATu & 0x00000002)) ||
        ((msr_pr != 0) && (*BATu & 0x00000001))) {
        valid = 1;
        pp = *BATl & 0x00000003;
        if (pp != 0) {
            prot = PAGE_READ | PAGE_EXEC;
            if (pp == 0x2)
                prot |= PAGE_WRITE;
        }
    }
    *blp = bl;
    *validp = valid;
    *protp = prot;
}

static inline void bat_601_size_prot(CPUState *env, target_ulong *blp,
                                     int *validp, int *protp,
                                     target_ulong *BATu, target_ulong *BATl)
{
    target_ulong bl;
    int key, pp, valid, prot;

    bl = (*BATl & 0x0000003F) << 17;
    LOG_BATS("b %02x ==> bl " TARGET_FMT_lx " msk " TARGET_FMT_lx "\n",
             (uint8_t)(*BATl & 0x0000003F), bl, ~bl);
    prot = 0;
    valid = (*BATl >> 6) & 1;
    if (valid) {
        pp = *BATu & 0x00000003;
        if (msr_pr == 0)
            key = (*BATu >> 3) & 1;
        else
            key = (*BATu >> 2) & 1;
        prot = pp_check(key, pp, 0);
    }
    *blp = bl;
    *validp = valid;
    *protp = prot;
}

static inline int get_bat(CPUState *env, mmu_ctx_t *ctx, target_ulong virtual,
                          int rw, int type)
{
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong base, BEPIl, BEPIu, bl;
    int i, valid, prot;
    int ret = -1;

    LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
             type == ACCESS_CODE ? 'I' : 'D', virtual);
    switch (type) {
    case ACCESS_CODE:
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
        break;
    default:
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
        break;
    }
    base = virtual & 0xFFFC0000;
    for (i = 0; i < env->nb_BATs; i++) {
        BATu = &BATut[i];
        BATl = &BATlt[i];
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
            bat_601_size_prot(env, &bl, &valid, &prot, BATu, BATl);
        } else {
            bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
        }
        LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                 " BATl " TARGET_FMT_lx "\n", __func__,
                 type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl);
        if ((virtual & 0xF0000000) == BEPIu &&
            ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
            /* BAT matches */
            if (valid != 0) {
                /* Get physical address */
                ctx->raddr = (*BATl & 0xF0000000) |
                    ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
                    (virtual & 0x0001F000);
                /* Compute access rights */
                ctx->prot = prot;
                ret = check_prot(ctx->prot, rw, type);
                if (ret == 0)
                    LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n",
                             i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-',
                             ctx->prot & PAGE_WRITE ? 'W' : '-');
                break;
            }
        }
    }
    if (ret < 0) {
#if defined(DEBUG_BATS)
        if (qemu_log_enabled()) {
            LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual);
            for (i = 0; i < 4; i++) {
                BATu = &BATut[i];
                BATl = &BATlt[i];
                BEPIu = *BATu & 0xF0000000;
                BEPIl = *BATu & 0x0FFE0000;
                bl = (*BATu & 0x00001FFC) << 15;
                LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                         " BATl " TARGET_FMT_lx " \n\t" TARGET_FMT_lx " "
                         TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                         __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual,
                         *BATu, *BATl, BEPIu, BEPIl, bl);
            }
        }
#endif
    }
    /* No hit */
    return ret;
}
574 /* PTE table lookup */
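
/* The lookup below mirrors the architected hashed page table walk: the
 * primary hash is the VSID XORed with the page index, the secondary hash is
 * its one's complement, and each of the two selected PTE groups holds eight
 * PTEs (8 bytes each on 32-bit, 16 bytes each on 64-bit implementations)
 * that are scanned linearly with pte32_check()/pte64_check().
 */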
575 static inline int _find_pte(mmu_ctx_t
*ctx
, int is_64b
, int h
, int rw
,
576 int type
, int target_page_bits
)
578 target_ulong base
, pte0
, pte1
;
582 ret
= -1; /* No entry found */
583 base
= ctx
->pg_addr
[h
];
584 for (i
= 0; i
< 8; i
++) {
585 #if defined(TARGET_PPC64)
587 pte0
= ldq_phys(base
+ (i
* 16));
588 pte1
= ldq_phys(base
+ (i
* 16) + 8);
590 /* We have a TLB that saves 4K pages, so let's
591 * split a huge page to 4k chunks */
592 if (target_page_bits
!= TARGET_PAGE_BITS
)
593 pte1
|= (ctx
->eaddr
& (( 1 << target_page_bits
) - 1))
596 r
= pte64_check(ctx
, pte0
, pte1
, h
, rw
, type
);
597 LOG_MMU("Load pte from " TARGET_FMT_lx
" => " TARGET_FMT_lx
" "
598 TARGET_FMT_lx
" %d %d %d " TARGET_FMT_lx
"\n",
599 base
+ (i
* 16), pte0
, pte1
, (int)(pte0
& 1), h
,
600 (int)((pte0
>> 1) & 1), ctx
->ptem
);
604 pte0
= ldl_phys(base
+ (i
* 8));
605 pte1
= ldl_phys(base
+ (i
* 8) + 4);
606 r
= pte32_check(ctx
, pte0
, pte1
, h
, rw
, type
);
607 LOG_MMU("Load pte from " TARGET_FMT_lx
" => " TARGET_FMT_lx
" "
608 TARGET_FMT_lx
" %d %d %d " TARGET_FMT_lx
"\n",
609 base
+ (i
* 8), pte0
, pte1
, (int)(pte0
>> 31), h
,
610 (int)((pte0
>> 6) & 1), ctx
->ptem
);
614 /* PTE inconsistency */
617 /* Access violation */
            /* XXX: we should go on looping to check all PTEs consistency
             *      but we can speed-up the whole thing as the
             *      result would be undefined if PTEs are not consistent.
             */
638 LOG_MMU("found PTE at addr " TARGET_FMT_lx
" prot=%01x ret=%d\n",
639 ctx
->raddr
, ctx
->prot
, ret
);
640 /* Update page flags */
642 if (pte_update_flags(ctx
, &pte1
, ret
, rw
) == 1) {
643 #if defined(TARGET_PPC64)
645 stq_phys_notdirty(base
+ (good
* 16) + 8, pte1
);
649 stl_phys_notdirty(base
+ (good
* 8) + 4, pte1
);
657 static inline int find_pte32(mmu_ctx_t
*ctx
, int h
, int rw
, int type
,
658 int target_page_bits
)
660 return _find_pte(ctx
, 0, h
, rw
, type
, target_page_bits
);
663 #if defined(TARGET_PPC64)
664 static inline int find_pte64(mmu_ctx_t
*ctx
, int h
, int rw
, int type
,
665 int target_page_bits
)
667 return _find_pte(ctx
, 1, h
, rw
, type
, target_page_bits
);
671 static inline int find_pte(CPUState
*env
, mmu_ctx_t
*ctx
, int h
, int rw
,
672 int type
, int target_page_bits
)
674 #if defined(TARGET_PPC64)
675 if (env
->mmu_model
& POWERPC_MMU_64
)
676 return find_pte64(ctx
, h
, rw
, type
, target_page_bits
);
679 return find_pte32(ctx
, h
, rw
, type
, target_page_bits
);
682 #if defined(TARGET_PPC64)
683 static ppc_slb_t
*slb_get_entry(CPUPPCState
*env
, int nr
)
685 ppc_slb_t
*retval
= &env
->slb
[nr
];
687 #if 0 // XXX implement bridge mode?
688 if (env
->spr
[SPR_ASR
] & 1) {
689 target_phys_addr_t sr_base
;
691 sr_base
= env
->spr
[SPR_ASR
] & 0xfffffffffffff000;
692 sr_base
+= (12 * nr
);
694 retval
->tmp64
= ldq_phys(sr_base
);
695 retval
->tmp
= ldl_phys(sr_base
+ 8);
702 static void slb_set_entry(CPUPPCState
*env
, int nr
, ppc_slb_t
*slb
)
704 ppc_slb_t
*entry
= &env
->slb
[nr
];
709 entry
->tmp64
= slb
->tmp64
;
710 entry
->tmp
= slb
->tmp
;
713 static inline int slb_is_valid(ppc_slb_t
*slb
)
715 return (int)(slb
->tmp64
& 0x0000000008000000ULL
);
718 static inline void slb_invalidate(ppc_slb_t
*slb
)
720 slb
->tmp64
&= ~0x0000000008000000ULL
;
723 static inline int slb_lookup(CPUPPCState
*env
, target_ulong eaddr
,
724 target_ulong
*vsid
, target_ulong
*page_mask
,
725 int *attr
, int *target_page_bits
)
731 LOG_SLB("%s: eaddr " TARGET_FMT_lx
"\n", __func__
, eaddr
);
732 mask
= 0x0000000000000000ULL
; /* Avoid gcc warning */
733 for (n
= 0; n
< env
->slb_nr
; n
++) {
734 ppc_slb_t
*slb
= slb_get_entry(env
, n
);
736 LOG_SLB("%s: seg %d %016" PRIx64
" %08"
737 PRIx32
"\n", __func__
, n
, slb
->tmp64
, slb
->tmp
);
738 if (slb_is_valid(slb
)) {
739 /* SLB entry is valid */
740 if (slb
->tmp
& 0x8) {
742 mask
= 0xFFFF000000000000ULL
;
743 if (target_page_bits
)
744 *target_page_bits
= 24; // XXX 16M pages?
747 mask
= 0xFFFFFFFFF0000000ULL
;
748 if (target_page_bits
)
749 *target_page_bits
= TARGET_PAGE_BITS
;
751 if ((eaddr
& mask
) == (slb
->tmp64
& mask
)) {
753 *vsid
= ((slb
->tmp64
<< 24) | (slb
->tmp
>> 8)) & 0x0003FFFFFFFFFFFFULL
;
755 *attr
= slb
->tmp
& 0xFF;
765 void ppc_slb_invalidate_all (CPUPPCState
*env
)
767 int n
, do_invalidate
;
770 /* XXX: Warning: slbia never invalidates the first segment */
771 for (n
= 1; n
< env
->slb_nr
; n
++) {
772 ppc_slb_t
*slb
= slb_get_entry(env
, n
);
774 if (slb_is_valid(slb
)) {
776 slb_set_entry(env
, n
, slb
);
777 /* XXX: given the fact that segment size is 256 MB or 1TB,
778 * and we still don't have a tlb_flush_mask(env, n, mask)
779 * in Qemu, we just invalidate all TLBs
788 void ppc_slb_invalidate_one (CPUPPCState
*env
, uint64_t T0
)
790 target_ulong vsid
, page_mask
;
794 n
= slb_lookup(env
, T0
, &vsid
, &page_mask
, &attr
, NULL
);
796 ppc_slb_t
*slb
= slb_get_entry(env
, n
);
798 if (slb_is_valid(slb
)) {
800 slb_set_entry(env
, n
, slb
);
801 /* XXX: given the fact that segment size is 256 MB or 1TB,
802 * and we still don't have a tlb_flush_mask(env, n, mask)
803 * in Qemu, we just invalidate all TLBs
810 target_ulong
ppc_load_slb (CPUPPCState
*env
, int slb_nr
)
813 ppc_slb_t
*slb
= slb_get_entry(env
, slb_nr
);
815 if (slb_is_valid(slb
)) {
816 /* SLB entry is valid */
817 /* Copy SLB bits 62:88 to Rt 37:63 (VSID 23:49) */
818 rt
= slb
->tmp
>> 8; /* 65:88 => 40:63 */
819 rt
|= (slb
->tmp64
& 0x7) << 24; /* 62:64 => 37:39 */
820 /* Copy SLB bits 89:92 to Rt 33:36 (KsKpNL) */
821 rt
|= ((slb
->tmp
>> 4) & 0xF) << 27;
825 LOG_SLB("%s: %016" PRIx64
" %08" PRIx32
" => %d "
826 TARGET_FMT_lx
"\n", __func__
, slb
->tmp64
, slb
->tmp
, slb_nr
, rt
);
831 void ppc_store_slb (CPUPPCState
*env
, target_ulong rb
, target_ulong rs
)
837 int flags
, valid
, slb_nr
;
840 flags
= ((rs
>> 8) & 0xf);
843 valid
= (rb
& (1 << 27));
846 slb
= slb_get_entry(env
, slb_nr
);
847 slb
->tmp64
= (esid
<< 28) | valid
| (vsid
>> 24);
848 slb
->tmp
= (vsid
<< 8) | (flags
<< 3);
850 LOG_SLB("%s: %d " TARGET_FMT_lx
" - " TARGET_FMT_lx
" => %016" PRIx64
851 " %08" PRIx32
"\n", __func__
, slb_nr
, rb
, rs
, slb
->tmp64
,
854 slb_set_entry(env
, slb_nr
, slb
);
856 #endif /* defined(TARGET_PPC64) */
858 /* Perform segment based translation */
859 static inline target_phys_addr_t
get_pgaddr(target_phys_addr_t sdr1
,
861 target_phys_addr_t hash
,
862 target_phys_addr_t mask
)
864 return (sdr1
& ((target_phys_addr_t
)(-1ULL) << sdr_sh
)) | (hash
& mask
);
867 static inline int get_segment(CPUState
*env
, mmu_ctx_t
*ctx
,
868 target_ulong eaddr
, int rw
, int type
)
870 target_phys_addr_t sdr
, hash
, mask
, sdr_mask
, htab_mask
;
871 target_ulong sr
, vsid
, vsid_mask
, pgidx
, page_mask
;
872 #if defined(TARGET_PPC64)
875 int ds
, vsid_sh
, sdr_sh
, pr
, target_page_bits
;
879 #if defined(TARGET_PPC64)
880 if (env
->mmu_model
& POWERPC_MMU_64
) {
881 LOG_MMU("Check SLBs\n");
882 ret
= slb_lookup(env
, eaddr
, &vsid
, &page_mask
, &attr
,
886 ctx
->key
= ((attr
& 0x40) && (pr
!= 0)) ||
887 ((attr
& 0x80) && (pr
== 0)) ? 1 : 0;
889 ctx
->nx
= attr
& 0x10 ? 1 : 0;
891 vsid_mask
= 0x00003FFFFFFFFF80ULL
;
896 #endif /* defined(TARGET_PPC64) */
898 sr
= env
->sr
[eaddr
>> 28];
899 page_mask
= 0x0FFFFFFF;
900 ctx
->key
= (((sr
& 0x20000000) && (pr
!= 0)) ||
901 ((sr
& 0x40000000) && (pr
== 0))) ? 1 : 0;
902 ds
= sr
& 0x80000000 ? 1 : 0;
903 ctx
->nx
= sr
& 0x10000000 ? 1 : 0;
904 vsid
= sr
& 0x00FFFFFF;
905 vsid_mask
= 0x01FFFFC0;
909 target_page_bits
= TARGET_PAGE_BITS
;
910 LOG_MMU("Check segment v=" TARGET_FMT_lx
" %d " TARGET_FMT_lx
" nip="
911 TARGET_FMT_lx
" lr=" TARGET_FMT_lx
912 " ir=%d dr=%d pr=%d %d t=%d\n",
913 eaddr
, (int)(eaddr
>> 28), sr
, env
->nip
, env
->lr
, (int)msr_ir
,
914 (int)msr_dr
, pr
!= 0 ? 1 : 0, rw
, type
);
916 LOG_MMU("pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx
"\n",
917 ctx
->key
, ds
, ctx
->nx
, vsid
);
920 /* Check if instruction fetch is allowed, if needed */
921 if (type
!= ACCESS_CODE
|| ctx
->nx
== 0) {
922 /* Page address translation */
923 /* Primary table address */
925 pgidx
= (eaddr
& page_mask
) >> target_page_bits
;
926 #if defined(TARGET_PPC64)
927 if (env
->mmu_model
& POWERPC_MMU_64
) {
928 htab_mask
= 0x0FFFFFFF >> (28 - (sdr
& 0x1F));
929 /* XXX: this is false for 1 TB segments */
930 hash
= ((vsid
^ pgidx
) << vsid_sh
) & vsid_mask
;
934 htab_mask
= sdr
& 0x000001FF;
935 hash
= ((vsid
^ pgidx
) << vsid_sh
) & vsid_mask
;
937 mask
= (htab_mask
<< sdr_sh
) | sdr_mask
;
938 LOG_MMU("sdr " TARGET_FMT_plx
" sh %d hash " TARGET_FMT_plx
939 " mask " TARGET_FMT_plx
" " TARGET_FMT_lx
"\n",
940 sdr
, sdr_sh
, hash
, mask
, page_mask
);
941 ctx
->pg_addr
[0] = get_pgaddr(sdr
, sdr_sh
, hash
, mask
);
942 /* Secondary table address */
943 hash
= (~hash
) & vsid_mask
;
944 LOG_MMU("sdr " TARGET_FMT_plx
" sh %d hash " TARGET_FMT_plx
945 " mask " TARGET_FMT_plx
"\n", sdr
, sdr_sh
, hash
, mask
);
946 ctx
->pg_addr
[1] = get_pgaddr(sdr
, sdr_sh
, hash
, mask
);
947 #if defined(TARGET_PPC64)
948 if (env
->mmu_model
& POWERPC_MMU_64
) {
949 /* Only 5 bits of the page index are used in the AVPN */
950 if (target_page_bits
> 23) {
951 ctx
->ptem
= (vsid
<< 12) |
952 ((pgidx
<< (target_page_bits
- 16)) & 0xF80);
954 ctx
->ptem
= (vsid
<< 12) | ((pgidx
>> 4) & 0x0F80);
959 ctx
->ptem
= (vsid
<< 7) | (pgidx
>> 10);
961 /* Initialize real address with an invalid value */
962 ctx
->raddr
= (target_phys_addr_t
)-1ULL;
963 if (unlikely(env
->mmu_model
== POWERPC_MMU_SOFT_6xx
||
964 env
->mmu_model
== POWERPC_MMU_SOFT_74xx
)) {
965 /* Software TLB search */
966 ret
= ppc6xx_tlb_check(env
, ctx
, eaddr
, rw
, type
);
968 LOG_MMU("0 sdr1=" TARGET_FMT_plx
" vsid=" TARGET_FMT_lx
" "
969 "api=" TARGET_FMT_lx
" hash=" TARGET_FMT_plx
970 " pg_addr=" TARGET_FMT_plx
"\n",
971 sdr
, vsid
, pgidx
, hash
, ctx
->pg_addr
[0]);
972 /* Primary table lookup */
973 ret
= find_pte(env
, ctx
, 0, rw
, type
, target_page_bits
);
975 /* Secondary table lookup */
976 if (eaddr
!= 0xEFFFFFFF)
977 LOG_MMU("1 sdr1=" TARGET_FMT_plx
" vsid=" TARGET_FMT_lx
" "
978 "api=" TARGET_FMT_lx
" hash=" TARGET_FMT_plx
979 " pg_addr=" TARGET_FMT_plx
"\n", sdr
, vsid
,
980 pgidx
, hash
, ctx
->pg_addr
[1]);
981 ret2
= find_pte(env
, ctx
, 1, rw
, type
,
987 #if defined (DUMP_PAGE_TABLES)
988 if (qemu_log_enabled()) {
989 target_phys_addr_t curaddr
;
990 uint32_t a0
, a1
, a2
, a3
;
991 qemu_log("Page table: " TARGET_FMT_plx
" len " TARGET_FMT_plx
992 "\n", sdr
, mask
+ 0x80);
993 for (curaddr
= sdr
; curaddr
< (sdr
+ mask
+ 0x80);
995 a0
= ldl_phys(curaddr
);
996 a1
= ldl_phys(curaddr
+ 4);
997 a2
= ldl_phys(curaddr
+ 8);
998 a3
= ldl_phys(curaddr
+ 12);
999 if (a0
!= 0 || a1
!= 0 || a2
!= 0 || a3
!= 0) {
1000 qemu_log(TARGET_FMT_plx
": %08x %08x %08x %08x\n",
1001 curaddr
, a0
, a1
, a2
, a3
);
1007 LOG_MMU("No access allowed\n");
1011 LOG_MMU("direct store...\n");
1012 /* Direct-store segment : absolutely *BUGGY* for now */
1015 /* Integer load/store : only access allowed */
1018 /* No code fetch is allowed in direct-store areas */
1021 /* Floating point load/store */
1024 /* lwarx, ldarx or srwcx. */
1027 /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
            /* Should make the instruction do no-op.
             * As it already does no-op, it's quite easy :-)
             */
1034 /* eciwx or ecowx */
1037 qemu_log("ERROR: instruction should not need "
1038 "address translation\n");
1041 if ((rw
== 1 || ctx
->key
!= 1) && (rw
== 0 || ctx
->key
!= 0)) {
1052 /* Generic TLB check function for embedded PowerPC implementations */
1053 static inline int ppcemb_tlb_check(CPUState
*env
, ppcemb_tlb_t
*tlb
,
1054 target_phys_addr_t
*raddrp
,
1055 target_ulong address
, uint32_t pid
, int ext
,
1060 /* Check valid flag */
1061 if (!(tlb
->prot
& PAGE_VALID
)) {
1062 qemu_log("%s: TLB %d not valid\n", __func__
, i
);
1065 mask
= ~(tlb
->size
- 1);
1066 LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx
" PID %u <=> " TARGET_FMT_lx
1067 " " TARGET_FMT_lx
" %u\n", __func__
, i
, address
, pid
, tlb
->EPN
,
1068 mask
, (uint32_t)tlb
->PID
);
1070 if (tlb
->PID
!= 0 && tlb
->PID
!= pid
)
1072 /* Check effective address */
1073 if ((address
& mask
) != tlb
->EPN
)
1075 *raddrp
= (tlb
->RPN
& mask
) | (address
& ~mask
);
1076 #if (TARGET_PHYS_ADDR_BITS >= 36)
1078 /* Extend the physical address to 36 bits */
1079 *raddrp
|= (target_phys_addr_t
)(tlb
->RPN
& 0xF) << 32;
1086 /* Generic TLB search function for PowerPC embedded implementations */
1087 int ppcemb_tlb_search (CPUPPCState
*env
, target_ulong address
, uint32_t pid
)
1090 target_phys_addr_t raddr
;
1093 /* Default return value is no match */
1095 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1096 tlb
= &env
->tlb
[i
].tlbe
;
1097 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
, pid
, 0, i
) == 0) {
1106 /* Helpers specific to PowerPC 40x implementations */
1107 static inline void ppc4xx_tlb_invalidate_all(CPUState
*env
)
1112 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1113 tlb
= &env
->tlb
[i
].tlbe
;
1114 tlb
->prot
&= ~PAGE_VALID
;
1119 static inline void ppc4xx_tlb_invalidate_virt(CPUState
*env
,
1120 target_ulong eaddr
, uint32_t pid
)
1122 #if !defined(FLUSH_ALL_TLBS)
1124 target_phys_addr_t raddr
;
1125 target_ulong page
, end
;
1128 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1129 tlb
= &env
->tlb
[i
].tlbe
;
1130 if (ppcemb_tlb_check(env
, tlb
, &raddr
, eaddr
, pid
, 0, i
) == 0) {
1131 end
= tlb
->EPN
+ tlb
->size
;
1132 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
)
1133 tlb_flush_page(env
, page
);
1134 tlb
->prot
&= ~PAGE_VALID
;
1139 ppc4xx_tlb_invalidate_all(env
);
1143 static int mmu40x_get_physical_address (CPUState
*env
, mmu_ctx_t
*ctx
,
1144 target_ulong address
, int rw
, int access_type
)
1147 target_phys_addr_t raddr
;
1148 int i
, ret
, zsel
, zpr
, pr
;
1151 raddr
= (target_phys_addr_t
)-1ULL;
1153 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1154 tlb
= &env
->tlb
[i
].tlbe
;
1155 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
,
1156 env
->spr
[SPR_40x_PID
], 0, i
) < 0)
1158 zsel
= (tlb
->attr
>> 4) & 0xF;
1159 zpr
= (env
->spr
[SPR_40x_ZPR
] >> (28 - (2 * zsel
))) & 0x3;
1160 LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n",
1161 __func__
, i
, zsel
, zpr
, rw
, tlb
->attr
);
1162 /* Check execute enable bit */
1169 /* All accesses granted */
1170 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
1182 /* Check from TLB entry */
1183 /* XXX: there is a problem here or in the TLB fill code... */
1184 ctx
->prot
= tlb
->prot
;
1185 ctx
->prot
|= PAGE_EXEC
;
1186 ret
= check_prot(ctx
->prot
, rw
, access_type
);
1191 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
1192 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
1197 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
1198 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
1203 void store_40x_sler (CPUPPCState
*env
, uint32_t val
)
1205 /* XXX: TO BE FIXED */
1206 if (val
!= 0x00000000) {
1207 cpu_abort(env
, "Little-endian regions are not supported by now\n");
1209 env
->spr
[SPR_405_SLER
] = val
;
1212 static int mmubooke_get_physical_address (CPUState
*env
, mmu_ctx_t
*ctx
,
1213 target_ulong address
, int rw
,
1217 target_phys_addr_t raddr
;
1221 raddr
= (target_phys_addr_t
)-1ULL;
1222 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1223 tlb
= &env
->tlb
[i
].tlbe
;
1224 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
,
1225 env
->spr
[SPR_BOOKE_PID
], 1, i
) < 0)
1228 prot
= tlb
->prot
& 0xF;
1230 prot
= (tlb
->prot
>> 4) & 0xF;
1231 /* Check the address space */
1232 if (access_type
== ACCESS_CODE
) {
1233 if (msr_ir
!= (tlb
->attr
& 1))
1236 if (prot
& PAGE_EXEC
) {
1242 if (msr_dr
!= (tlb
->attr
& 1))
1245 if ((!rw
&& prot
& PAGE_READ
) || (rw
&& (prot
& PAGE_WRITE
))) {
1258 static inline int check_physical(CPUState
*env
, mmu_ctx_t
*ctx
,
1259 target_ulong eaddr
, int rw
)
1264 ctx
->prot
= PAGE_READ
| PAGE_EXEC
;
1266 switch (env
->mmu_model
) {
1267 case POWERPC_MMU_32B
:
1268 case POWERPC_MMU_601
:
1269 case POWERPC_MMU_SOFT_6xx
:
1270 case POWERPC_MMU_SOFT_74xx
:
1271 case POWERPC_MMU_SOFT_4xx
:
1272 case POWERPC_MMU_REAL
:
1273 case POWERPC_MMU_BOOKE
:
1274 ctx
->prot
|= PAGE_WRITE
;
1276 #if defined(TARGET_PPC64)
1277 case POWERPC_MMU_620
:
1278 case POWERPC_MMU_64B
:
1279 /* Real address are 60 bits long */
1280 ctx
->raddr
&= 0x0FFFFFFFFFFFFFFFULL
;
1281 ctx
->prot
|= PAGE_WRITE
;
1284 case POWERPC_MMU_SOFT_4xx_Z
:
1285 if (unlikely(msr_pe
!= 0)) {
1286 /* 403 family add some particular protections,
1287 * using PBL/PBU registers for accesses with no translation.
1290 /* Check PLB validity */
1291 (env
->pb
[0] < env
->pb
[1] &&
1292 /* and address in plb area */
1293 eaddr
>= env
->pb
[0] && eaddr
< env
->pb
[1]) ||
1294 (env
->pb
[2] < env
->pb
[3] &&
1295 eaddr
>= env
->pb
[2] && eaddr
< env
->pb
[3]) ? 1 : 0;
1296 if (in_plb
^ msr_px
) {
1297 /* Access in protected area */
1299 /* Access is not allowed */
1303 /* Read-write access is allowed */
1304 ctx
->prot
|= PAGE_WRITE
;
1308 case POWERPC_MMU_MPC8xx
:
1310 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1312 case POWERPC_MMU_BOOKE_FSL
:
1314 cpu_abort(env
, "BookE FSL MMU model not implemented\n");
1317 cpu_abort(env
, "Unknown or invalid MMU model\n");
1324 int get_physical_address (CPUState
*env
, mmu_ctx_t
*ctx
, target_ulong eaddr
,
1325 int rw
, int access_type
)
1330 qemu_log("%s\n", __func__
);
1332 if ((access_type
== ACCESS_CODE
&& msr_ir
== 0) ||
1333 (access_type
!= ACCESS_CODE
&& msr_dr
== 0)) {
1334 /* No address translation */
1335 ret
= check_physical(env
, ctx
, eaddr
, rw
);
1338 switch (env
->mmu_model
) {
1339 case POWERPC_MMU_32B
:
1340 case POWERPC_MMU_601
:
1341 case POWERPC_MMU_SOFT_6xx
:
1342 case POWERPC_MMU_SOFT_74xx
:
1343 /* Try to find a BAT */
1344 if (env
->nb_BATs
!= 0)
1345 ret
= get_bat(env
, ctx
, eaddr
, rw
, access_type
);
1346 #if defined(TARGET_PPC64)
1347 case POWERPC_MMU_620
:
1348 case POWERPC_MMU_64B
:
1351 /* We didn't match any BAT entry or don't have BATs */
1352 ret
= get_segment(env
, ctx
, eaddr
, rw
, access_type
);
1355 case POWERPC_MMU_SOFT_4xx
:
1356 case POWERPC_MMU_SOFT_4xx_Z
:
1357 ret
= mmu40x_get_physical_address(env
, ctx
, eaddr
,
1360 case POWERPC_MMU_BOOKE
:
1361 ret
= mmubooke_get_physical_address(env
, ctx
, eaddr
,
1364 case POWERPC_MMU_MPC8xx
:
1366 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1368 case POWERPC_MMU_BOOKE_FSL
:
1370 cpu_abort(env
, "BookE FSL MMU model not implemented\n");
1372 case POWERPC_MMU_REAL
:
1373 cpu_abort(env
, "PowerPC in real mode do not do any translation\n");
1376 cpu_abort(env
, "Unknown or invalid MMU model\n");
1381 qemu_log("%s address " TARGET_FMT_lx
" => %d " TARGET_FMT_plx
"\n",
1382 __func__
, eaddr
, ret
, ctx
->raddr
);
1388 target_phys_addr_t
cpu_get_phys_page_debug (CPUState
*env
, target_ulong addr
)
1392 if (unlikely(get_physical_address(env
, &ctx
, addr
, 0, ACCESS_INT
) != 0))
1395 return ctx
.raddr
& TARGET_PAGE_MASK
;
1398 /* Perform address translation */
1399 int cpu_ppc_handle_mmu_fault (CPUState
*env
, target_ulong address
, int rw
,
1400 int mmu_idx
, int is_softmmu
)
1409 access_type
= ACCESS_CODE
;
1412 access_type
= env
->access_type
;
1414 ret
= get_physical_address(env
, &ctx
, address
, rw
, access_type
);
1416 ret
= tlb_set_page_exec(env
, address
& TARGET_PAGE_MASK
,
1417 ctx
.raddr
& TARGET_PAGE_MASK
, ctx
.prot
,
1418 mmu_idx
, is_softmmu
);
1419 } else if (ret
< 0) {
1421 if (access_type
== ACCESS_CODE
) {
1424 /* No matches in page tables or TLB */
1425 switch (env
->mmu_model
) {
1426 case POWERPC_MMU_SOFT_6xx
:
1427 env
->exception_index
= POWERPC_EXCP_IFTLB
;
1428 env
->error_code
= 1 << 18;
1429 env
->spr
[SPR_IMISS
] = address
;
1430 env
->spr
[SPR_ICMP
] = 0x80000000 | ctx
.ptem
;
1432 case POWERPC_MMU_SOFT_74xx
:
1433 env
->exception_index
= POWERPC_EXCP_IFTLB
;
1435 case POWERPC_MMU_SOFT_4xx
:
1436 case POWERPC_MMU_SOFT_4xx_Z
:
1437 env
->exception_index
= POWERPC_EXCP_ITLB
;
1438 env
->error_code
= 0;
1439 env
->spr
[SPR_40x_DEAR
] = address
;
1440 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1442 case POWERPC_MMU_32B
:
1443 case POWERPC_MMU_601
:
1444 #if defined(TARGET_PPC64)
1445 case POWERPC_MMU_620
:
1446 case POWERPC_MMU_64B
:
1448 env
->exception_index
= POWERPC_EXCP_ISI
;
1449 env
->error_code
= 0x40000000;
1451 case POWERPC_MMU_BOOKE
:
1453 cpu_abort(env
, "BookE MMU model is not implemented\n");
1455 case POWERPC_MMU_BOOKE_FSL
:
1457 cpu_abort(env
, "BookE FSL MMU model is not implemented\n");
1459 case POWERPC_MMU_MPC8xx
:
1461 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1463 case POWERPC_MMU_REAL
:
1464 cpu_abort(env
, "PowerPC in real mode should never raise "
1465 "any MMU exceptions\n");
1468 cpu_abort(env
, "Unknown or invalid MMU model\n");
1473 /* Access rights violation */
1474 env
->exception_index
= POWERPC_EXCP_ISI
;
1475 env
->error_code
= 0x08000000;
1478 /* No execute protection violation */
1479 env
->exception_index
= POWERPC_EXCP_ISI
;
1480 env
->error_code
= 0x10000000;
1483 /* Direct store exception */
1484 /* No code fetch is allowed in direct-store areas */
1485 env
->exception_index
= POWERPC_EXCP_ISI
;
1486 env
->error_code
= 0x10000000;
1488 #if defined(TARGET_PPC64)
1490 /* No match in segment table */
1491 if (env
->mmu_model
== POWERPC_MMU_620
) {
1492 env
->exception_index
= POWERPC_EXCP_ISI
;
1493 /* XXX: this might be incorrect */
1494 env
->error_code
= 0x40000000;
1496 env
->exception_index
= POWERPC_EXCP_ISEG
;
1497 env
->error_code
= 0;
1505 /* No matches in page tables or TLB */
1506 switch (env
->mmu_model
) {
1507 case POWERPC_MMU_SOFT_6xx
:
1509 env
->exception_index
= POWERPC_EXCP_DSTLB
;
1510 env
->error_code
= 1 << 16;
1512 env
->exception_index
= POWERPC_EXCP_DLTLB
;
1513 env
->error_code
= 0;
1515 env
->spr
[SPR_DMISS
] = address
;
1516 env
->spr
[SPR_DCMP
] = 0x80000000 | ctx
.ptem
;
1518 env
->error_code
|= ctx
.key
<< 19;
1519 env
->spr
[SPR_HASH1
] = ctx
.pg_addr
[0];
1520 env
->spr
[SPR_HASH2
] = ctx
.pg_addr
[1];
1522 case POWERPC_MMU_SOFT_74xx
:
1524 env
->exception_index
= POWERPC_EXCP_DSTLB
;
1526 env
->exception_index
= POWERPC_EXCP_DLTLB
;
1529 /* Implement LRU algorithm */
1530 env
->error_code
= ctx
.key
<< 19;
1531 env
->spr
[SPR_TLBMISS
] = (address
& ~((target_ulong
)0x3)) |
1532 ((env
->last_way
+ 1) & (env
->nb_ways
- 1));
1533 env
->spr
[SPR_PTEHI
] = 0x80000000 | ctx
.ptem
;
1535 case POWERPC_MMU_SOFT_4xx
:
1536 case POWERPC_MMU_SOFT_4xx_Z
:
1537 env
->exception_index
= POWERPC_EXCP_DTLB
;
1538 env
->error_code
= 0;
1539 env
->spr
[SPR_40x_DEAR
] = address
;
1541 env
->spr
[SPR_40x_ESR
] = 0x00800000;
1543 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1545 case POWERPC_MMU_32B
:
1546 case POWERPC_MMU_601
:
1547 #if defined(TARGET_PPC64)
1548 case POWERPC_MMU_620
:
1549 case POWERPC_MMU_64B
:
1551 env
->exception_index
= POWERPC_EXCP_DSI
;
1552 env
->error_code
= 0;
1553 env
->spr
[SPR_DAR
] = address
;
1555 env
->spr
[SPR_DSISR
] = 0x42000000;
1557 env
->spr
[SPR_DSISR
] = 0x40000000;
1559 case POWERPC_MMU_MPC8xx
:
1561 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1563 case POWERPC_MMU_BOOKE
:
1565 cpu_abort(env
, "BookE MMU model is not implemented\n");
1567 case POWERPC_MMU_BOOKE_FSL
:
1569 cpu_abort(env
, "BookE FSL MMU model is not implemented\n");
1571 case POWERPC_MMU_REAL
:
1572 cpu_abort(env
, "PowerPC in real mode should never raise "
1573 "any MMU exceptions\n");
1576 cpu_abort(env
, "Unknown or invalid MMU model\n");
1581 /* Access rights violation */
1582 env
->exception_index
= POWERPC_EXCP_DSI
;
1583 env
->error_code
= 0;
1584 env
->spr
[SPR_DAR
] = address
;
1586 env
->spr
[SPR_DSISR
] = 0x0A000000;
1588 env
->spr
[SPR_DSISR
] = 0x08000000;
1591 /* Direct store exception */
1592 switch (access_type
) {
1594 /* Floating point load/store */
1595 env
->exception_index
= POWERPC_EXCP_ALIGN
;
1596 env
->error_code
= POWERPC_EXCP_ALIGN_FP
;
1597 env
->spr
[SPR_DAR
] = address
;
1600 /* lwarx, ldarx or stwcx. */
1601 env
->exception_index
= POWERPC_EXCP_DSI
;
1602 env
->error_code
= 0;
1603 env
->spr
[SPR_DAR
] = address
;
1605 env
->spr
[SPR_DSISR
] = 0x06000000;
1607 env
->spr
[SPR_DSISR
] = 0x04000000;
1610 /* eciwx or ecowx */
1611 env
->exception_index
= POWERPC_EXCP_DSI
;
1612 env
->error_code
= 0;
1613 env
->spr
[SPR_DAR
] = address
;
1615 env
->spr
[SPR_DSISR
] = 0x06100000;
1617 env
->spr
[SPR_DSISR
] = 0x04100000;
1620 printf("DSI: invalid exception (%d)\n", ret
);
1621 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
1623 POWERPC_EXCP_INVAL
| POWERPC_EXCP_INVAL_INVAL
;
1624 env
->spr
[SPR_DAR
] = address
;
1628 #if defined(TARGET_PPC64)
1630 /* No match in segment table */
1631 if (env
->mmu_model
== POWERPC_MMU_620
) {
1632 env
->exception_index
= POWERPC_EXCP_DSI
;
1633 env
->error_code
= 0;
1634 env
->spr
[SPR_DAR
] = address
;
1635 /* XXX: this might be incorrect */
1637 env
->spr
[SPR_DSISR
] = 0x42000000;
1639 env
->spr
[SPR_DSISR
] = 0x40000000;
1641 env
->exception_index
= POWERPC_EXCP_DSEG
;
1642 env
->error_code
= 0;
1643 env
->spr
[SPR_DAR
] = address
;
1650 printf("%s: set exception to %d %02x\n", __func__
,
1651 env
->exception
, env
->error_code
);
1659 /*****************************************************************************/
1660 /* BATs management */
1661 #if !defined(FLUSH_ALL_TLBS)
1662 static inline void do_invalidate_BAT(CPUPPCState
*env
, target_ulong BATu
,
1665 target_ulong base
, end
, page
;
1667 base
= BATu
& ~0x0001FFFF;
1668 end
= base
+ mask
+ 0x00020000;
1669 LOG_BATS("Flush BAT from " TARGET_FMT_lx
" to " TARGET_FMT_lx
" ("
1670 TARGET_FMT_lx
")\n", base
, end
, mask
);
1671 for (page
= base
; page
!= end
; page
+= TARGET_PAGE_SIZE
)
1672 tlb_flush_page(env
, page
);
1673 LOG_BATS("Flush done\n");
1677 static inline void dump_store_bat(CPUPPCState
*env
, char ID
, int ul
, int nr
,
1680 LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx
" (" TARGET_FMT_lx
")\n", ID
,
1681 nr
, ul
== 0 ? 'u' : 'l', value
, env
->nip
);
1684 void ppc_store_ibatu (CPUPPCState
*env
, int nr
, target_ulong value
)
1688 dump_store_bat(env
, 'I', 0, nr
, value
);
1689 if (env
->IBAT
[0][nr
] != value
) {
1690 mask
= (value
<< 15) & 0x0FFE0000UL
;
1691 #if !defined(FLUSH_ALL_TLBS)
1692 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1694 /* When storing valid upper BAT, mask BEPI and BRPN
1695 * and invalidate all TLBs covered by this BAT
1697 mask
= (value
<< 15) & 0x0FFE0000UL
;
1698 env
->IBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
1699 (value
& ~0x0001FFFFUL
& ~mask
);
1700 env
->IBAT
[1][nr
] = (env
->IBAT
[1][nr
] & 0x0000007B) |
1701 (env
->IBAT
[1][nr
] & ~0x0001FFFF & ~mask
);
1702 #if !defined(FLUSH_ALL_TLBS)
1703 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1710 void ppc_store_ibatl (CPUPPCState
*env
, int nr
, target_ulong value
)
1712 dump_store_bat(env
, 'I', 1, nr
, value
);
1713 env
->IBAT
[1][nr
] = value
;
1716 void ppc_store_dbatu (CPUPPCState
*env
, int nr
, target_ulong value
)
1720 dump_store_bat(env
, 'D', 0, nr
, value
);
1721 if (env
->DBAT
[0][nr
] != value
) {
1722 /* When storing valid upper BAT, mask BEPI and BRPN
1723 * and invalidate all TLBs covered by this BAT
1725 mask
= (value
<< 15) & 0x0FFE0000UL
;
1726 #if !defined(FLUSH_ALL_TLBS)
1727 do_invalidate_BAT(env
, env
->DBAT
[0][nr
], mask
);
1729 mask
= (value
<< 15) & 0x0FFE0000UL
;
1730 env
->DBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
1731 (value
& ~0x0001FFFFUL
& ~mask
);
1732 env
->DBAT
[1][nr
] = (env
->DBAT
[1][nr
] & 0x0000007B) |
1733 (env
->DBAT
[1][nr
] & ~0x0001FFFF & ~mask
);
1734 #if !defined(FLUSH_ALL_TLBS)
1735 do_invalidate_BAT(env
, env
->DBAT
[0][nr
], mask
);
1742 void ppc_store_dbatl (CPUPPCState
*env
, int nr
, target_ulong value
)
1744 dump_store_bat(env
, 'D', 1, nr
, value
);
1745 env
->DBAT
[1][nr
] = value
;
1748 void ppc_store_ibatu_601 (CPUPPCState
*env
, int nr
, target_ulong value
)
1753 dump_store_bat(env
, 'I', 0, nr
, value
);
1754 if (env
->IBAT
[0][nr
] != value
) {
1756 mask
= (env
->IBAT
[1][nr
] << 17) & 0x0FFE0000UL
;
1757 if (env
->IBAT
[1][nr
] & 0x40) {
1758 /* Invalidate BAT only if it is valid */
1759 #if !defined(FLUSH_ALL_TLBS)
1760 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1765 /* When storing valid upper BAT, mask BEPI and BRPN
1766 * and invalidate all TLBs covered by this BAT
1768 env
->IBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
1769 (value
& ~0x0001FFFFUL
& ~mask
);
1770 env
->DBAT
[0][nr
] = env
->IBAT
[0][nr
];
1771 if (env
->IBAT
[1][nr
] & 0x40) {
1772 #if !defined(FLUSH_ALL_TLBS)
1773 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1778 #if defined(FLUSH_ALL_TLBS)
1785 void ppc_store_ibatl_601 (CPUPPCState
*env
, int nr
, target_ulong value
)
1790 dump_store_bat(env
, 'I', 1, nr
, value
);
1791 if (env
->IBAT
[1][nr
] != value
) {
1793 if (env
->IBAT
[1][nr
] & 0x40) {
1794 #if !defined(FLUSH_ALL_TLBS)
1795 mask
= (env
->IBAT
[1][nr
] << 17) & 0x0FFE0000UL
;
1796 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1802 #if !defined(FLUSH_ALL_TLBS)
1803 mask
= (value
<< 17) & 0x0FFE0000UL
;
1804 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1809 env
->IBAT
[1][nr
] = value
;
1810 env
->DBAT
[1][nr
] = value
;
1811 #if defined(FLUSH_ALL_TLBS)
1818 /*****************************************************************************/
1819 /* TLB management */
1820 void ppc_tlb_invalidate_all (CPUPPCState
*env
)
1822 switch (env
->mmu_model
) {
1823 case POWERPC_MMU_SOFT_6xx
:
1824 case POWERPC_MMU_SOFT_74xx
:
1825 ppc6xx_tlb_invalidate_all(env
);
1827 case POWERPC_MMU_SOFT_4xx
:
1828 case POWERPC_MMU_SOFT_4xx_Z
:
1829 ppc4xx_tlb_invalidate_all(env
);
1831 case POWERPC_MMU_REAL
:
1832 cpu_abort(env
, "No TLB for PowerPC 4xx in real mode\n");
1834 case POWERPC_MMU_MPC8xx
:
1836 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1838 case POWERPC_MMU_BOOKE
:
1840 cpu_abort(env
, "BookE MMU model is not implemented\n");
1842 case POWERPC_MMU_BOOKE_FSL
:
1845 cpu_abort(env
, "BookE MMU model is not implemented\n");
1847 case POWERPC_MMU_32B
:
1848 case POWERPC_MMU_601
:
1849 #if defined(TARGET_PPC64)
1850 case POWERPC_MMU_620
:
1851 case POWERPC_MMU_64B
:
1852 #endif /* defined(TARGET_PPC64) */
1857 cpu_abort(env
, "Unknown MMU model\n");
1862 void ppc_tlb_invalidate_one (CPUPPCState
*env
, target_ulong addr
)
1864 #if !defined(FLUSH_ALL_TLBS)
1865 addr
&= TARGET_PAGE_MASK
;
1866 switch (env
->mmu_model
) {
1867 case POWERPC_MMU_SOFT_6xx
:
1868 case POWERPC_MMU_SOFT_74xx
:
1869 ppc6xx_tlb_invalidate_virt(env
, addr
, 0);
1870 if (env
->id_tlbs
== 1)
1871 ppc6xx_tlb_invalidate_virt(env
, addr
, 1);
1873 case POWERPC_MMU_SOFT_4xx
:
1874 case POWERPC_MMU_SOFT_4xx_Z
:
1875 ppc4xx_tlb_invalidate_virt(env
, addr
, env
->spr
[SPR_40x_PID
]);
1877 case POWERPC_MMU_REAL
:
1878 cpu_abort(env
, "No TLB for PowerPC 4xx in real mode\n");
1880 case POWERPC_MMU_MPC8xx
:
1882 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1884 case POWERPC_MMU_BOOKE
:
1886 cpu_abort(env
, "BookE MMU model is not implemented\n");
1888 case POWERPC_MMU_BOOKE_FSL
:
1890 cpu_abort(env
, "BookE FSL MMU model is not implemented\n");
1892 case POWERPC_MMU_32B
:
1893 case POWERPC_MMU_601
:
1894 /* tlbie invalidate TLBs for all segments */
1895 addr
&= ~((target_ulong
)-1ULL << 28);
1896 /* XXX: this case should be optimized,
1897 * giving a mask to tlb_flush_page
1899 tlb_flush_page(env
, addr
| (0x0 << 28));
1900 tlb_flush_page(env
, addr
| (0x1 << 28));
1901 tlb_flush_page(env
, addr
| (0x2 << 28));
1902 tlb_flush_page(env
, addr
| (0x3 << 28));
1903 tlb_flush_page(env
, addr
| (0x4 << 28));
1904 tlb_flush_page(env
, addr
| (0x5 << 28));
1905 tlb_flush_page(env
, addr
| (0x6 << 28));
1906 tlb_flush_page(env
, addr
| (0x7 << 28));
1907 tlb_flush_page(env
, addr
| (0x8 << 28));
1908 tlb_flush_page(env
, addr
| (0x9 << 28));
1909 tlb_flush_page(env
, addr
| (0xA << 28));
1910 tlb_flush_page(env
, addr
| (0xB << 28));
1911 tlb_flush_page(env
, addr
| (0xC << 28));
1912 tlb_flush_page(env
, addr
| (0xD << 28));
1913 tlb_flush_page(env
, addr
| (0xE << 28));
1914 tlb_flush_page(env
, addr
| (0xF << 28));
1916 #if defined(TARGET_PPC64)
1917 case POWERPC_MMU_620
:
1918 case POWERPC_MMU_64B
:
1919 /* tlbie invalidate TLBs for all segments */
1920 /* XXX: given the fact that there are too many segments to invalidate,
1921 * and we still don't have a tlb_flush_mask(env, n, mask) in Qemu,
1922 * we just invalidate all TLBs
1926 #endif /* defined(TARGET_PPC64) */
1929 cpu_abort(env
, "Unknown MMU model\n");
1933 ppc_tlb_invalidate_all(env
);
1937 /*****************************************************************************/
1938 /* Special registers manipulation */
1939 #if defined(TARGET_PPC64)
1940 void ppc_store_asr (CPUPPCState
*env
, target_ulong value
)
1942 if (env
->asr
!= value
) {
1949 void ppc_store_sdr1 (CPUPPCState
*env
, target_ulong value
)
1951 LOG_MMU("%s: " TARGET_FMT_lx
"\n", __func__
, value
);
1952 if (env
->sdr1
!= value
) {
1953 /* XXX: for PowerPC 64, should check that the HTABSIZE value
1961 #if defined(TARGET_PPC64)
1962 target_ulong
ppc_load_sr (CPUPPCState
*env
, int slb_nr
)
1969 void ppc_store_sr (CPUPPCState
*env
, int srnum
, target_ulong value
)
1971 LOG_MMU("%s: reg=%d " TARGET_FMT_lx
" " TARGET_FMT_lx
"\n", __func__
,
1972 srnum
, value
, env
->sr
[srnum
]);
1973 #if defined(TARGET_PPC64)
1974 if (env
->mmu_model
& POWERPC_MMU_64
) {
1975 uint64_t rb
= 0, rs
= 0;
1978 rb
|= ((uint32_t)srnum
& 0xf) << 28;
1979 /* Set the valid bit */
1982 rb
|= (uint32_t)srnum
;
1985 rs
|= (value
& 0xfffffff) << 12;
1987 rs
|= ((value
>> 27) & 0xf) << 9;
1989 ppc_store_slb(env
, rb
, rs
);
1992 if (env
->sr
[srnum
] != value
) {
1993 env
->sr
[srnum
] = value
;
        /* Invalidating 256MB of virtual memory in 4kB pages is way longer
           than flushing the whole TLB. */
1996 #if !defined(FLUSH_ALL_TLBS) && 0
1998 target_ulong page
, end
;
1999 /* Invalidate 256 MB of virtual memory */
2000 page
= (16 << 20) * srnum
;
2001 end
= page
+ (16 << 20);
2002 for (; page
!= end
; page
+= TARGET_PAGE_SIZE
)
2003 tlb_flush_page(env
, page
);
2010 #endif /* !defined (CONFIG_USER_ONLY) */
2012 /* GDBstub can read and write MSR... */
2013 void ppc_store_msr (CPUPPCState
*env
, target_ulong value
)
2015 hreg_store_msr(env
, value
, 0);
2018 /*****************************************************************************/
2019 /* Exception processing */
2020 #if defined (CONFIG_USER_ONLY)
2021 void do_interrupt (CPUState
*env
)
2023 env
->exception_index
= POWERPC_EXCP_NONE
;
2024 env
->error_code
= 0;
2027 void ppc_hw_interrupt (CPUState
*env
)
2029 env
->exception_index
= POWERPC_EXCP_NONE
;
2030 env
->error_code
= 0;
2032 #else /* defined (CONFIG_USER_ONLY) */
2033 static inline void dump_syscall(CPUState
*env
)
2035 qemu_log_mask(CPU_LOG_INT
, "syscall r0=%016" PRIx64
" r3=%016" PRIx64
2036 " r4=%016" PRIx64
" r5=%016" PRIx64
" r6=%016" PRIx64
2037 " nip=" TARGET_FMT_lx
"\n",
2038 ppc_dump_gpr(env
, 0), ppc_dump_gpr(env
, 3),
2039 ppc_dump_gpr(env
, 4), ppc_dump_gpr(env
, 5),
2040 ppc_dump_gpr(env
, 6), env
->nip
);
2043 /* Note that this function should be greatly optimized
2044 * when called with a constant excp, from ppc_hw_interrupt
2046 static inline void powerpc_excp(CPUState
*env
, int excp_model
, int excp
)
2048 target_ulong msr
, new_msr
, vector
;
2049 int srr0
, srr1
, asrr0
, asrr1
;
2050 int lpes0
, lpes1
, lev
;
2053 /* XXX: find a suitable condition to enable the hypervisor mode */
2054 lpes0
= (env
->spr
[SPR_LPCR
] >> 1) & 1;
2055 lpes1
= (env
->spr
[SPR_LPCR
] >> 2) & 1;
2057 /* Those values ensure we won't enter the hypervisor mode */
2062 qemu_log_mask(CPU_LOG_INT
, "Raise exception at " TARGET_FMT_lx
2063 " => %08x (%02x)\n", env
->nip
, excp
, env
->error_code
);
2070 msr
&= ~((target_ulong
)0x783F0000);
2072 case POWERPC_EXCP_NONE
:
2073 /* Should never happen */
2075 case POWERPC_EXCP_CRITICAL
: /* Critical input */
2076 new_msr
&= ~((target_ulong
)1 << MSR_RI
); /* XXX: check this */
2077 switch (excp_model
) {
2078 case POWERPC_EXCP_40x
:
2079 srr0
= SPR_40x_SRR2
;
2080 srr1
= SPR_40x_SRR3
;
2082 case POWERPC_EXCP_BOOKE
:
2083 srr0
= SPR_BOOKE_CSRR0
;
2084 srr1
= SPR_BOOKE_CSRR1
;
2086 case POWERPC_EXCP_G2
:
2092 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
2094 /* Machine check exception is not enabled.
2095 * Enter checkstop state.
2097 if (qemu_log_enabled()) {
2098 qemu_log("Machine check while not allowed. "
2099 "Entering checkstop state\n");
2101 fprintf(stderr
, "Machine check while not allowed. "
2102 "Entering checkstop state\n");
2105 env
->interrupt_request
|= CPU_INTERRUPT_EXITTB
;
2107 new_msr
&= ~((target_ulong
)1 << MSR_RI
);
2108 new_msr
&= ~((target_ulong
)1 << MSR_ME
);
2110 /* XXX: find a suitable condition to enable the hypervisor mode */
2111 new_msr
|= (target_ulong
)MSR_HVB
;
2113 /* XXX: should also have something loaded in DAR / DSISR */
2114 switch (excp_model
) {
2115 case POWERPC_EXCP_40x
:
2116 srr0
= SPR_40x_SRR2
;
2117 srr1
= SPR_40x_SRR3
;
2119 case POWERPC_EXCP_BOOKE
:
2120 srr0
= SPR_BOOKE_MCSRR0
;
2121 srr1
= SPR_BOOKE_MCSRR1
;
2122 asrr0
= SPR_BOOKE_CSRR0
;
2123 asrr1
= SPR_BOOKE_CSRR1
;
2129 case POWERPC_EXCP_DSI
: /* Data storage exception */
2130 LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx
" DAR=" TARGET_FMT_lx
2131 "\n", env
->spr
[SPR_DSISR
], env
->spr
[SPR_DAR
]);
2132 new_msr
&= ~((target_ulong
)1 << MSR_RI
);
2134 new_msr
|= (target_ulong
)MSR_HVB
;
2136 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
2137 LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx
", nip=" TARGET_FMT_lx
2138 "\n", msr
, env
->nip
);
2139 new_msr
&= ~((target_ulong
)1 << MSR_RI
);
2141 new_msr
|= (target_ulong
)MSR_HVB
;
2142 msr
|= env
->error_code
;
2144 case POWERPC_EXCP_EXTERNAL
: /* External input */
2145 new_msr
&= ~((target_ulong
)1 << MSR_RI
);
2147 new_msr
|= (target_ulong
)MSR_HVB
;
2149 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
2150 new_msr
&= ~((target_ulong
)1 << MSR_RI
);
2152 new_msr
|= (target_ulong
)MSR_HVB
;
2153 /* XXX: this is false */
2154 /* Get rS/rD and rA from faulting opcode */
2155 env
->spr
[SPR_DSISR
] |= (ldl_code((env
->nip
- 4)) & 0x03FF0000) >> 16;
2157 case POWERPC_EXCP_PROGRAM
: /* Program exception */
2158 switch (env
->error_code
& ~0xF) {
2159 case POWERPC_EXCP_FP
:
2160 if ((msr_fe0
== 0 && msr_fe1
== 0) || msr_fp
== 0) {
2161 LOG_EXCP("Ignore floating point exception\n");
2162 env
->exception_index
= POWERPC_EXCP_NONE
;
2163 env
->error_code
= 0;
2166 new_msr
&= ~((target_ulong
)1 << MSR_RI
);
2168 new_msr
|= (target_ulong
)MSR_HVB
;
2170 if (msr_fe0
== msr_fe1
)
2174 case POWERPC_EXCP_INVAL
:
2175 LOG_EXCP("Invalid instruction at " TARGET_FMT_lx
"\n", env
->nip
);
2176 new_msr
&= ~((target_ulong
)1 << MSR_RI
);
2178 new_msr
|= (target_ulong
)MSR_HVB
;
2181 case POWERPC_EXCP_PRIV
:
2182 new_msr
&= ~((target_ulong
)1 << MSR_RI
);
2184 new_msr
|= (target_ulong
)MSR_HVB
;
2187 case POWERPC_EXCP_TRAP
:
2188 new_msr
&= ~((target_ulong
)1 << MSR_RI
);
2190 new_msr
|= (target_ulong
)MSR_HVB
;
2194 /* Should never occur */
2195 cpu_abort(env
, "Invalid program exception %d. Aborting\n",
2200 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
2201 new_msr
&= ~((target_ulong
)1 << MSR_RI
);
2203 new_msr
|= (target_ulong
)MSR_HVB
;
2205 case POWERPC_EXCP_SYSCALL
: /* System call exception */
2206 /* NOTE: this is a temporary hack to support graphics OSI
2207 calls from the MOL driver */
2208 /* XXX: To be removed */
2209 if (env
->gpr
[3] == 0x113724fa && env
->gpr
[4] == 0x77810f9b &&
2211 if (env
->osi_call(env
) != 0) {
2212 env
->exception_index
= POWERPC_EXCP_NONE
;
2213 env
->error_code
= 0;
2218 new_msr
&= ~((target_ulong
)1 << MSR_RI
);
2219 lev
= env
->error_code
;
2220 if (lev
== 1 || (lpes0
== 0 && lpes1
== 0))
2221 new_msr
|= (target_ulong
)MSR_HVB
;
2223 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
2224 new_msr
&= ~((target_ulong
)1 << MSR_RI
);
2226 case POWERPC_EXCP_DECR
: /* Decrementer exception */
2227 new_msr
&= ~((target_ulong
)1 << MSR_RI
);
2229 new_msr
|= (target_ulong
)MSR_HVB
;
2231 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
2233 LOG_EXCP("FIT exception\n");
2234 new_msr
&= ~((target_ulong
)1 << MSR_RI
); /* XXX: check this */
2236 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
2237 LOG_EXCP("WDT exception\n");
2238 switch (excp_model
) {
2239 case POWERPC_EXCP_BOOKE
:
2240 srr0
= SPR_BOOKE_CSRR0
;
2241 srr1
= SPR_BOOKE_CSRR1
;
2246 new_msr
&= ~((target_ulong
)1 << MSR_RI
); /* XXX: check this */
2248 case POWERPC_EXCP_DTLB
: /* Data TLB error */
2249 new_msr
&= ~((target_ulong
)1 << MSR_RI
); /* XXX: check this */
2251 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
2252 new_msr
&= ~((target_ulong
)1 << MSR_RI
); /* XXX: check this */
2254 case POWERPC_EXCP_DEBUG
: /* Debug interrupt */
2255 switch (excp_model
) {
2256 case POWERPC_EXCP_BOOKE
:
2257 srr0
= SPR_BOOKE_DSRR0
;
2258 srr1
= SPR_BOOKE_DSRR1
;
2259 asrr0
= SPR_BOOKE_CSRR0
;
2260 asrr1
= SPR_BOOKE_CSRR1
;
2266 cpu_abort(env
, "Debug exception is not implemented yet !\n");
2268 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavailable */
2269 new_msr
&= ~((target_ulong
)1 << MSR_RI
); /* XXX: check this */
2271 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data interrupt */
2273 cpu_abort(env
, "Embedded floating point data exception "
2274 "is not implemented yet !\n");
2276 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round interrupt */
2278 cpu_abort(env
, "Embedded floating point round exception "
2279 "is not implemented yet !\n");
2281 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor interrupt */
2282 new_msr
&= ~((target_ulong
)1 << MSR_RI
);
2285 "Performance counter exception is not implemented yet !\n");
2287 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
2290 "Embedded doorbell interrupt is not implemented yet !\n");
2292 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
2293 switch (excp_model
) {
2294 case POWERPC_EXCP_BOOKE
:
2295 srr0
= SPR_BOOKE_CSRR0
;
2296 srr1
= SPR_BOOKE_CSRR1
;
2302 cpu_abort(env
, "Embedded doorbell critical interrupt "
2303 "is not implemented yet !\n");
2305 case POWERPC_EXCP_RESET
: /* System reset exception */
2306 new_msr
&= ~((target_ulong
)1 << MSR_RI
);
2308 /* XXX: find a suitable condition to enable the hypervisor mode */
2309 new_msr
|= (target_ulong
)MSR_HVB
;
2312 case POWERPC_EXCP_DSEG
: /* Data segment exception */
2313 new_msr
&= ~((target_ulong
)1 << MSR_RI
);
2315 new_msr
|= (target_ulong
)MSR_HVB
;
2317 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
2318 new_msr
&= ~((target_ulong
)1 << MSR_RI
);
2320 new_msr
|= (target_ulong
)MSR_HVB
;
2322 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
2325 new_msr
|= (target_ulong
)MSR_HVB
;
2327 case POWERPC_EXCP_TRACE
: /* Trace exception */
2328 new_msr
&= ~((target_ulong
)1 << MSR_RI
);
2330 new_msr
|= (target_ulong
)MSR_HVB
;
2332 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
2335 new_msr
|= (target_ulong
)MSR_HVB
;
2337 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage exception */
2340 new_msr
|= (target_ulong
)MSR_HVB
;
2342 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
2345 new_msr
|= (target_ulong
)MSR_HVB
;
2347 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment exception */
2350 new_msr
|= (target_ulong
)MSR_HVB
;
2352 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
2353 new_msr
&= ~((target_ulong
)1 << MSR_RI
);
2355 new_msr
|= (target_ulong
)MSR_HVB
;
2357 case POWERPC_EXCP_PIT
: /* Programmable interval timer interrupt */
2358 LOG_EXCP("PIT exception\n");
2359 new_msr
&= ~((target_ulong
)1 << MSR_RI
); /* XXX: check this */
2361 case POWERPC_EXCP_IO
: /* IO error exception */
2363 cpu_abort(env
, "601 IO error exception is not implemented yet !\n");
2365 case POWERPC_EXCP_RUNM
: /* Run mode exception */
2367 cpu_abort(env
, "601 run mode exception is not implemented yet !\n");
2369 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
2371 cpu_abort(env
, "602 emulation trap exception "
2372 "is not implemented yet !\n");
2374 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
2375 new_msr
&= ~((target_ulong
)1 << MSR_RI
); /* XXX: check this */
2376 if (lpes1
== 0) /* XXX: check this */
2377 new_msr
|= (target_ulong
)MSR_HVB
;
2378 switch (excp_model
) {
2379 case POWERPC_EXCP_602
:
2380 case POWERPC_EXCP_603
:
2381 case POWERPC_EXCP_603E
:
2382 case POWERPC_EXCP_G2
:
2384 case POWERPC_EXCP_7x5
:
2386 case POWERPC_EXCP_74xx
:
2389 cpu_abort(env
, "Invalid instruction TLB miss exception\n");
2393 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
2394 new_msr
&= ~((target_ulong
)1 << MSR_RI
); /* XXX: check this */
2395 if (lpes1
== 0) /* XXX: check this */
2396 new_msr
|= (target_ulong
)MSR_HVB
;
2397 switch (excp_model
) {
2398 case POWERPC_EXCP_602
:
2399 case POWERPC_EXCP_603
:
2400 case POWERPC_EXCP_603E
:
2401 case POWERPC_EXCP_G2
:
2403 case POWERPC_EXCP_7x5
:
2405 case POWERPC_EXCP_74xx
:
2408 cpu_abort(env
, "Invalid data load TLB miss exception\n");
    case POWERPC_EXCP_DSTLB:     /* Data store TLB miss                     */
        new_msr &= ~((target_ulong)1 << MSR_RI); /* XXX: check this */
        if (lpes1 == 0) /* XXX: check this */
            new_msr |= (target_ulong)MSR_HVB;
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
        tlb_miss_tgpr:
            /* Swap temporary saved registers with GPRs */
            if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
                new_msr |= (target_ulong)1 << MSR_TGPR;
                hreg_swap_gpr_tgpr(env);
            }
            goto tlb_miss;
        case POWERPC_EXCP_7x5:
        tlb_miss:
#if defined (DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_IMISS];
                    cmp = &env->spr[SPR_ICMP];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB)
                        es = "DL";
                    else
                        es = "DS";
                    en = 'D';
                    miss = &env->spr[SPR_DMISS];
                    cmp = &env->spr[SPR_DCMP];
                }
                qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->spr[SPR_HASH1], env->spr[SPR_HASH2],
                         env->error_code);
            }
#endif
            msr |= env->crf[0] << 28;
            msr |= env->error_code; /* key, D/I, S/L bits */
            /* Set way using a LRU mechanism */
            msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
            break;
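            /* The 74xx variant reports the miss through SPR_TLBMISS/SPR_PTEHI
             * rather than the 6xx IMISS/ICMP and DMISS/DCMP pairs, and only
             * the key bit is folded into the saved MSR image. */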
        case POWERPC_EXCP_74xx:
        tlb_miss_74xx:
#if defined (DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB)
                        es = "DL";
                    else
                        es = "DS";
                    en = 'D';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                }
                qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->error_code);
            }
#endif
            msr |= env->error_code; /* key bit */
            break;
        default:
            cpu_abort(env, "Invalid data store TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_FPA:       /* Floating-point assist exception         */
        cpu_abort(env, "Floating point assist exception "
                  "is not implemented yet !\n");
    case POWERPC_EXCP_DABR:      /* Data address breakpoint                 */
        cpu_abort(env, "DABR exception is not implemented yet !\n");
    case POWERPC_EXCP_IABR:      /* Instruction address breakpoint          */
        cpu_abort(env, "IABR exception is not implemented yet !\n");
    case POWERPC_EXCP_SMI:       /* System management interrupt             */
        cpu_abort(env, "SMI exception is not implemented yet !\n");
    case POWERPC_EXCP_THERM:     /* Thermal interrupt                       */
        cpu_abort(env, "Thermal management exception "
                  "is not implemented yet !\n");
    case POWERPC_EXCP_PERFM:     /* Embedded performance monitor interrupt  */
        new_msr &= ~((target_ulong)1 << MSR_RI);
        new_msr |= (target_ulong)MSR_HVB;
        cpu_abort(env,
                  "Performance counter exception is not implemented yet !\n");
    case POWERPC_EXCP_VPUA:      /* Vector assist exception                 */
        cpu_abort(env, "VPU assist exception is not implemented yet !\n");
    case POWERPC_EXCP_SOFTP:     /* Soft patch exception                    */
        cpu_abort(env,
                  "970 soft-patch exception is not implemented yet !\n");
    case POWERPC_EXCP_MAINT:     /* Maintenance exception                   */
        cpu_abort(env,
                  "970 maintenance exception is not implemented yet !\n");
    case POWERPC_EXCP_MEXTBR:    /* Maskable external breakpoint            */
        cpu_abort(env, "Maskable external exception "
                  "is not implemented yet !\n");
    case POWERPC_EXCP_NMEXTBR:   /* Non maskable external breakpoint        */
        cpu_abort(env, "Non maskable external exception "
                  "is not implemented yet !\n");
    default:
        cpu_abort(env, "Invalid PowerPC exception %d. Aborting\n", excp);
        break;
    }
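    /* Depending on the exception taken, the handler records either the
     * address of the faulting instruction or the address of the next
     * instruction as the return address: */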
    /* save current instruction location */
    env->spr[srr0] = env->nip - 4;
    /* save next instruction location */
    env->spr[srr0] = env->nip;
    /* Save MSR */
    env->spr[srr1] = msr;
    /* If any alternate SRR registers are defined, duplicate the saved values */
    env->spr[asrr0] = env->spr[srr0];
    env->spr[asrr1] = env->spr[srr1];
    /* If we deactivated any translation, flush TLBs */
    if (new_msr & ((1 << MSR_IR) | (1 << MSR_DR)))
        tlb_flush(env, 1);
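    /* Exception entry leaves the CPU in supervisor mode with external
     * interrupts, address translation, floating point and trace facilities
     * disabled; the bit clearing below builds that MSR image. */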
    /* reload MSR with correct bits */
    new_msr &= ~((target_ulong)1 << MSR_EE);
    new_msr &= ~((target_ulong)1 << MSR_PR);
    new_msr &= ~((target_ulong)1 << MSR_FP);
    new_msr &= ~((target_ulong)1 << MSR_FE0);
    new_msr &= ~((target_ulong)1 << MSR_SE);
    new_msr &= ~((target_ulong)1 << MSR_BE);
    new_msr &= ~((target_ulong)1 << MSR_FE1);
    new_msr &= ~((target_ulong)1 << MSR_IR);
    new_msr &= ~((target_ulong)1 << MSR_DR);
#if 0 /* Fix this: not on all targets */
    new_msr &= ~((target_ulong)1 << MSR_PMM);
#endif
    new_msr &= ~((target_ulong)1 << MSR_LE);
    if (msr_ile)
        new_msr |= (target_ulong)1 << MSR_LE;
    else
        new_msr &= ~((target_ulong)1 << MSR_LE);
    /* Jump to handler */
    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(env, "Raised an exception without defined vector %d\n",
                  excp);
    }
    vector |= env->excp_prefix;
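    /* On 64-bit targets, decide whether the handler runs in 32-bit or 64-bit
     * mode and truncate the vector address accordingly. */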
#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_BOOKE) {
        if (!msr_cm) {
            new_msr &= ~((target_ulong)1 << MSR_CM);
            vector = (uint32_t)vector;
        } else {
            new_msr |= (target_ulong)1 << MSR_CM;
        }
    } else {
        if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) {
            new_msr &= ~((target_ulong)1 << MSR_SF);
            vector = (uint32_t)vector;
        } else {
            new_msr |= (target_ulong)1 << MSR_SF;
        }
    }
#endif
    /* XXX: we don't use hreg_store_msr here as we have already handled
     *      any special case that could occur. Just store MSR and update hflags
     */
    env->msr = new_msr & env->msr_mask;
    hreg_compute_hflags(env);
    env->nip = vector;
    /* Reset exception state */
    env->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}
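/* Generic exception entry point: dispatch the pending exception to
 * powerpc_excp() using this CPU's exception model. */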
void do_interrupt (CPUState *env)
{
    powerpc_excp(env, env->excp_model, env->exception_index);
}
void ppc_hw_interrupt (CPUPPCState *env)
{
    int hdice;

    qemu_log_mask(CPU_LOG_INT, "%s: %p pending %08x req %08x me %d ee %d\n",
                  __func__, env, env->pending_interrupts,
                  env->interrupt_request, (int)msr_me, (int)msr_ee);
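    /* Check pending interrupts in priority order: reset, machine check,
     * debug, hypervisor decrementer, critical external, watchdog, critical
     * doorbell, fixed interval timer, programmable interval timer,
     * decrementer, external, doorbell, performance monitor, thermal. */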
    /* External reset */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
        powerpc_excp(env, env->excp_model, POWERPC_EXCP_RESET);
        return;
    }
    /* Machine check exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
        powerpc_excp(env, env->excp_model, POWERPC_EXCP_MCHECK);
        return;
    }
    /* External debug exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
        powerpc_excp(env, env->excp_model, POWERPC_EXCP_DEBUG);
        return;
    }
    /* XXX: find a suitable condition to enable the hypervisor mode */
    hdice = env->spr[SPR_LPCR] & 1;
    if ((msr_ee != 0 || msr_hv == 0 || msr_pr != 0) && hdice != 0) {
        /* Hypervisor decrementer exception */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
            powerpc_excp(env, env->excp_model, POWERPC_EXCP_HDECR);
            return;
        }
    }
    /* External critical interrupt */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
        /* Taking a critical external interrupt does not clear the external
         * critical interrupt status
         */
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CEXT);
        powerpc_excp(env, env->excp_model, POWERPC_EXCP_CRITICAL);
        return;
    }
    /* Watchdog timer on embedded PowerPC */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
        powerpc_excp(env, env->excp_model, POWERPC_EXCP_WDT);
        return;
    }
    /* Embedded critical doorbell interrupt */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
        powerpc_excp(env, env->excp_model, POWERPC_EXCP_DOORCI);
        return;
    }
    /* Fixed interval timer on embedded PowerPC */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
        powerpc_excp(env, env->excp_model, POWERPC_EXCP_FIT);
        return;
    }
    /* Programmable interval timer on embedded PowerPC */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
        powerpc_excp(env, env->excp_model, POWERPC_EXCP_PIT);
        return;
    }
    /* Decrementer exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
        powerpc_excp(env, env->excp_model, POWERPC_EXCP_DECR);
        return;
    }
    /* External interrupt */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
        /* Taking an external interrupt does not clear the external
         * interrupt status
         */
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_EXT);
        powerpc_excp(env, env->excp_model, POWERPC_EXCP_EXTERNAL);
        return;
    }
    /* Embedded doorbell interrupt */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
        powerpc_excp(env, env->excp_model, POWERPC_EXCP_DOORI);
        return;
    }
    /* Performance monitor interrupt */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
        powerpc_excp(env, env->excp_model, POWERPC_EXCP_PERFM);
        return;
    }
    /* Thermal interrupt */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
        powerpc_excp(env, env->excp_model, POWERPC_EXCP_THERM);
        return;
    }
}
#endif /* !CONFIG_USER_ONLY */
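/* Debug helper: log the address and MSR image restored by an rfi-class
 * instruction. */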
void cpu_dump_rfi (target_ulong RA, target_ulong msr)
{
    qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
             TARGET_FMT_lx "\n", RA, msr);
}
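/* CPU reset: build the power-on MSR image, point nip at the hardware reset
 * vector and make sure no exception or interrupt is left pending. */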
void cpu_ppc_reset (void *opaque)
{
    CPUPPCState *env = opaque;
    target_ulong msr;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    msr = (target_ulong)0;
    /* XXX: find a suitable condition to enable the hypervisor mode */
    msr |= (target_ulong)MSR_HVB;
    msr |= (target_ulong)0 << MSR_AP; /* TO BE CHECKED */
    msr |= (target_ulong)0 << MSR_SA; /* TO BE CHECKED */
    msr |= (target_ulong)1 << MSR_EP;
#if defined (DO_SINGLE_STEP) && 0
    /* Single step trace mode */
    msr |= (target_ulong)1 << MSR_SE;
    msr |= (target_ulong)1 << MSR_BE;
#endif
#if defined(CONFIG_USER_ONLY)
    msr |= (target_ulong)1 << MSR_FP;  /* Allow floating point usage */
    msr |= (target_ulong)1 << MSR_VR;  /* Allow altivec usage */
    msr |= (target_ulong)1 << MSR_SPE; /* Allow SPE usage */
    msr |= (target_ulong)1 << MSR_PR;
#else
    env->excp_prefix = env->hreset_excp_prefix;
    env->nip = env->hreset_vector | env->excp_prefix;
    if (env->mmu_model != POWERPC_MMU_REAL)
        ppc_tlb_invalidate_all(env);
#endif
    env->msr = msr & env->msr_mask;
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64)
        env->msr |= (1ULL << MSR_SF);
#endif
    hreg_compute_hflags(env);
    env->reserve_addr = (target_ulong)-1ULL;
    /* Be sure no exception or interrupt is pending */
    env->pending_interrupts = 0;
    env->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
    /* Flush all TLBs */
    tlb_flush(env, 1);
}
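/* CPU creation: look up the model definition by name, allocate and
 * initialise the CPU state for it, and hand the new CPU to the VCPU layer. */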
CPUPPCState *cpu_ppc_init (const char *cpu_model)
{
    CPUPPCState *env;
    const ppc_def_t *def;

    def = cpu_ppc_find_by_name(cpu_model);
    if (def == NULL)
        return NULL;
    env = qemu_mallocz(sizeof(CPUPPCState));
    ppc_translate_init();
    env->cpu_model_str = cpu_model;
    cpu_ppc_register_internal(env, def);

    qemu_init_vcpu(env);

    return env;
}
void cpu_ppc_close (CPUPPCState *env)
{
    /* Should also remove all opcode tables... */
    qemu_free(env);
}