[qemu/mdroth.git] / target-ppc / helper.c
blob 4b491012d726cede9bd49ae5d267d0fbc1dcc459
1 /*
2 * PowerPC emulation helpers for qemu.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
24 #include <signal.h>
26 #include "cpu.h"
27 #include "exec-all.h"
28 #include "helper_regs.h"
29 #include "qemu-common.h"
30 #include "kvm.h"
32 //#define DEBUG_MMU
33 //#define DEBUG_BATS
34 //#define DEBUG_SLB
35 //#define DEBUG_SOFTWARE_TLB
36 //#define DUMP_PAGE_TABLES
37 //#define DEBUG_EXCEPTIONS
38 //#define FLUSH_ALL_TLBS
40 #ifdef DEBUG_MMU
41 # define LOG_MMU(...) qemu_log(__VA_ARGS__)
42 # define LOG_MMU_STATE(env) log_cpu_state((env), 0)
43 #else
44 # define LOG_MMU(...) do { } while (0)
45 # define LOG_MMU_STATE(...) do { } while (0)
46 #endif
49 #ifdef DEBUG_SOFTWARE_TLB
50 # define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
51 #else
52 # define LOG_SWTLB(...) do { } while (0)
53 #endif
55 #ifdef DEBUG_BATS
56 # define LOG_BATS(...) qemu_log(__VA_ARGS__)
57 #else
58 # define LOG_BATS(...) do { } while (0)
59 #endif
61 #ifdef DEBUG_SLB
62 # define LOG_SLB(...) qemu_log(__VA_ARGS__)
63 #else
64 # define LOG_SLB(...) do { } while (0)
65 #endif
67 #ifdef DEBUG_EXCEPTIONS
68 # define LOG_EXCP(...) qemu_log(__VA_ARGS__)
69 #else
70 # define LOG_EXCP(...) do { } while (0)
71 #endif
74 /*****************************************************************************/
75 /* PowerPC MMU emulation */
77 #if defined(CONFIG_USER_ONLY)
78 int cpu_ppc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
79 int mmu_idx, int is_softmmu)
81 int exception, error_code;
83 if (rw == 2) {
84 exception = POWERPC_EXCP_ISI;
85 error_code = 0x40000000;
86 } else {
87 exception = POWERPC_EXCP_DSI;
88 error_code = 0x40000000;
89 if (rw)
90 error_code |= 0x02000000;
91 env->spr[SPR_DAR] = address;
92 env->spr[SPR_DSISR] = error_code;
94 env->exception_index = exception;
95 env->error_code = error_code;
97 return 1;
100 #else
101 /* Common routines used by software and hardware TLB emulation */
102 static inline int pte_is_valid(target_ulong pte0)
104 return pte0 & 0x80000000 ? 1 : 0;
107 static inline void pte_invalidate(target_ulong *pte0)
109 *pte0 &= ~0x80000000;
112 #if defined(TARGET_PPC64)
113 static inline int pte64_is_valid(target_ulong pte0)
115 return pte0 & 0x0000000000000001ULL ? 1 : 0;
118 static inline void pte64_invalidate(target_ulong *pte0)
120 *pte0 &= ~0x0000000000000001ULL;
122 #endif
124 #define PTE_PTEM_MASK 0x7FFFFFBF
125 #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
126 #if defined(TARGET_PPC64)
127 #define PTE64_PTEM_MASK 0xFFFFFFFFFFFFFF80ULL
128 #define PTE64_CHECK_MASK (TARGET_PAGE_MASK | 0x7F)
129 #endif
131 static inline int pp_check(int key, int pp, int nx)
133 int access;
135 /* Compute access rights */
136 /* When pp is 3/7, the result is undefined. Set it to noaccess */
137 access = 0;
138 if (key == 0) {
139 switch (pp) {
140 case 0x0:
141 case 0x1:
142 case 0x2:
143 access |= PAGE_WRITE;
144 /* No break here */
145 case 0x3:
146 case 0x6:
147 access |= PAGE_READ;
148 break;
150 } else {
151 switch (pp) {
152 case 0x0:
153 case 0x6:
154 access = 0;
155 break;
156 case 0x1:
157 case 0x3:
158 access = PAGE_READ;
159 break;
160 case 0x2:
161 access = PAGE_READ | PAGE_WRITE;
162 break;
165 if (nx == 0)
166 access |= PAGE_EXEC;
168 return access;
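/* Editor's note, not part of the original file: access rights produced by
 * pp_check() above, read straight from the two switch statements:
 *
 *     pp  | key = 0        | key = 1
 *     ----+----------------+----------------
 *      0  | READ | WRITE   | no access
 *      1  | READ | WRITE   | READ
 *      2  | READ | WRITE   | READ | WRITE
 *      3  | READ           | READ
 *      6  | READ           | no access
 *    other| no access      | no access
 *
 * PAGE_EXEC is OR-ed in on top of this whenever nx == 0.
 */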
171 static inline int check_prot(int prot, int rw, int access_type)
173 int ret;
175 if (access_type == ACCESS_CODE) {
176 if (prot & PAGE_EXEC)
177 ret = 0;
178 else
179 ret = -2;
180 } else if (rw) {
181 if (prot & PAGE_WRITE)
182 ret = 0;
183 else
184 ret = -2;
185 } else {
186 if (prot & PAGE_READ)
187 ret = 0;
188 else
189 ret = -2;
192 return ret;
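/* Editor's note, not part of the original file: check_prot() and the lookup
 * helpers below share one return-code convention: 0 = access granted,
 * -1 = no match found, -2 = protection violation, -3 = PTE/TLB inconsistency
 * or a no-execute violation, -4 = direct-store segment special case, and
 * -5 = no SLB/segment match (64-bit only).  cpu_ppc_handle_mmu_fault()
 * translates these codes into the architected exceptions.
 */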
195 static inline int _pte_check(mmu_ctx_t *ctx, int is_64b, target_ulong pte0,
196 target_ulong pte1, int h, int rw, int type)
198 target_ulong ptem, mmask;
199 int access, ret, pteh, ptev, pp;
201 ret = -1;
202 /* Check validity and table match */
203 #if defined(TARGET_PPC64)
204 if (is_64b) {
205 ptev = pte64_is_valid(pte0);
206 pteh = (pte0 >> 1) & 1;
207 } else
208 #endif
210 ptev = pte_is_valid(pte0);
211 pteh = (pte0 >> 6) & 1;
213 if (ptev && h == pteh) {
214 /* Check vsid & api */
215 #if defined(TARGET_PPC64)
216 if (is_64b) {
217 ptem = pte0 & PTE64_PTEM_MASK;
218 mmask = PTE64_CHECK_MASK;
219 pp = (pte1 & 0x00000003) | ((pte1 >> 61) & 0x00000004);
220 ctx->nx = (pte1 >> 2) & 1; /* No execute bit */
221 ctx->nx |= (pte1 >> 3) & 1; /* Guarded bit */
222 } else
223 #endif
225 ptem = pte0 & PTE_PTEM_MASK;
226 mmask = PTE_CHECK_MASK;
227 pp = pte1 & 0x00000003;
229 if (ptem == ctx->ptem) {
230 if (ctx->raddr != (target_phys_addr_t)-1ULL) {
231 /* all matches should have equal RPN, WIMG & PP */
232 if ((ctx->raddr & mmask) != (pte1 & mmask)) {
233 qemu_log("Bad RPN/WIMG/PP\n");
234 return -3;
237 /* Compute access rights */
238 access = pp_check(ctx->key, pp, ctx->nx);
239 /* Keep the matching PTE information */
240 ctx->raddr = pte1;
241 ctx->prot = access;
242 ret = check_prot(ctx->prot, rw, type);
243 if (ret == 0) {
244 /* Access granted */
245 LOG_MMU("PTE access granted !\n");
246 } else {
247 /* Access right violation */
248 LOG_MMU("PTE access rejected\n");
253 return ret;
256 static inline int pte32_check(mmu_ctx_t *ctx, target_ulong pte0,
257 target_ulong pte1, int h, int rw, int type)
259 return _pte_check(ctx, 0, pte0, pte1, h, rw, type);
262 #if defined(TARGET_PPC64)
263 static inline int pte64_check(mmu_ctx_t *ctx, target_ulong pte0,
264 target_ulong pte1, int h, int rw, int type)
266 return _pte_check(ctx, 1, pte0, pte1, h, rw, type);
268 #endif
270 static inline int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
271 int ret, int rw)
273 int store = 0;
275 /* Update page flags */
276 if (!(*pte1p & 0x00000100)) {
277 /* Update accessed flag */
278 *pte1p |= 0x00000100;
279 store = 1;
281 if (!(*pte1p & 0x00000080)) {
282 if (rw == 1 && ret == 0) {
283 /* Update changed flag */
284 *pte1p |= 0x00000080;
285 store = 1;
286 } else {
287 /* Force page fault for first write access */
288 ctx->prot &= ~PAGE_WRITE;
292 return store;
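/* Editor's note, not part of the original file: the two bits updated above
 * are the architected Reference (R, 0x00000100) and Change (C, 0x00000080)
 * bits of the low PTE word.  R is set whenever the entry is used; C is set
 * only on a successful store, otherwise PAGE_WRITE is stripped from the
 * context so that the first real write faults again and gets a chance to
 * set C.
 */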
295 /* Software driven TLB helpers */
296 static inline int ppc6xx_tlb_getnum(CPUState *env, target_ulong eaddr, int way,
297 int is_code)
299 int nr;
301 /* Select TLB num in a way from address */
302 nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
303 /* Select TLB way */
304 nr += env->tlb_per_way * way;
305 /* 6xx have separate TLBs for instructions and data */
306 if (is_code && env->id_tlbs == 1)
307 nr += env->nb_tlb;
309 return nr;
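#if 0 /* Editor's illustrative sketch, not part of the original file: the
       * indexing above, assuming a hypothetical 64-entry, 2-way split I/D
       * configuration (nb_tlb = 64, tlb_per_way = 32). */
static int example_6xx_tlb_index(void)
{
    int tlb_per_way = 32, nb_tlb = 64, way = 1;
    target_ulong eaddr = 0x00407000;
    int nr = (eaddr >> TARGET_PAGE_BITS) & (tlb_per_way - 1);  /* set 7 */
    nr += tlb_per_way * way;                                   /* 39 (data) */
    nr += nb_tlb;                                              /* 103 (code) */
    return nr;
}
#endif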
312 static inline void ppc6xx_tlb_invalidate_all(CPUState *env)
314 ppc6xx_tlb_t *tlb;
315 int nr, max;
317 //LOG_SWTLB("Invalidate all TLBs\n");
318 /* Invalidate all defined software TLB */
319 max = env->nb_tlb;
320 if (env->id_tlbs == 1)
321 max *= 2;
322 for (nr = 0; nr < max; nr++) {
323 tlb = &env->tlb[nr].tlb6;
324 pte_invalidate(&tlb->pte0);
326 tlb_flush(env, 1);
329 static inline void __ppc6xx_tlb_invalidate_virt(CPUState *env,
330 target_ulong eaddr,
331 int is_code, int match_epn)
333 #if !defined(FLUSH_ALL_TLBS)
334 ppc6xx_tlb_t *tlb;
335 int way, nr;
337 /* Invalidate ITLB + DTLB, all ways */
338 for (way = 0; way < env->nb_ways; way++) {
339 nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
340 tlb = &env->tlb[nr].tlb6;
341 if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
342 LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr,
343 env->nb_tlb, eaddr);
344 pte_invalidate(&tlb->pte0);
345 tlb_flush_page(env, tlb->EPN);
348 #else
349 /* XXX: the PowerPC specification says this is valid as well */
350 ppc6xx_tlb_invalidate_all(env);
351 #endif
354 static inline void ppc6xx_tlb_invalidate_virt(CPUState *env,
355 target_ulong eaddr, int is_code)
357 __ppc6xx_tlb_invalidate_virt(env, eaddr, is_code, 0);
360 void ppc6xx_tlb_store (CPUState *env, target_ulong EPN, int way, int is_code,
361 target_ulong pte0, target_ulong pte1)
363 ppc6xx_tlb_t *tlb;
364 int nr;
366 nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
367 tlb = &env->tlb[nr].tlb6;
368 LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
369 " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1);
370 /* Invalidate any pending reference in Qemu for this virtual address */
371 __ppc6xx_tlb_invalidate_virt(env, EPN, is_code, 1);
372 tlb->pte0 = pte0;
373 tlb->pte1 = pte1;
374 tlb->EPN = EPN;
375 /* Store last way for LRU mechanism */
376 env->last_way = way;
379 static inline int ppc6xx_tlb_check(CPUState *env, mmu_ctx_t *ctx,
380 target_ulong eaddr, int rw, int access_type)
382 ppc6xx_tlb_t *tlb;
383 int nr, best, way;
384 int ret;
386 best = -1;
387 ret = -1; /* No TLB found */
388 for (way = 0; way < env->nb_ways; way++) {
389 nr = ppc6xx_tlb_getnum(env, eaddr, way,
390 access_type == ACCESS_CODE ? 1 : 0);
391 tlb = &env->tlb[nr].tlb6;
392 /* This test "emulates" the PTE index match for hardware TLBs */
393 if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
394 LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx
395 "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb,
396 pte_is_valid(tlb->pte0) ? "valid" : "inval",
397 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
398 continue;
400 LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " "
401 TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb,
402 pte_is_valid(tlb->pte0) ? "valid" : "inval",
403 tlb->EPN, eaddr, tlb->pte1,
404 rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D');
405 switch (pte32_check(ctx, tlb->pte0, tlb->pte1, 0, rw, access_type)) {
406 case -3:
407 /* TLB inconsistency */
408 return -1;
409 case -2:
410 /* Access violation */
411 ret = -2;
412 best = nr;
413 break;
414 case -1:
415 default:
416 /* No match */
417 break;
418 case 0:
419 /* access granted */
420 /* XXX: we should go on looping to check the consistency of all TLBs,
421 * but we can speed up the whole thing as the
422 * result would be undefined if the TLBs are not consistent.
424 ret = 0;
425 best = nr;
426 goto done;
429 if (best != -1) {
430 done:
431 LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n",
432 ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
433 /* Update page flags */
434 pte_update_flags(ctx, &env->tlb[best].tlb6.pte1, ret, rw);
437 return ret;
440 /* Perform BAT hit & translation */
441 static inline void bat_size_prot(CPUState *env, target_ulong *blp, int *validp,
442 int *protp, target_ulong *BATu,
443 target_ulong *BATl)
445 target_ulong bl;
446 int pp, valid, prot;
448 bl = (*BATu & 0x00001FFC) << 15;
449 valid = 0;
450 prot = 0;
451 if (((msr_pr == 0) && (*BATu & 0x00000002)) ||
452 ((msr_pr != 0) && (*BATu & 0x00000001))) {
453 valid = 1;
454 pp = *BATl & 0x00000003;
455 if (pp != 0) {
456 prot = PAGE_READ | PAGE_EXEC;
457 if (pp == 0x2)
458 prot |= PAGE_WRITE;
461 *blp = bl;
462 *validp = valid;
463 *protp = prot;
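/* Editor's note, not part of the original file: field layout assumed by
 * bat_size_prot() above for the standard (non-601) BAT pair:
 *   BATu: BEPI = BATu & 0xFFFE0000, BL = BATu & 0x00001FFC (block length
 *         mask = BL << 15), Vs = bit 1, Vp = bit 0
 *   BATl: BRPN = BATl & 0xFFFE0000, PP = BATl & 0x3
 * PP != 0 grants READ|EXEC, and PP == 2 additionally grants WRITE.
 */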
466 static inline void bat_601_size_prot(CPUState *env, target_ulong *blp,
467 int *validp, int *protp,
468 target_ulong *BATu, target_ulong *BATl)
470 target_ulong bl;
471 int key, pp, valid, prot;
473 bl = (*BATl & 0x0000003F) << 17;
474 LOG_BATS("b %02x ==> bl " TARGET_FMT_lx " msk " TARGET_FMT_lx "\n",
475 (uint8_t)(*BATl & 0x0000003F), bl, ~bl);
476 prot = 0;
477 valid = (*BATl >> 6) & 1;
478 if (valid) {
479 pp = *BATu & 0x00000003;
480 if (msr_pr == 0)
481 key = (*BATu >> 3) & 1;
482 else
483 key = (*BATu >> 2) & 1;
484 prot = pp_check(key, pp, 0);
486 *blp = bl;
487 *validp = valid;
488 *protp = prot;
491 static inline int get_bat(CPUState *env, mmu_ctx_t *ctx, target_ulong virtual,
492 int rw, int type)
494 target_ulong *BATlt, *BATut, *BATu, *BATl;
495 target_ulong BEPIl, BEPIu, bl;
496 int i, valid, prot;
497 int ret = -1;
499 LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
500 type == ACCESS_CODE ? 'I' : 'D', virtual);
501 switch (type) {
502 case ACCESS_CODE:
503 BATlt = env->IBAT[1];
504 BATut = env->IBAT[0];
505 break;
506 default:
507 BATlt = env->DBAT[1];
508 BATut = env->DBAT[0];
509 break;
511 for (i = 0; i < env->nb_BATs; i++) {
512 BATu = &BATut[i];
513 BATl = &BATlt[i];
514 BEPIu = *BATu & 0xF0000000;
515 BEPIl = *BATu & 0x0FFE0000;
516 if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
517 bat_601_size_prot(env, &bl, &valid, &prot, BATu, BATl);
518 } else {
519 bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
521 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
522 " BATl " TARGET_FMT_lx "\n", __func__,
523 type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl);
524 if ((virtual & 0xF0000000) == BEPIu &&
525 ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
526 /* BAT matches */
527 if (valid != 0) {
528 /* Get physical address */
529 ctx->raddr = (*BATl & 0xF0000000) |
530 ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
531 (virtual & 0x0001F000);
532 /* Compute access rights */
533 ctx->prot = prot;
534 ret = check_prot(ctx->prot, rw, type);
535 if (ret == 0)
536 LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n",
537 i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-',
538 ctx->prot & PAGE_WRITE ? 'W' : '-');
539 break;
543 if (ret < 0) {
544 #if defined(DEBUG_BATS)
545 if (qemu_log_enabled()) {
546 LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual);
547 for (i = 0; i < 4; i++) {
548 BATu = &BATut[i];
549 BATl = &BATlt[i];
550 BEPIu = *BATu & 0xF0000000;
551 BEPIl = *BATu & 0x0FFE0000;
552 bl = (*BATu & 0x00001FFC) << 15;
553 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
554 " BATl " TARGET_FMT_lx " \n\t" TARGET_FMT_lx " "
555 TARGET_FMT_lx " " TARGET_FMT_lx "\n",
556 __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual,
557 *BATu, *BATl, BEPIu, BEPIl, bl);
560 #endif
562 /* No hit */
563 return ret;
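#if 0 /* Editor's illustrative sketch, not part of the original file: a worked
       * BAT hit, assuming a hypothetical 256MB BAT mapping EA 0xC0000000
       * onto PA 0x00000000 read/write, checked in supervisor mode. */
static void example_bat_hit(void)
{
    target_ulong BATu = 0xC0001FFE;   /* BEPI 0xC0000000, BL 0x7FF, Vs = 1 */
    target_ulong BATl = 0x00000002;   /* BRPN 0x00000000, PP = 2 (RW) */
    target_ulong ea   = 0xC1234567;
    target_ulong bl   = (BATu & 0x00001FFC) << 15;            /* 0x0FFE0000 */
    target_phys_addr_t raddr;

    if ((ea & 0xF0000000) == (BATu & 0xF0000000) &&
        ((ea & 0x0FFE0000) & ~bl) == (BATu & 0x0FFE0000)) {
        raddr = (BATl & 0xF0000000) |
                ((ea & 0x0FFE0000 & bl) | (BATl & 0x0FFE0000)) |
                (ea & 0x0001F000);
        /* raddr == 0x01234000: the offset within the block carries over */
        (void)raddr;
    }
}
#endif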
566 /* PTE table lookup */
567 static inline int _find_pte(mmu_ctx_t *ctx, int is_64b, int h, int rw,
568 int type, int target_page_bits)
570 target_ulong base, pte0, pte1;
571 int i, good = -1;
572 int ret, r;
574 ret = -1; /* No entry found */
575 base = ctx->pg_addr[h];
576 for (i = 0; i < 8; i++) {
577 #if defined(TARGET_PPC64)
578 if (is_64b) {
579 pte0 = ldq_phys(base + (i * 16));
580 pte1 = ldq_phys(base + (i * 16) + 8);
582 /* We have a TLB that holds 4K pages, so let's
583 * split a huge page into 4K chunks */
584 if (target_page_bits != TARGET_PAGE_BITS)
585 pte1 |= (ctx->eaddr & (( 1 << target_page_bits ) - 1))
586 & TARGET_PAGE_MASK;
588 r = pte64_check(ctx, pte0, pte1, h, rw, type);
589 LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " "
590 TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n",
591 base + (i * 16), pte0, pte1, (int)(pte0 & 1), h,
592 (int)((pte0 >> 1) & 1), ctx->ptem);
593 } else
594 #endif
596 pte0 = ldl_phys(base + (i * 8));
597 pte1 = ldl_phys(base + (i * 8) + 4);
598 r = pte32_check(ctx, pte0, pte1, h, rw, type);
599 LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " "
600 TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n",
601 base + (i * 8), pte0, pte1, (int)(pte0 >> 31), h,
602 (int)((pte0 >> 6) & 1), ctx->ptem);
604 switch (r) {
605 case -3:
606 /* PTE inconsistency */
607 return -1;
608 case -2:
609 /* Access violation */
610 ret = -2;
611 good = i;
612 break;
613 case -1:
614 default:
615 /* No PTE match */
616 break;
617 case 0:
618 /* access granted */
619 /* XXX: we should go on looping to check the consistency of all PTEs,
620 * but we can speed up the whole thing as the
621 * result would be undefined if the PTEs are not consistent.
623 ret = 0;
624 good = i;
625 goto done;
628 if (good != -1) {
629 done:
630 LOG_MMU("found PTE at addr " TARGET_FMT_lx " prot=%01x ret=%d\n",
631 ctx->raddr, ctx->prot, ret);
632 /* Update page flags */
633 pte1 = ctx->raddr;
634 if (pte_update_flags(ctx, &pte1, ret, rw) == 1) {
635 #if defined(TARGET_PPC64)
636 if (is_64b) {
637 stq_phys_notdirty(base + (good * 16) + 8, pte1);
638 } else
639 #endif
641 stl_phys_notdirty(base + (good * 8) + 4, pte1);
646 return ret;
649 static inline int find_pte32(mmu_ctx_t *ctx, int h, int rw, int type,
650 int target_page_bits)
652 return _find_pte(ctx, 0, h, rw, type, target_page_bits);
655 #if defined(TARGET_PPC64)
656 static inline int find_pte64(mmu_ctx_t *ctx, int h, int rw, int type,
657 int target_page_bits)
659 return _find_pte(ctx, 1, h, rw, type, target_page_bits);
661 #endif
663 static inline int find_pte(CPUState *env, mmu_ctx_t *ctx, int h, int rw,
664 int type, int target_page_bits)
666 #if defined(TARGET_PPC64)
667 if (env->mmu_model & POWERPC_MMU_64)
668 return find_pte64(ctx, h, rw, type, target_page_bits);
669 #endif
671 return find_pte32(ctx, h, rw, type, target_page_bits);
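/* Editor's note, not part of the original file: _find_pte() above scans one
 * PTE group (PTEG) of 8 entries.  With the 32-bit hashed MMU each entry is
 * 8 bytes (pte0 at +0, pte1 at +4, hence ldl_phys(base + i * 8)); with the
 * 64-bit MMU it is 16 bytes (ldq_phys at +0 and +8).  ctx->pg_addr[0] holds
 * the primary group address and ctx->pg_addr[1] the secondary one, both set
 * up by get_segment() further down.
 */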
674 #if defined(TARGET_PPC64)
675 static ppc_slb_t *slb_get_entry(CPUPPCState *env, int nr)
677 ppc_slb_t *retval = &env->slb[nr];
679 #if 0 // XXX implement bridge mode?
680 if (env->spr[SPR_ASR] & 1) {
681 target_phys_addr_t sr_base;
683 sr_base = env->spr[SPR_ASR] & 0xfffffffffffff000;
684 sr_base += (12 * nr);
686 retval->tmp64 = ldq_phys(sr_base);
687 retval->tmp = ldl_phys(sr_base + 8);
689 #endif
691 return retval;
694 static void slb_set_entry(CPUPPCState *env, int nr, ppc_slb_t *slb)
696 ppc_slb_t *entry = &env->slb[nr];
698 if (slb == entry)
699 return;
701 entry->tmp64 = slb->tmp64;
702 entry->tmp = slb->tmp;
705 static inline int slb_is_valid(ppc_slb_t *slb)
707 return (int)(slb->tmp64 & 0x0000000008000000ULL);
710 static inline void slb_invalidate(ppc_slb_t *slb)
712 slb->tmp64 &= ~0x0000000008000000ULL;
715 static inline int slb_lookup(CPUPPCState *env, target_ulong eaddr,
716 target_ulong *vsid, target_ulong *page_mask,
717 int *attr, int *target_page_bits)
719 target_ulong mask;
720 int n, ret;
722 ret = -5;
723 LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);
724 mask = 0x0000000000000000ULL; /* Avoid gcc warning */
725 for (n = 0; n < env->slb_nr; n++) {
726 ppc_slb_t *slb = slb_get_entry(env, n);
728 LOG_SLB("%s: seg %d %016" PRIx64 " %08"
729 PRIx32 "\n", __func__, n, slb->tmp64, slb->tmp);
730 if (slb_is_valid(slb)) {
731 /* SLB entry is valid */
732 mask = 0xFFFFFFFFF0000000ULL;
733 if (slb->tmp & 0x8) {
734 /* 16 MB PTEs */
735 if (target_page_bits)
736 *target_page_bits = 24;
737 } else {
738 /* 4 KB PTEs */
739 if (target_page_bits)
740 *target_page_bits = TARGET_PAGE_BITS;
742 if ((eaddr & mask) == (slb->tmp64 & mask)) {
743 /* SLB match */
744 *vsid = ((slb->tmp64 << 24) | (slb->tmp >> 8)) & 0x0003FFFFFFFFFFFFULL;
745 *page_mask = ~mask;
746 *attr = slb->tmp & 0xFF;
747 ret = n;
748 break;
753 return ret;
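/* Editor's note, not part of the original file: in this version of the code
 * an SLB entry is stored as two raw words, slb->tmp64 and slb->tmp.  As used
 * above: the ESID is compared under the mask 0xFFFFFFFFF0000000 applied to
 * tmp64, the VSID is rebuilt as ((tmp64 << 24) | (tmp >> 8)) masked with
 * 0x0003FFFFFFFFFFFF, the attribute/protection byte is the low byte of tmp,
 * and bit 3 of tmp selects 16MB pages (target_page_bits = 24) instead of
 * 4KB ones.
 */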
756 void ppc_slb_invalidate_all (CPUPPCState *env)
758 int n, do_invalidate;
760 do_invalidate = 0;
761 /* XXX: Warning: slbia never invalidates the first segment */
762 for (n = 1; n < env->slb_nr; n++) {
763 ppc_slb_t *slb = slb_get_entry(env, n);
765 if (slb_is_valid(slb)) {
766 slb_invalidate(slb);
767 slb_set_entry(env, n, slb);
768 /* XXX: given the fact that segment size is 256 MB or 1TB,
769 * and we still don't have a tlb_flush_mask(env, n, mask)
770 * in Qemu, we just invalidate all TLBs
772 do_invalidate = 1;
775 if (do_invalidate)
776 tlb_flush(env, 1);
779 void ppc_slb_invalidate_one (CPUPPCState *env, uint64_t T0)
781 target_ulong vsid, page_mask;
782 int attr;
783 int n;
785 n = slb_lookup(env, T0, &vsid, &page_mask, &attr, NULL);
786 if (n >= 0) {
787 ppc_slb_t *slb = slb_get_entry(env, n);
789 if (slb_is_valid(slb)) {
790 slb_invalidate(slb);
791 slb_set_entry(env, n, slb);
792 /* XXX: given the fact that segment size is 256 MB or 1TB,
793 * and we still don't have a tlb_flush_mask(env, n, mask)
794 * in Qemu, we just invalidate all TLBs
796 tlb_flush(env, 1);
801 target_ulong ppc_load_slb (CPUPPCState *env, int slb_nr)
803 target_ulong rt;
804 ppc_slb_t *slb = slb_get_entry(env, slb_nr);
806 if (slb_is_valid(slb)) {
807 /* SLB entry is valid */
808 /* Copy SLB bits 62:88 to Rt 37:63 (VSID 23:49) */
809 rt = slb->tmp >> 8; /* 65:88 => 40:63 */
810 rt |= (slb->tmp64 & 0x7) << 24; /* 62:64 => 37:39 */
811 /* Copy SLB bits 89:92 to Rt 33:36 (KsKpNL) */
812 rt |= ((slb->tmp >> 4) & 0xF) << 27;
813 } else {
814 rt = 0;
816 LOG_SLB("%s: %016" PRIx64 " %08" PRIx32 " => %d "
817 TARGET_FMT_lx "\n", __func__, slb->tmp64, slb->tmp, slb_nr, rt);
819 return rt;
822 void ppc_store_slb (CPUPPCState *env, target_ulong rb, target_ulong rs)
824 ppc_slb_t *slb;
826 uint64_t vsid;
827 uint64_t esid;
828 int flags, valid, slb_nr;
830 vsid = rs >> 12;
831 flags = ((rs >> 8) & 0xf);
833 esid = rb >> 28;
834 valid = (rb & (1 << 27));
835 slb_nr = rb & 0xfff;
837 slb = slb_get_entry(env, slb_nr);
838 slb->tmp64 = (esid << 28) | valid | (vsid >> 24);
839 slb->tmp = (vsid << 8) | (flags << 3);
841 LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
842 " %08" PRIx32 "\n", __func__, slb_nr, rb, rs, slb->tmp64,
843 slb->tmp);
845 slb_set_entry(env, slb_nr, slb);
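/* Editor's note, not part of the original file: how ppc_store_slb() above
 * decodes its operands and packs the internal entry:
 *   rs: vsid = rs >> 12,  flags = (rs >> 8) & 0xf
 *   rb: esid = rb >> 28,  valid = rb & (1 << 27),  index = rb & 0xfff
 *   tmp64 = (esid << 28) | valid | (vsid >> 24)
 *   tmp   = (vsid << 8) | (flags << 3)
 */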
847 #endif /* defined(TARGET_PPC64) */
849 /* Perform segment based translation */
850 static inline target_phys_addr_t get_pgaddr(target_phys_addr_t sdr1,
851 int sdr_sh,
852 target_phys_addr_t hash,
853 target_phys_addr_t mask)
855 return (sdr1 & ((target_phys_addr_t)(-1ULL) << sdr_sh)) | (hash & mask);
858 static inline int get_segment(CPUState *env, mmu_ctx_t *ctx,
859 target_ulong eaddr, int rw, int type)
861 target_phys_addr_t sdr, hash, mask, sdr_mask, htab_mask;
862 target_ulong sr, vsid, vsid_mask, pgidx, page_mask;
863 #if defined(TARGET_PPC64)
864 int attr;
865 #endif
866 int ds, vsid_sh, sdr_sh, pr, target_page_bits;
867 int ret, ret2;
869 pr = msr_pr;
870 #if defined(TARGET_PPC64)
871 if (env->mmu_model & POWERPC_MMU_64) {
872 LOG_MMU("Check SLBs\n");
873 ret = slb_lookup(env, eaddr, &vsid, &page_mask, &attr,
874 &target_page_bits);
875 if (ret < 0)
876 return ret;
877 ctx->key = ((attr & 0x40) && (pr != 0)) ||
878 ((attr & 0x80) && (pr == 0)) ? 1 : 0;
879 ds = 0;
880 ctx->nx = attr & 0x10 ? 1 : 0;
881 ctx->eaddr = eaddr;
882 vsid_mask = 0x00003FFFFFFFFF80ULL;
883 vsid_sh = 7;
884 sdr_sh = 18;
885 sdr_mask = 0x3FF80;
886 } else
887 #endif /* defined(TARGET_PPC64) */
889 sr = env->sr[eaddr >> 28];
890 page_mask = 0x0FFFFFFF;
891 ctx->key = (((sr & 0x20000000) && (pr != 0)) ||
892 ((sr & 0x40000000) && (pr == 0))) ? 1 : 0;
893 ds = sr & 0x80000000 ? 1 : 0;
894 ctx->nx = sr & 0x10000000 ? 1 : 0;
895 vsid = sr & 0x00FFFFFF;
896 vsid_mask = 0x01FFFFC0;
897 vsid_sh = 6;
898 sdr_sh = 16;
899 sdr_mask = 0xFFC0;
900 target_page_bits = TARGET_PAGE_BITS;
901 LOG_MMU("Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx " nip="
902 TARGET_FMT_lx " lr=" TARGET_FMT_lx
903 " ir=%d dr=%d pr=%d %d t=%d\n",
904 eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
905 (int)msr_dr, pr != 0 ? 1 : 0, rw, type);
907 LOG_MMU("pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
908 ctx->key, ds, ctx->nx, vsid);
909 ret = -1;
910 if (!ds) {
911 /* Check if instruction fetch is allowed, if needed */
912 if (type != ACCESS_CODE || ctx->nx == 0) {
913 /* Page address translation */
914 /* Primary table address */
915 sdr = env->sdr1;
916 pgidx = (eaddr & page_mask) >> target_page_bits;
917 #if defined(TARGET_PPC64)
918 if (env->mmu_model & POWERPC_MMU_64) {
919 htab_mask = 0x0FFFFFFF >> (28 - (sdr & 0x1F));
920 /* XXX: this is false for 1 TB segments */
921 hash = ((vsid ^ pgidx) << vsid_sh) & vsid_mask;
922 } else
923 #endif
925 htab_mask = sdr & 0x000001FF;
926 hash = ((vsid ^ pgidx) << vsid_sh) & vsid_mask;
928 mask = (htab_mask << sdr_sh) | sdr_mask;
929 LOG_MMU("sdr " TARGET_FMT_plx " sh %d hash " TARGET_FMT_plx
930 " mask " TARGET_FMT_plx " " TARGET_FMT_lx "\n",
931 sdr, sdr_sh, hash, mask, page_mask);
932 ctx->pg_addr[0] = get_pgaddr(sdr, sdr_sh, hash, mask);
933 /* Secondary table address */
934 hash = (~hash) & vsid_mask;
935 LOG_MMU("sdr " TARGET_FMT_plx " sh %d hash " TARGET_FMT_plx
936 " mask " TARGET_FMT_plx "\n", sdr, sdr_sh, hash, mask);
937 ctx->pg_addr[1] = get_pgaddr(sdr, sdr_sh, hash, mask);
938 #if defined(TARGET_PPC64)
939 if (env->mmu_model & POWERPC_MMU_64) {
940 /* Only 5 bits of the page index are used in the AVPN */
941 if (target_page_bits > 23) {
942 ctx->ptem = (vsid << 12) |
943 ((pgidx << (target_page_bits - 16)) & 0xF80);
944 } else {
945 ctx->ptem = (vsid << 12) | ((pgidx >> 4) & 0x0F80);
947 } else
948 #endif
950 ctx->ptem = (vsid << 7) | (pgidx >> 10);
952 /* Initialize real address with an invalid value */
953 ctx->raddr = (target_phys_addr_t)-1ULL;
954 if (unlikely(env->mmu_model == POWERPC_MMU_SOFT_6xx ||
955 env->mmu_model == POWERPC_MMU_SOFT_74xx)) {
956 /* Software TLB search */
957 ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type);
958 } else {
959 LOG_MMU("0 sdr1=" TARGET_FMT_plx " vsid=" TARGET_FMT_lx " "
960 "api=" TARGET_FMT_lx " hash=" TARGET_FMT_plx
961 " pg_addr=" TARGET_FMT_plx "\n",
962 sdr, vsid, pgidx, hash, ctx->pg_addr[0]);
963 /* Primary table lookup */
964 ret = find_pte(env, ctx, 0, rw, type, target_page_bits);
965 if (ret < 0) {
966 /* Secondary table lookup */
967 if (eaddr != 0xEFFFFFFF)
968 LOG_MMU("1 sdr1=" TARGET_FMT_plx " vsid=" TARGET_FMT_lx " "
969 "api=" TARGET_FMT_lx " hash=" TARGET_FMT_plx
970 " pg_addr=" TARGET_FMT_plx "\n", sdr, vsid,
971 pgidx, hash, ctx->pg_addr[1]);
972 ret2 = find_pte(env, ctx, 1, rw, type,
973 target_page_bits);
974 if (ret2 != -1)
975 ret = ret2;
978 #if defined (DUMP_PAGE_TABLES)
979 if (qemu_log_enabled()) {
980 target_phys_addr_t curaddr;
981 uint32_t a0, a1, a2, a3;
982 qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx
983 "\n", sdr, mask + 0x80);
984 for (curaddr = sdr; curaddr < (sdr + mask + 0x80);
985 curaddr += 16) {
986 a0 = ldl_phys(curaddr);
987 a1 = ldl_phys(curaddr + 4);
988 a2 = ldl_phys(curaddr + 8);
989 a3 = ldl_phys(curaddr + 12);
990 if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
991 qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n",
992 curaddr, a0, a1, a2, a3);
996 #endif
997 } else {
998 LOG_MMU("No access allowed\n");
999 ret = -3;
1001 } else {
1002 LOG_MMU("direct store...\n");
1003 /* Direct-store segment : absolutely *BUGGY* for now */
1004 switch (type) {
1005 case ACCESS_INT:
1006 /* Integer load/store : only access allowed */
1007 break;
1008 case ACCESS_CODE:
1009 /* No code fetch is allowed in direct-store areas */
1010 return -4;
1011 case ACCESS_FLOAT:
1012 /* Floating point load/store */
1013 return -4;
1014 case ACCESS_RES:
1015 /* lwarx, ldarx or stwcx. */
1016 return -4;
1017 case ACCESS_CACHE:
1018 /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
1019 /* Should make the instruction a no-op.
1020 * As it already is a no-op, it's quite easy :-)
1022 ctx->raddr = eaddr;
1023 return 0;
1024 case ACCESS_EXT:
1025 /* eciwx or ecowx */
1026 return -4;
1027 default:
1028 qemu_log("ERROR: instruction should not need "
1029 "address translation\n");
1030 return -4;
1032 if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) {
1033 ctx->raddr = eaddr;
1034 ret = 2;
1035 } else {
1036 ret = -2;
1040 return ret;
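#if 0 /* Editor's illustrative sketch, not part of the original file: the
       * 32-bit primary/secondary hash computed above, with hypothetical
       * values (VSID 0x123, eaddr 0x10345678, 4KB pages). */
static void example_32bit_hash(void)
{
    target_ulong vsid  = 0x123;
    target_ulong pgidx = (0x10345678 & 0x0FFFFFFF) >> TARGET_PAGE_BITS; /* 0x345 */
    target_ulong hash  = ((vsid ^ pgidx) << 6) & 0x01FFFFC0;  /* 0x9980 */
    target_ulong hash2 = (~hash) & 0x01FFFFC0;                /* 0x01FF6640 */
    target_ulong ptem  = (vsid << 7) | (pgidx >> 10);         /* 0x9180 */

    /* get_pgaddr() then combines each hash with the page table base taken
     * from SDR1 to obtain the primary and secondary PTE group addresses. */
    (void)hash2;
    (void)ptem;
}
#endif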
1043 /* Generic TLB check function for embedded PowerPC implementations */
1044 static inline int ppcemb_tlb_check(CPUState *env, ppcemb_tlb_t *tlb,
1045 target_phys_addr_t *raddrp,
1046 target_ulong address, uint32_t pid, int ext,
1047 int i)
1049 target_ulong mask;
1051 /* Check valid flag */
1052 if (!(tlb->prot & PAGE_VALID)) {
1053 return -1;
1055 mask = ~(tlb->size - 1);
1056 LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx
1057 " " TARGET_FMT_lx " %u\n", __func__, i, address, pid, tlb->EPN,
1058 mask, (uint32_t)tlb->PID);
1059 /* Check PID */
1060 if (tlb->PID != 0 && tlb->PID != pid)
1061 return -1;
1062 /* Check effective address */
1063 if ((address & mask) != tlb->EPN)
1064 return -1;
1065 *raddrp = (tlb->RPN & mask) | (address & ~mask);
1066 #if (TARGET_PHYS_ADDR_BITS >= 36)
1067 if (ext) {
1068 /* Extend the physical address to 36 bits */
1069 *raddrp |= (target_phys_addr_t)(tlb->RPN & 0xF) << 32;
1071 #endif
1073 return 0;
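/* Editor's note, not part of the original file: unlike the hashed MMUs, the
 * embedded TLB entries checked above carry their own size, so the match mask
 * is simply ~(size - 1).  A PID of 0 in the entry makes it global, otherwise
 * it must equal the current PID; the real address is the entry's RPN with
 * the in-page offset merged back in, and on targets with >= 36-bit physical
 * addresses the low nibble of RPN supplies physical address bits 35:32.
 */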
1076 /* Generic TLB search function for PowerPC embedded implementations */
1077 int ppcemb_tlb_search (CPUPPCState *env, target_ulong address, uint32_t pid)
1079 ppcemb_tlb_t *tlb;
1080 target_phys_addr_t raddr;
1081 int i, ret;
1083 /* Default return value is no match */
1084 ret = -1;
1085 for (i = 0; i < env->nb_tlb; i++) {
1086 tlb = &env->tlb[i].tlbe;
1087 if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) {
1088 ret = i;
1089 break;
1093 return ret;
1096 /* Helpers specific to PowerPC 40x implementations */
1097 static inline void ppc4xx_tlb_invalidate_all(CPUState *env)
1099 ppcemb_tlb_t *tlb;
1100 int i;
1102 for (i = 0; i < env->nb_tlb; i++) {
1103 tlb = &env->tlb[i].tlbe;
1104 tlb->prot &= ~PAGE_VALID;
1106 tlb_flush(env, 1);
1109 static inline void ppc4xx_tlb_invalidate_virt(CPUState *env,
1110 target_ulong eaddr, uint32_t pid)
1112 #if !defined(FLUSH_ALL_TLBS)
1113 ppcemb_tlb_t *tlb;
1114 target_phys_addr_t raddr;
1115 target_ulong page, end;
1116 int i;
1118 for (i = 0; i < env->nb_tlb; i++) {
1119 tlb = &env->tlb[i].tlbe;
1120 if (ppcemb_tlb_check(env, tlb, &raddr, eaddr, pid, 0, i) == 0) {
1121 end = tlb->EPN + tlb->size;
1122 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
1123 tlb_flush_page(env, page);
1124 tlb->prot &= ~PAGE_VALID;
1125 break;
1128 #else
1129 ppc4xx_tlb_invalidate_all(env);
1130 #endif
1133 static int mmu40x_get_physical_address (CPUState *env, mmu_ctx_t *ctx,
1134 target_ulong address, int rw, int access_type)
1136 ppcemb_tlb_t *tlb;
1137 target_phys_addr_t raddr;
1138 int i, ret, zsel, zpr, pr;
1140 ret = -1;
1141 raddr = (target_phys_addr_t)-1ULL;
1142 pr = msr_pr;
1143 for (i = 0; i < env->nb_tlb; i++) {
1144 tlb = &env->tlb[i].tlbe;
1145 if (ppcemb_tlb_check(env, tlb, &raddr, address,
1146 env->spr[SPR_40x_PID], 0, i) < 0)
1147 continue;
1148 zsel = (tlb->attr >> 4) & 0xF;
1149 zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
1150 LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n",
1151 __func__, i, zsel, zpr, rw, tlb->attr);
1152 /* Check execute enable bit */
1153 switch (zpr) {
1154 case 0x2:
1155 if (pr != 0)
1156 goto check_perms;
1157 /* No break here */
1158 case 0x3:
1159 /* All accesses granted */
1160 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1161 ret = 0;
1162 break;
1163 case 0x0:
1164 if (pr != 0) {
1165 /* Raise Zone protection fault. */
1166 env->spr[SPR_40x_ESR] = 1 << 22;
1167 ctx->prot = 0;
1168 ret = -2;
1169 break;
1171 /* No break here */
1172 case 0x1:
1173 check_perms:
1174 /* Check from TLB entry */
1175 ctx->prot = tlb->prot;
1176 ret = check_prot(ctx->prot, rw, access_type);
1177 if (ret == -2)
1178 env->spr[SPR_40x_ESR] = 0;
1179 break;
1181 if (ret >= 0) {
1182 ctx->raddr = raddr;
1183 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
1184 " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
1185 ret);
1186 return 0;
1189 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
1190 " %d %d\n", __func__, address, raddr, ctx->prot, ret);
1192 return ret;
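/* Editor's note, not part of the original file: zone protection as handled
 * above.  zsel comes from bits 7:4 of the TLB entry's attr field and selects
 * a 2-bit field of SPR_40x_ZPR (zpr = (ZPR >> (30 - 2 * zsel)) & 3):
 *   zpr 0: user mode raises a zone protection fault (ESR bit 22),
 *          supervisor mode falls back to the TLB permission bits
 *   zpr 1: always use the TLB permission bits
 *   zpr 2: user mode uses the TLB permission bits, supervisor mode gets
 *          full access
 *   zpr 3: full access regardless of the TLB permission bits
 */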
1195 void store_40x_sler (CPUPPCState *env, uint32_t val)
1197 /* XXX: TO BE FIXED */
1198 if (val != 0x00000000) {
1199 cpu_abort(env, "Little-endian regions are not supported yet\n");
1201 env->spr[SPR_405_SLER] = val;
1204 static int mmubooke_get_physical_address (CPUState *env, mmu_ctx_t *ctx,
1205 target_ulong address, int rw,
1206 int access_type)
1208 ppcemb_tlb_t *tlb;
1209 target_phys_addr_t raddr;
1210 int i, prot, ret;
1212 ret = -1;
1213 raddr = (target_phys_addr_t)-1ULL;
1214 for (i = 0; i < env->nb_tlb; i++) {
1215 tlb = &env->tlb[i].tlbe;
1216 if (ppcemb_tlb_check(env, tlb, &raddr, address,
1217 env->spr[SPR_BOOKE_PID], 1, i) < 0)
1218 continue;
1219 if (msr_pr != 0)
1220 prot = tlb->prot & 0xF;
1221 else
1222 prot = (tlb->prot >> 4) & 0xF;
1223 /* Check the address space */
1224 if (access_type == ACCESS_CODE) {
1225 if (msr_ir != (tlb->attr & 1))
1226 continue;
1227 ctx->prot = prot;
1228 if (prot & PAGE_EXEC) {
1229 ret = 0;
1230 break;
1232 ret = -3;
1233 } else {
1234 if (msr_dr != (tlb->attr & 1))
1235 continue;
1236 ctx->prot = prot;
1237 if ((!rw && prot & PAGE_READ) || (rw && (prot & PAGE_WRITE))) {
1238 ret = 0;
1239 break;
1241 ret = -2;
1244 if (ret >= 0)
1245 ctx->raddr = raddr;
1247 return ret;
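/* Editor's note, not part of the original file: for the BookE lookup above,
 * the low nibble of tlb->prot holds the user permissions and the next nibble
 * the supervisor permissions (selected on msr_pr), while bit 0 of tlb->attr
 * is the entry's translation space and is matched against MSR[IS] for
 * instruction fetches and MSR[DS] for data accesses.
 */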
1250 static inline int check_physical(CPUState *env, mmu_ctx_t *ctx,
1251 target_ulong eaddr, int rw)
1253 int in_plb, ret;
1255 ctx->raddr = eaddr;
1256 ctx->prot = PAGE_READ | PAGE_EXEC;
1257 ret = 0;
1258 switch (env->mmu_model) {
1259 case POWERPC_MMU_32B:
1260 case POWERPC_MMU_601:
1261 case POWERPC_MMU_SOFT_6xx:
1262 case POWERPC_MMU_SOFT_74xx:
1263 case POWERPC_MMU_SOFT_4xx:
1264 case POWERPC_MMU_REAL:
1265 case POWERPC_MMU_BOOKE:
1266 ctx->prot |= PAGE_WRITE;
1267 break;
1268 #if defined(TARGET_PPC64)
1269 case POWERPC_MMU_620:
1270 case POWERPC_MMU_64B:
1271 /* Real addresses are 60 bits long */
1272 ctx->raddr &= 0x0FFFFFFFFFFFFFFFULL;
1273 ctx->prot |= PAGE_WRITE;
1274 break;
1275 #endif
1276 case POWERPC_MMU_SOFT_4xx_Z:
1277 if (unlikely(msr_pe != 0)) {
1278 /* The 403 family adds some particular protections,
1279 * using the PBL/PBU registers for accesses with no translation.
1281 in_plb =
1282 /* Check PLB validity */
1283 (env->pb[0] < env->pb[1] &&
1284 /* and address in plb area */
1285 eaddr >= env->pb[0] && eaddr < env->pb[1]) ||
1286 (env->pb[2] < env->pb[3] &&
1287 eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0;
1288 if (in_plb ^ msr_px) {
1289 /* Access in protected area */
1290 if (rw == 1) {
1291 /* Access is not allowed */
1292 ret = -2;
1294 } else {
1295 /* Read-write access is allowed */
1296 ctx->prot |= PAGE_WRITE;
1299 break;
1300 case POWERPC_MMU_MPC8xx:
1301 /* XXX: TODO */
1302 cpu_abort(env, "MPC8xx MMU model is not implemented\n");
1303 break;
1304 case POWERPC_MMU_BOOKE_FSL:
1305 /* XXX: TODO */
1306 cpu_abort(env, "BookE FSL MMU model not implemented\n");
1307 break;
1308 default:
1309 cpu_abort(env, "Unknown or invalid MMU model\n");
1310 return -1;
1313 return ret;
1316 int get_physical_address (CPUState *env, mmu_ctx_t *ctx, target_ulong eaddr,
1317 int rw, int access_type)
1319 int ret;
1321 #if 0
1322 qemu_log("%s\n", __func__);
1323 #endif
1324 if ((access_type == ACCESS_CODE && msr_ir == 0) ||
1325 (access_type != ACCESS_CODE && msr_dr == 0)) {
1326 if (env->mmu_model == POWERPC_MMU_BOOKE) {
1327 /* The BookE MMU always performs address translation. The
1328 IS and DS bits only affect the address space. */
1329 ret = mmubooke_get_physical_address(env, ctx, eaddr,
1330 rw, access_type);
1331 } else {
1332 /* No address translation. */
1333 ret = check_physical(env, ctx, eaddr, rw);
1335 } else {
1336 ret = -1;
1337 switch (env->mmu_model) {
1338 case POWERPC_MMU_32B:
1339 case POWERPC_MMU_601:
1340 case POWERPC_MMU_SOFT_6xx:
1341 case POWERPC_MMU_SOFT_74xx:
1342 /* Try to find a BAT */
1343 if (env->nb_BATs != 0)
1344 ret = get_bat(env, ctx, eaddr, rw, access_type);
1345 #if defined(TARGET_PPC64)
1346 case POWERPC_MMU_620:
1347 case POWERPC_MMU_64B:
1348 #endif
1349 if (ret < 0) {
1350 /* We didn't match any BAT entry or don't have BATs */
1351 ret = get_segment(env, ctx, eaddr, rw, access_type);
1353 break;
1354 case POWERPC_MMU_SOFT_4xx:
1355 case POWERPC_MMU_SOFT_4xx_Z:
1356 ret = mmu40x_get_physical_address(env, ctx, eaddr,
1357 rw, access_type);
1358 break;
1359 case POWERPC_MMU_BOOKE:
1360 ret = mmubooke_get_physical_address(env, ctx, eaddr,
1361 rw, access_type);
1362 break;
1363 case POWERPC_MMU_MPC8xx:
1364 /* XXX: TODO */
1365 cpu_abort(env, "MPC8xx MMU model is not implemented\n");
1366 break;
1367 case POWERPC_MMU_BOOKE_FSL:
1368 /* XXX: TODO */
1369 cpu_abort(env, "BookE FSL MMU model not implemented\n");
1370 return -1;
1371 case POWERPC_MMU_REAL:
1372 cpu_abort(env, "PowerPC in real mode does not do any translation\n");
1373 return -1;
1374 default:
1375 cpu_abort(env, "Unknown or invalid MMU model\n");
1376 return -1;
1379 #if 0
1380 qemu_log("%s address " TARGET_FMT_lx " => %d " TARGET_FMT_plx "\n",
1381 __func__, eaddr, ret, ctx->raddr);
1382 #endif
1384 return ret;
1387 target_phys_addr_t cpu_get_phys_page_debug (CPUState *env, target_ulong addr)
1389 mmu_ctx_t ctx;
1391 if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0))
1392 return -1;
1394 return ctx.raddr & TARGET_PAGE_MASK;
1397 /* Perform address translation */
1398 int cpu_ppc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
1399 int mmu_idx, int is_softmmu)
1401 mmu_ctx_t ctx;
1402 int access_type;
1403 int ret = 0;
1405 if (rw == 2) {
1406 /* code access */
1407 rw = 0;
1408 access_type = ACCESS_CODE;
1409 } else {
1410 /* data access */
1411 access_type = env->access_type;
1413 ret = get_physical_address(env, &ctx, address, rw, access_type);
1414 if (ret == 0) {
1415 tlb_set_page(env, address & TARGET_PAGE_MASK,
1416 ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
1417 mmu_idx, TARGET_PAGE_SIZE);
1418 ret = 0;
1419 } else if (ret < 0) {
1420 LOG_MMU_STATE(env);
1421 if (access_type == ACCESS_CODE) {
1422 switch (ret) {
1423 case -1:
1424 /* No matches in page tables or TLB */
1425 switch (env->mmu_model) {
1426 case POWERPC_MMU_SOFT_6xx:
1427 env->exception_index = POWERPC_EXCP_IFTLB;
1428 env->error_code = 1 << 18;
1429 env->spr[SPR_IMISS] = address;
1430 env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
1431 goto tlb_miss;
1432 case POWERPC_MMU_SOFT_74xx:
1433 env->exception_index = POWERPC_EXCP_IFTLB;
1434 goto tlb_miss_74xx;
1435 case POWERPC_MMU_SOFT_4xx:
1436 case POWERPC_MMU_SOFT_4xx_Z:
1437 env->exception_index = POWERPC_EXCP_ITLB;
1438 env->error_code = 0;
1439 env->spr[SPR_40x_DEAR] = address;
1440 env->spr[SPR_40x_ESR] = 0x00000000;
1441 break;
1442 case POWERPC_MMU_32B:
1443 case POWERPC_MMU_601:
1444 #if defined(TARGET_PPC64)
1445 case POWERPC_MMU_620:
1446 case POWERPC_MMU_64B:
1447 #endif
1448 env->exception_index = POWERPC_EXCP_ISI;
1449 env->error_code = 0x40000000;
1450 break;
1451 case POWERPC_MMU_BOOKE:
1452 env->exception_index = POWERPC_EXCP_ITLB;
1453 env->error_code = 0;
1454 env->spr[SPR_BOOKE_DEAR] = address;
1455 return -1;
1456 case POWERPC_MMU_BOOKE_FSL:
1457 /* XXX: TODO */
1458 cpu_abort(env, "BookE FSL MMU model is not implemented\n");
1459 return -1;
1460 case POWERPC_MMU_MPC8xx:
1461 /* XXX: TODO */
1462 cpu_abort(env, "MPC8xx MMU model is not implemented\n");
1463 break;
1464 case POWERPC_MMU_REAL:
1465 cpu_abort(env, "PowerPC in real mode should never raise "
1466 "any MMU exceptions\n");
1467 return -1;
1468 default:
1469 cpu_abort(env, "Unknown or invalid MMU model\n");
1470 return -1;
1472 break;
1473 case -2:
1474 /* Access rights violation */
1475 env->exception_index = POWERPC_EXCP_ISI;
1476 env->error_code = 0x08000000;
1477 break;
1478 case -3:
1479 /* No execute protection violation */
1480 if (env->mmu_model == POWERPC_MMU_BOOKE) {
1481 env->spr[SPR_BOOKE_ESR] = 0x00000000;
1483 env->exception_index = POWERPC_EXCP_ISI;
1484 env->error_code = 0x10000000;
1485 break;
1486 case -4:
1487 /* Direct store exception */
1488 /* No code fetch is allowed in direct-store areas */
1489 env->exception_index = POWERPC_EXCP_ISI;
1490 env->error_code = 0x10000000;
1491 break;
1492 #if defined(TARGET_PPC64)
1493 case -5:
1494 /* No match in segment table */
1495 if (env->mmu_model == POWERPC_MMU_620) {
1496 env->exception_index = POWERPC_EXCP_ISI;
1497 /* XXX: this might be incorrect */
1498 env->error_code = 0x40000000;
1499 } else {
1500 env->exception_index = POWERPC_EXCP_ISEG;
1501 env->error_code = 0;
1503 break;
1504 #endif
1506 } else {
1507 switch (ret) {
1508 case -1:
1509 /* No matches in page tables or TLB */
1510 switch (env->mmu_model) {
1511 case POWERPC_MMU_SOFT_6xx:
1512 if (rw == 1) {
1513 env->exception_index = POWERPC_EXCP_DSTLB;
1514 env->error_code = 1 << 16;
1515 } else {
1516 env->exception_index = POWERPC_EXCP_DLTLB;
1517 env->error_code = 0;
1519 env->spr[SPR_DMISS] = address;
1520 env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
1521 tlb_miss:
1522 env->error_code |= ctx.key << 19;
1523 env->spr[SPR_HASH1] = ctx.pg_addr[0];
1524 env->spr[SPR_HASH2] = ctx.pg_addr[1];
1525 break;
1526 case POWERPC_MMU_SOFT_74xx:
1527 if (rw == 1) {
1528 env->exception_index = POWERPC_EXCP_DSTLB;
1529 } else {
1530 env->exception_index = POWERPC_EXCP_DLTLB;
1532 tlb_miss_74xx:
1533 /* Implement LRU algorithm */
1534 env->error_code = ctx.key << 19;
1535 env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) |
1536 ((env->last_way + 1) & (env->nb_ways - 1));
1537 env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem;
1538 break;
1539 case POWERPC_MMU_SOFT_4xx:
1540 case POWERPC_MMU_SOFT_4xx_Z:
1541 env->exception_index = POWERPC_EXCP_DTLB;
1542 env->error_code = 0;
1543 env->spr[SPR_40x_DEAR] = address;
1544 if (rw)
1545 env->spr[SPR_40x_ESR] = 0x00800000;
1546 else
1547 env->spr[SPR_40x_ESR] = 0x00000000;
1548 break;
1549 case POWERPC_MMU_32B:
1550 case POWERPC_MMU_601:
1551 #if defined(TARGET_PPC64)
1552 case POWERPC_MMU_620:
1553 case POWERPC_MMU_64B:
1554 #endif
1555 env->exception_index = POWERPC_EXCP_DSI;
1556 env->error_code = 0;
1557 env->spr[SPR_DAR] = address;
1558 if (rw == 1)
1559 env->spr[SPR_DSISR] = 0x42000000;
1560 else
1561 env->spr[SPR_DSISR] = 0x40000000;
1562 break;
1563 case POWERPC_MMU_MPC8xx:
1564 /* XXX: TODO */
1565 cpu_abort(env, "MPC8xx MMU model is not implemented\n");
1566 break;
1567 case POWERPC_MMU_BOOKE:
1568 env->exception_index = POWERPC_EXCP_DTLB;
1569 env->error_code = 0;
1570 env->spr[SPR_BOOKE_DEAR] = address;
1571 env->spr[SPR_BOOKE_ESR] = rw ? 1 << ESR_ST : 0;
1572 return -1;
1573 case POWERPC_MMU_BOOKE_FSL:
1574 /* XXX: TODO */
1575 cpu_abort(env, "BookE FSL MMU model is not implemented\n");
1576 return -1;
1577 case POWERPC_MMU_REAL:
1578 cpu_abort(env, "PowerPC in real mode should never raise "
1579 "any MMU exceptions\n");
1580 return -1;
1581 default:
1582 cpu_abort(env, "Unknown or invalid MMU model\n");
1583 return -1;
1585 break;
1586 case -2:
1587 /* Access rights violation */
1588 env->exception_index = POWERPC_EXCP_DSI;
1589 env->error_code = 0;
1590 if (env->mmu_model == POWERPC_MMU_SOFT_4xx
1591 || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) {
1592 env->spr[SPR_40x_DEAR] = address;
1593 if (rw) {
1594 env->spr[SPR_40x_ESR] |= 0x00800000;
1596 } else if (env->mmu_model == POWERPC_MMU_BOOKE) {
1597 env->spr[SPR_BOOKE_DEAR] = address;
1598 env->spr[SPR_BOOKE_ESR] = rw ? 1 << ESR_ST : 0;
1599 } else {
1600 env->spr[SPR_DAR] = address;
1601 if (rw == 1) {
1602 env->spr[SPR_DSISR] = 0x0A000000;
1603 } else {
1604 env->spr[SPR_DSISR] = 0x08000000;
1607 break;
1608 case -4:
1609 /* Direct store exception */
1610 switch (access_type) {
1611 case ACCESS_FLOAT:
1612 /* Floating point load/store */
1613 env->exception_index = POWERPC_EXCP_ALIGN;
1614 env->error_code = POWERPC_EXCP_ALIGN_FP;
1615 env->spr[SPR_DAR] = address;
1616 break;
1617 case ACCESS_RES:
1618 /* lwarx, ldarx or stwcx. */
1619 env->exception_index = POWERPC_EXCP_DSI;
1620 env->error_code = 0;
1621 env->spr[SPR_DAR] = address;
1622 if (rw == 1)
1623 env->spr[SPR_DSISR] = 0x06000000;
1624 else
1625 env->spr[SPR_DSISR] = 0x04000000;
1626 break;
1627 case ACCESS_EXT:
1628 /* eciwx or ecowx */
1629 env->exception_index = POWERPC_EXCP_DSI;
1630 env->error_code = 0;
1631 env->spr[SPR_DAR] = address;
1632 if (rw == 1)
1633 env->spr[SPR_DSISR] = 0x06100000;
1634 else
1635 env->spr[SPR_DSISR] = 0x04100000;
1636 break;
1637 default:
1638 printf("DSI: invalid exception (%d)\n", ret);
1639 env->exception_index = POWERPC_EXCP_PROGRAM;
1640 env->error_code =
1641 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
1642 env->spr[SPR_DAR] = address;
1643 break;
1645 break;
1646 #if defined(TARGET_PPC64)
1647 case -5:
1648 /* No match in segment table */
1649 if (env->mmu_model == POWERPC_MMU_620) {
1650 env->exception_index = POWERPC_EXCP_DSI;
1651 env->error_code = 0;
1652 env->spr[SPR_DAR] = address;
1653 /* XXX: this might be incorrect */
1654 if (rw == 1)
1655 env->spr[SPR_DSISR] = 0x42000000;
1656 else
1657 env->spr[SPR_DSISR] = 0x40000000;
1658 } else {
1659 env->exception_index = POWERPC_EXCP_DSEG;
1660 env->error_code = 0;
1661 env->spr[SPR_DAR] = address;
1663 break;
1664 #endif
1667 #if 0
1668 printf("%s: set exception to %d %02x\n", __func__,
1669 env->exception, env->error_code);
1670 #endif
1671 ret = 1;
1674 return ret;
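/* Editor's note, not part of the original file: fault codes used above for
 * the classic hashed-MMU models.  ISI (error code / SRR1): 0x40000000 = no
 * PTE found, 0x08000000 = protection violation, 0x10000000 = no-execute or
 * direct-store instruction fetch.  DSI (DSISR): 0x40000000 = no PTE found,
 * 0x08000000 = protection violation, 0x04000000 = lwarx/ldarx/stwcx. to a
 * direct-store segment, 0x04100000 = eciwx/ecowx to a direct-store segment;
 * 0x02000000 is OR-ed in when the faulting access is a store.  The embedded
 * models use DEAR/ESR instead, as shown in the corresponding cases.
 */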
1677 /*****************************************************************************/
1678 /* BATs management */
1679 #if !defined(FLUSH_ALL_TLBS)
1680 static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
1681 target_ulong mask)
1683 target_ulong base, end, page;
1685 base = BATu & ~0x0001FFFF;
1686 end = base + mask + 0x00020000;
1687 LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
1688 TARGET_FMT_lx ")\n", base, end, mask);
1689 for (page = base; page != end; page += TARGET_PAGE_SIZE)
1690 tlb_flush_page(env, page);
1691 LOG_BATS("Flush done\n");
1693 #endif
1695 static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
1696 target_ulong value)
1698 LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID,
1699 nr, ul == 0 ? 'u' : 'l', value, env->nip);
1702 void ppc_store_ibatu (CPUPPCState *env, int nr, target_ulong value)
1704 target_ulong mask;
1706 dump_store_bat(env, 'I', 0, nr, value);
1707 if (env->IBAT[0][nr] != value) {
1708 mask = (value << 15) & 0x0FFE0000UL;
1709 #if !defined(FLUSH_ALL_TLBS)
1710 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1711 #endif
1712 /* When storing valid upper BAT, mask BEPI and BRPN
1713 * and invalidate all TLBs covered by this BAT
1715 mask = (value << 15) & 0x0FFE0000UL;
1716 env->IBAT[0][nr] = (value & 0x00001FFFUL) |
1717 (value & ~0x0001FFFFUL & ~mask);
1718 env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
1719 (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
1720 #if !defined(FLUSH_ALL_TLBS)
1721 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1722 #else
1723 tlb_flush(env, 1);
1724 #endif
1728 void ppc_store_ibatl (CPUPPCState *env, int nr, target_ulong value)
1730 dump_store_bat(env, 'I', 1, nr, value);
1731 env->IBAT[1][nr] = value;
1734 void ppc_store_dbatu (CPUPPCState *env, int nr, target_ulong value)
1736 target_ulong mask;
1738 dump_store_bat(env, 'D', 0, nr, value);
1739 if (env->DBAT[0][nr] != value) {
1740 /* When storing valid upper BAT, mask BEPI and BRPN
1741 * and invalidate all TLBs covered by this BAT
1743 mask = (value << 15) & 0x0FFE0000UL;
1744 #if !defined(FLUSH_ALL_TLBS)
1745 do_invalidate_BAT(env, env->DBAT[0][nr], mask);
1746 #endif
1747 mask = (value << 15) & 0x0FFE0000UL;
1748 env->DBAT[0][nr] = (value & 0x00001FFFUL) |
1749 (value & ~0x0001FFFFUL & ~mask);
1750 env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
1751 (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
1752 #if !defined(FLUSH_ALL_TLBS)
1753 do_invalidate_BAT(env, env->DBAT[0][nr], mask);
1754 #else
1755 tlb_flush(env, 1);
1756 #endif
1760 void ppc_store_dbatl (CPUPPCState *env, int nr, target_ulong value)
1762 dump_store_bat(env, 'D', 1, nr, value);
1763 env->DBAT[1][nr] = value;
1766 void ppc_store_ibatu_601 (CPUPPCState *env, int nr, target_ulong value)
1768 target_ulong mask;
1769 #if defined(FLUSH_ALL_TLBS)
1770 int do_inval;
1771 #endif
1773 dump_store_bat(env, 'I', 0, nr, value);
1774 if (env->IBAT[0][nr] != value) {
1775 #if defined(FLUSH_ALL_TLBS)
1776 do_inval = 0;
1777 #endif
1778 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
1779 if (env->IBAT[1][nr] & 0x40) {
1780 /* Invalidate BAT only if it is valid */
1781 #if !defined(FLUSH_ALL_TLBS)
1782 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1783 #else
1784 do_inval = 1;
1785 #endif
1787 /* When storing valid upper BAT, mask BEPI and BRPN
1788 * and invalidate all TLBs covered by this BAT
1790 env->IBAT[0][nr] = (value & 0x00001FFFUL) |
1791 (value & ~0x0001FFFFUL & ~mask);
1792 env->DBAT[0][nr] = env->IBAT[0][nr];
1793 if (env->IBAT[1][nr] & 0x40) {
1794 #if !defined(FLUSH_ALL_TLBS)
1795 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1796 #else
1797 do_inval = 1;
1798 #endif
1800 #if defined(FLUSH_ALL_TLBS)
1801 if (do_inval)
1802 tlb_flush(env, 1);
1803 #endif
1807 void ppc_store_ibatl_601 (CPUPPCState *env, int nr, target_ulong value)
1809 target_ulong mask;
1810 #if defined(FLUSH_ALL_TLBS)
1811 int do_inval;
1812 #endif
1814 dump_store_bat(env, 'I', 1, nr, value);
1815 if (env->IBAT[1][nr] != value) {
1816 #if defined(FLUSH_ALL_TLBS)
1817 do_inval = 0;
1818 #endif
1819 if (env->IBAT[1][nr] & 0x40) {
1820 #if !defined(FLUSH_ALL_TLBS)
1821 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
1822 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1823 #else
1824 do_inval = 1;
1825 #endif
1827 if (value & 0x40) {
1828 #if !defined(FLUSH_ALL_TLBS)
1829 mask = (value << 17) & 0x0FFE0000UL;
1830 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1831 #else
1832 do_inval = 1;
1833 #endif
1835 env->IBAT[1][nr] = value;
1836 env->DBAT[1][nr] = value;
1837 #if defined(FLUSH_ALL_TLBS)
1838 if (do_inval)
1839 tlb_flush(env, 1);
1840 #endif
1844 /*****************************************************************************/
1845 /* TLB management */
1846 void ppc_tlb_invalidate_all (CPUPPCState *env)
1848 switch (env->mmu_model) {
1849 case POWERPC_MMU_SOFT_6xx:
1850 case POWERPC_MMU_SOFT_74xx:
1851 ppc6xx_tlb_invalidate_all(env);
1852 break;
1853 case POWERPC_MMU_SOFT_4xx:
1854 case POWERPC_MMU_SOFT_4xx_Z:
1855 ppc4xx_tlb_invalidate_all(env);
1856 break;
1857 case POWERPC_MMU_REAL:
1858 cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n");
1859 break;
1860 case POWERPC_MMU_MPC8xx:
1861 /* XXX: TODO */
1862 cpu_abort(env, "MPC8xx MMU model is not implemented\n");
1863 break;
1864 case POWERPC_MMU_BOOKE:
1865 tlb_flush(env, 1);
1866 break;
1867 case POWERPC_MMU_BOOKE_FSL:
1868 /* XXX: TODO */
1869 if (!kvm_enabled())
1870 cpu_abort(env, "BookE MMU model is not implemented\n");
1871 break;
1872 case POWERPC_MMU_32B:
1873 case POWERPC_MMU_601:
1874 #if defined(TARGET_PPC64)
1875 case POWERPC_MMU_620:
1876 case POWERPC_MMU_64B:
1877 #endif /* defined(TARGET_PPC64) */
1878 tlb_flush(env, 1);
1879 break;
1880 default:
1881 /* XXX: TODO */
1882 cpu_abort(env, "Unknown MMU model\n");
1883 break;
1887 void ppc_tlb_invalidate_one (CPUPPCState *env, target_ulong addr)
1889 #if !defined(FLUSH_ALL_TLBS)
1890 addr &= TARGET_PAGE_MASK;
1891 switch (env->mmu_model) {
1892 case POWERPC_MMU_SOFT_6xx:
1893 case POWERPC_MMU_SOFT_74xx:
1894 ppc6xx_tlb_invalidate_virt(env, addr, 0);
1895 if (env->id_tlbs == 1)
1896 ppc6xx_tlb_invalidate_virt(env, addr, 1);
1897 break;
1898 case POWERPC_MMU_SOFT_4xx:
1899 case POWERPC_MMU_SOFT_4xx_Z:
1900 ppc4xx_tlb_invalidate_virt(env, addr, env->spr[SPR_40x_PID]);
1901 break;
1902 case POWERPC_MMU_REAL:
1903 cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n");
1904 break;
1905 case POWERPC_MMU_MPC8xx:
1906 /* XXX: TODO */
1907 cpu_abort(env, "MPC8xx MMU model is not implemented\n");
1908 break;
1909 case POWERPC_MMU_BOOKE:
1910 /* XXX: TODO */
1911 cpu_abort(env, "BookE MMU model is not implemented\n");
1912 break;
1913 case POWERPC_MMU_BOOKE_FSL:
1914 /* XXX: TODO */
1915 cpu_abort(env, "BookE FSL MMU model is not implemented\n");
1916 break;
1917 case POWERPC_MMU_32B:
1918 case POWERPC_MMU_601:
1919 /* tlbie invalidates TLBs for all segments */
1920 addr &= ~((target_ulong)-1ULL << 28);
1921 /* XXX: this case should be optimized,
1922 * giving a mask to tlb_flush_page
1924 tlb_flush_page(env, addr | (0x0 << 28));
1925 tlb_flush_page(env, addr | (0x1 << 28));
1926 tlb_flush_page(env, addr | (0x2 << 28));
1927 tlb_flush_page(env, addr | (0x3 << 28));
1928 tlb_flush_page(env, addr | (0x4 << 28));
1929 tlb_flush_page(env, addr | (0x5 << 28));
1930 tlb_flush_page(env, addr | (0x6 << 28));
1931 tlb_flush_page(env, addr | (0x7 << 28));
1932 tlb_flush_page(env, addr | (0x8 << 28));
1933 tlb_flush_page(env, addr | (0x9 << 28));
1934 tlb_flush_page(env, addr | (0xA << 28));
1935 tlb_flush_page(env, addr | (0xB << 28));
1936 tlb_flush_page(env, addr | (0xC << 28));
1937 tlb_flush_page(env, addr | (0xD << 28));
1938 tlb_flush_page(env, addr | (0xE << 28));
1939 tlb_flush_page(env, addr | (0xF << 28));
1940 break;
1941 #if defined(TARGET_PPC64)
1942 case POWERPC_MMU_620:
1943 case POWERPC_MMU_64B:
1944 /* tlbie invalidates TLBs for all segments */
1945 /* XXX: given the fact that there are too many segments to invalidate,
1946 * and we still don't have a tlb_flush_mask(env, n, mask) in Qemu,
1947 * we just invalidate all TLBs
1949 tlb_flush(env, 1);
1950 break;
1951 #endif /* defined(TARGET_PPC64) */
1952 default:
1953 /* XXX: TODO */
1954 cpu_abort(env, "Unknown MMU model\n");
1955 break;
1957 #else
1958 ppc_tlb_invalidate_all(env);
1959 #endif
1962 /*****************************************************************************/
1963 /* Special registers manipulation */
1964 #if defined(TARGET_PPC64)
1965 void ppc_store_asr (CPUPPCState *env, target_ulong value)
1967 if (env->asr != value) {
1968 env->asr = value;
1969 tlb_flush(env, 1);
1972 #endif
1974 void ppc_store_sdr1 (CPUPPCState *env, target_ulong value)
1976 LOG_MMU("%s: " TARGET_FMT_lx "\n", __func__, value);
1977 if (env->sdr1 != value) {
1978 /* XXX: for PowerPC 64, should check that the HTABSIZE value
1979 * is <= 28
1981 env->sdr1 = value;
1982 tlb_flush(env, 1);
1986 #if defined(TARGET_PPC64)
1987 target_ulong ppc_load_sr (CPUPPCState *env, int slb_nr)
1989 // XXX
1990 return 0;
1992 #endif
1994 void ppc_store_sr (CPUPPCState *env, int srnum, target_ulong value)
1996 LOG_MMU("%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
1997 srnum, value, env->sr[srnum]);
1998 #if defined(TARGET_PPC64)
1999 if (env->mmu_model & POWERPC_MMU_64) {
2000 uint64_t rb = 0, rs = 0;
2002 /* ESID = srnum */
2003 rb |= ((uint32_t)srnum & 0xf) << 28;
2004 /* Set the valid bit */
2005 rb |= 1 << 27;
2006 /* Index = ESID */
2007 rb |= (uint32_t)srnum;
2009 /* VSID = VSID */
2010 rs |= (value & 0xfffffff) << 12;
2011 /* flags = flags */
2012 rs |= ((value >> 27) & 0xf) << 9;
2014 ppc_store_slb(env, rb, rs);
2015 } else
2016 #endif
2017 if (env->sr[srnum] != value) {
2018 env->sr[srnum] = value;
2019 /* Invalidating 256MB of virtual memory in 4kB pages takes way longer than
2020 flushing the whole TLB. */
2021 #if !defined(FLUSH_ALL_TLBS) && 0
2023 target_ulong page, end;
2024 /* Invalidate 256 MB of virtual memory */
2025 page = (16 << 20) * srnum;
2026 end = page + (16 << 20);
2027 for (; page != end; page += TARGET_PAGE_SIZE)
2028 tlb_flush_page(env, page);
2030 #else
2031 tlb_flush(env, 1);
2032 #endif
2035 #endif /* !defined (CONFIG_USER_ONLY) */
2037 /* GDBstub can read and write MSR... */
2038 void ppc_store_msr (CPUPPCState *env, target_ulong value)
2040 hreg_store_msr(env, value, 0);
2043 /*****************************************************************************/
2044 /* Exception processing */
2045 #if defined (CONFIG_USER_ONLY)
2046 void do_interrupt (CPUState *env)
2048 env->exception_index = POWERPC_EXCP_NONE;
2049 env->error_code = 0;
2052 void ppc_hw_interrupt (CPUState *env)
2054 env->exception_index = POWERPC_EXCP_NONE;
2055 env->error_code = 0;
2057 #else /* defined (CONFIG_USER_ONLY) */
2058 static inline void dump_syscall(CPUState *env)
2060 qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 " r3=%016" PRIx64
2061 " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
2062 " nip=" TARGET_FMT_lx "\n",
2063 ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
2064 ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
2065 ppc_dump_gpr(env, 6), env->nip);
2068 /* Note that this function should be greatly optimized
2069 * when called with a constant excp, from ppc_hw_interrupt
2071 static inline void powerpc_excp(CPUState *env, int excp_model, int excp)
2073 target_ulong msr, new_msr, vector;
2074 int srr0, srr1, asrr0, asrr1;
2075 int lpes0, lpes1, lev;
2077 if (0) {
2078 /* XXX: find a suitable condition to enable the hypervisor mode */
2079 lpes0 = (env->spr[SPR_LPCR] >> 1) & 1;
2080 lpes1 = (env->spr[SPR_LPCR] >> 2) & 1;
2081 } else {
2082 /* Those values ensure we won't enter the hypervisor mode */
2083 lpes0 = 0;
2084 lpes1 = 1;
2087 qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
2088 " => %08x (%02x)\n", env->nip, excp, env->error_code);
2090 /* new srr1 value excluding must-be-zero bits */
2091 msr = env->msr & ~0x783f0000ULL;
2093 /* new interrupt handler msr */
2094 new_msr = env->msr & ((target_ulong)1 << MSR_ME);
2096 /* target registers */
2097 srr0 = SPR_SRR0;
2098 srr1 = SPR_SRR1;
2099 asrr0 = -1;
2100 asrr1 = -1;
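/* By default the interrupted state is saved in SRR0/SRR1; the switch below
 * overrides these for the 40x SRR2/SRR3, the BookE CSRR/MCSRR/DSRR and the
 * hypervisor HSRR pairs, while asrr0/asrr1 name an optional second pair
 * that receives a copy of the same values once the exception is classified. */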
2102 switch (excp) {
2103 case POWERPC_EXCP_NONE:
2104 /* Should never happen */
2105 return;
2106 case POWERPC_EXCP_CRITICAL: /* Critical input */
2107 switch (excp_model) {
2108 case POWERPC_EXCP_40x:
2109 srr0 = SPR_40x_SRR2;
2110 srr1 = SPR_40x_SRR3;
2111 break;
2112 case POWERPC_EXCP_BOOKE:
2113 srr0 = SPR_BOOKE_CSRR0;
2114 srr1 = SPR_BOOKE_CSRR1;
2115 break;
2116 case POWERPC_EXCP_G2:
2117 break;
2118 default:
2119 goto excp_invalid;
2121 goto store_next;
2122 case POWERPC_EXCP_MCHECK: /* Machine check exception */
2123 if (msr_me == 0) {
2124 /* Machine check exception is not enabled.
2125 * Enter checkstop state.
2127 if (qemu_log_enabled()) {
2128 qemu_log("Machine check while not allowed. "
2129 "Entering checkstop state\n");
2130 } else {
2131 fprintf(stderr, "Machine check while not allowed. "
2132 "Entering checkstop state\n");
2134 env->halted = 1;
2135 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
2137 if (0) {
2138 /* XXX: find a suitable condition to enable the hypervisor mode */
2139 new_msr |= (target_ulong)MSR_HVB;
2142 /* machine check exceptions don't have ME set */
2143 new_msr &= ~((target_ulong)1 << MSR_ME);
2145 /* XXX: should also have something loaded in DAR / DSISR */
2146 switch (excp_model) {
2147 case POWERPC_EXCP_40x:
2148 srr0 = SPR_40x_SRR2;
2149 srr1 = SPR_40x_SRR3;
2150 break;
2151 case POWERPC_EXCP_BOOKE:
2152 srr0 = SPR_BOOKE_MCSRR0;
2153 srr1 = SPR_BOOKE_MCSRR1;
2154 asrr0 = SPR_BOOKE_CSRR0;
2155 asrr1 = SPR_BOOKE_CSRR1;
2156 break;
2157 default:
2158 break;
2160 goto store_next;
2161 case POWERPC_EXCP_DSI: /* Data storage exception */
2162 LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx
2163 "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
2164 if (lpes1 == 0)
2165 new_msr |= (target_ulong)MSR_HVB;
2166 goto store_next;
2167 case POWERPC_EXCP_ISI: /* Instruction storage exception */
2168 LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
2169 "\n", msr, env->nip);
2170 if (lpes1 == 0)
2171 new_msr |= (target_ulong)MSR_HVB;
2172 msr |= env->error_code;
2173 goto store_next;
2174 case POWERPC_EXCP_EXTERNAL: /* External input */
2175 if (lpes0 == 1)
2176 new_msr |= (target_ulong)MSR_HVB;
2177 goto store_next;
2178 case POWERPC_EXCP_ALIGN: /* Alignment exception */
2179 if (lpes1 == 0)
2180 new_msr |= (target_ulong)MSR_HVB;
2181 /* XXX: this is not architecturally accurate */
2182 /* Get rS/rD and rA from faulting opcode */
2183 env->spr[SPR_DSISR] |= (ldl_code((env->nip - 4)) & 0x03FF0000) >> 16;
2184 goto store_current;
2185 case POWERPC_EXCP_PROGRAM: /* Program exception */
2186 switch (env->error_code & ~0xF) {
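/* The constants ORed into msr below correspond to the SRR1 status bits
 * defined for the program interrupt: 0x00100000 floating-point enabled
 * exception, 0x00080000 illegal instruction, 0x00040000 privileged
 * instruction, 0x00020000 trap. */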
2187 case POWERPC_EXCP_FP:
2188 if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
2189 LOG_EXCP("Ignore floating point exception\n");
2190 env->exception_index = POWERPC_EXCP_NONE;
2191 env->error_code = 0;
2192 return;
2194 if (lpes1 == 0)
2195 new_msr |= (target_ulong)MSR_HVB;
2196 msr |= 0x00100000;
2197 if (msr_fe0 == msr_fe1)
2198 goto store_next;
2199 msr |= 0x00010000;
2200 break;
2201 case POWERPC_EXCP_INVAL:
2202 LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
2203 if (lpes1 == 0)
2204 new_msr |= (target_ulong)MSR_HVB;
2205 msr |= 0x00080000;
2206 break;
2207 case POWERPC_EXCP_PRIV:
2208 if (lpes1 == 0)
2209 new_msr |= (target_ulong)MSR_HVB;
2210 msr |= 0x00040000;
2211 break;
2212 case POWERPC_EXCP_TRAP:
2213 if (lpes1 == 0)
2214 new_msr |= (target_ulong)MSR_HVB;
2215 msr |= 0x00020000;
2216 break;
2217 default:
2218 /* Should never occur */
2219 cpu_abort(env, "Invalid program exception %d. Aborting\n",
2220 env->error_code);
2221 break;
2223 goto store_current;
2224 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
2225 if (lpes1 == 0)
2226 new_msr |= (target_ulong)MSR_HVB;
2227 goto store_current;
2228 case POWERPC_EXCP_SYSCALL: /* System call exception */
2229 dump_syscall(env);
2230 lev = env->error_code;
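/* lev is the LEV field of the sc instruction (e.g. a guest "sc 1");
 * lev == 1 is a hypervisor call and is delivered with MSR_HV set when
 * hypervisor mode is available. */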
2231 if (lev == 1 || (lpes0 == 0 && lpes1 == 0))
2232 new_msr |= (target_ulong)MSR_HVB;
2233 goto store_next;
2234 case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
2235 goto store_current;
2236 case POWERPC_EXCP_DECR: /* Decrementer exception */
2237 if (lpes1 == 0)
2238 new_msr |= (target_ulong)MSR_HVB;
2239 goto store_next;
2240 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
2241 /* FIT on 4xx */
2242 LOG_EXCP("FIT exception\n");
2243 goto store_next;
2244 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
2245 LOG_EXCP("WDT exception\n");
2246 switch (excp_model) {
2247 case POWERPC_EXCP_BOOKE:
2248 srr0 = SPR_BOOKE_CSRR0;
2249 srr1 = SPR_BOOKE_CSRR1;
2250 break;
2251 default:
2252 break;
2254 goto store_next;
2255 case POWERPC_EXCP_DTLB: /* Data TLB error */
2256 goto store_next;
2257 case POWERPC_EXCP_ITLB: /* Instruction TLB error */
2258 goto store_next;
2259 case POWERPC_EXCP_DEBUG: /* Debug interrupt */
2260 switch (excp_model) {
2261 case POWERPC_EXCP_BOOKE:
2262 srr0 = SPR_BOOKE_DSRR0;
2263 srr1 = SPR_BOOKE_DSRR1;
2264 asrr0 = SPR_BOOKE_CSRR0;
2265 asrr1 = SPR_BOOKE_CSRR1;
2266 break;
2267 default:
2268 break;
2270 /* XXX: TODO */
2271 cpu_abort(env, "Debug exception is not implemented yet !\n");
2272 goto store_next;
2273 case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable */
2274 goto store_current;
2275 case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */
2276 /* XXX: TODO */
2277 cpu_abort(env, "Embedded floating point data exception "
2278 "is not implemented yet !\n");
2279 goto store_next;
2280 case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */
2281 /* XXX: TODO */
2282 cpu_abort(env, "Embedded floating point round exception "
2283 "is not implemented yet !\n");
2284 goto store_next;
2285 case POWERPC_EXCP_EPERFM: /* Embedded performance monitor interrupt */
2286 /* XXX: TODO */
2287 cpu_abort(env,
2288 "Performance counter exception is not implemented yet !\n");
2289 goto store_next;
2290 case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
2291 /* XXX: TODO */
2292 cpu_abort(env,
2293 "Embedded doorbell interrupt is not implemented yet !\n");
2294 goto store_next;
2295 case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
2296 switch (excp_model) {
2297 case POWERPC_EXCP_BOOKE:
2298 srr0 = SPR_BOOKE_CSRR0;
2299 srr1 = SPR_BOOKE_CSRR1;
2300 break;
2301 default:
2302 break;
2304 /* XXX: TODO */
2305 cpu_abort(env, "Embedded doorbell critical interrupt "
2306 "is not implemented yet !\n");
2307 goto store_next;
2308 case POWERPC_EXCP_RESET: /* System reset exception */
2309 if (msr_pow) {
2310 /* indicate that we resumed from power save mode */
2311 msr |= 0x10000;
2312 } else {
2313 new_msr &= ~((target_ulong)1 << MSR_ME);
2316 if (0) {
2317 /* XXX: find a suitable condition to enable the hypervisor mode */
2318 new_msr |= (target_ulong)MSR_HVB;
2320 goto store_next;
2321 case POWERPC_EXCP_DSEG: /* Data segment exception */
2322 if (lpes1 == 0)
2323 new_msr |= (target_ulong)MSR_HVB;
2324 goto store_next;
2325 case POWERPC_EXCP_ISEG: /* Instruction segment exception */
2326 if (lpes1 == 0)
2327 new_msr |= (target_ulong)MSR_HVB;
2328 goto store_next;
2329 case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
2330 srr0 = SPR_HSRR0;
2331 srr1 = SPR_HSRR1;
2332 new_msr |= (target_ulong)MSR_HVB;
2333 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
2334 goto store_next;
2335 case POWERPC_EXCP_TRACE: /* Trace exception */
2336 if (lpes1 == 0)
2337 new_msr |= (target_ulong)MSR_HVB;
2338 goto store_next;
2339 case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
2340 srr0 = SPR_HSRR0;
2341 srr1 = SPR_HSRR1;
2342 new_msr |= (target_ulong)MSR_HVB;
2343 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
2344 goto store_next;
2345 case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */
2346 srr0 = SPR_HSRR0;
2347 srr1 = SPR_HSRR1;
2348 new_msr |= (target_ulong)MSR_HVB;
2349 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
2350 goto store_next;
2351 case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
2352 srr0 = SPR_HSRR0;
2353 srr1 = SPR_HSRR1;
2354 new_msr |= (target_ulong)MSR_HVB;
2355 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
2356 goto store_next;
2357 case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */
2358 srr0 = SPR_HSRR0;
2359 srr1 = SPR_HSRR1;
2360 new_msr |= (target_ulong)MSR_HVB;
2361 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
2362 goto store_next;
2363 case POWERPC_EXCP_VPU: /* Vector unavailable exception */
2364 if (lpes1 == 0)
2365 new_msr |= (target_ulong)MSR_HVB;
2366 goto store_current;
2367 case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */
2368 LOG_EXCP("PIT exception\n");
2369 goto store_next;
2370 case POWERPC_EXCP_IO: /* IO error exception */
2371 /* XXX: TODO */
2372 cpu_abort(env, "601 IO error exception is not implemented yet !\n");
2373 goto store_next;
2374 case POWERPC_EXCP_RUNM: /* Run mode exception */
2375 /* XXX: TODO */
2376 cpu_abort(env, "601 run mode exception is not implemented yet !\n");
2377 goto store_next;
2378 case POWERPC_EXCP_EMUL: /* Emulation trap exception */
2379 /* XXX: TODO */
2380 cpu_abort(env, "602 emulation trap exception "
2381 "is not implemented yet !\n");
2382 goto store_next;
2383 case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
2384 if (lpes1 == 0) /* XXX: check this */
2385 new_msr |= (target_ulong)MSR_HVB;
2386 switch (excp_model) {
2387 case POWERPC_EXCP_602:
2388 case POWERPC_EXCP_603:
2389 case POWERPC_EXCP_603E:
2390 case POWERPC_EXCP_G2:
2391 goto tlb_miss_tgpr;
2392 case POWERPC_EXCP_7x5:
2393 goto tlb_miss;
2394 case POWERPC_EXCP_74xx:
2395 goto tlb_miss_74xx;
2396 default:
2397 cpu_abort(env, "Invalid instruction TLB miss exception\n");
2398 break;
2400 break;
2401 case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
2402 if (lpes1 == 0) /* XXX: check this */
2403 new_msr |= (target_ulong)MSR_HVB;
2404 switch (excp_model) {
2405 case POWERPC_EXCP_602:
2406 case POWERPC_EXCP_603:
2407 case POWERPC_EXCP_603E:
2408 case POWERPC_EXCP_G2:
2409 goto tlb_miss_tgpr;
2410 case POWERPC_EXCP_7x5:
2411 goto tlb_miss;
2412 case POWERPC_EXCP_74xx:
2413 goto tlb_miss_74xx;
2414 default:
2415 cpu_abort(env, "Invalid data load TLB miss exception\n");
2416 break;
2418 break;
2419 case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
2420 if (lpes1 == 0) /* XXX: check this */
2421 new_msr |= (target_ulong)MSR_HVB;
2422 switch (excp_model) {
2423 case POWERPC_EXCP_602:
2424 case POWERPC_EXCP_603:
2425 case POWERPC_EXCP_603E:
2426 case POWERPC_EXCP_G2:
2427 tlb_miss_tgpr:
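/* On the 602/603/G2 families the software TLB miss handlers run with
 * MSR[TGPR] set, which maps GPR0-GPR3 onto a shadow register set; swap
 * them in here so the handler sees the shadow copies. */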
2428 /* Swap temporary saved registers with GPRs */
2429 if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
2430 new_msr |= (target_ulong)1 << MSR_TGPR;
2431 hreg_swap_gpr_tgpr(env);
2433 goto tlb_miss;
2434 case POWERPC_EXCP_7x5:
2435 tlb_miss:
2436 #if defined (DEBUG_SOFTWARE_TLB)
2437 if (qemu_log_enabled()) {
2438 const char *es;
2439 target_ulong *miss, *cmp;
2440 int en;
2441 if (excp == POWERPC_EXCP_IFTLB) {
2442 es = "I";
2443 en = 'I';
2444 miss = &env->spr[SPR_IMISS];
2445 cmp = &env->spr[SPR_ICMP];
2446 } else {
2447 if (excp == POWERPC_EXCP_DLTLB)
2448 es = "DL";
2449 else
2450 es = "DS";
2451 en = 'D';
2452 miss = &env->spr[SPR_DMISS];
2453 cmp = &env->spr[SPR_DCMP];
2455 qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
2456 TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
2457 TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
2458 env->spr[SPR_HASH1], env->spr[SPR_HASH2],
2459 env->error_code);
2461 #endif
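/* For the 6xx software TLB miss handlers SRR1 carries extra state: a copy
 * of CR0 in its top four bits, the KEY / instruction-vs-data /
 * load-vs-store bits taken from error_code, and the way to be replaced,
 * picked round-robin as a cheap LRU. */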
2462 msr |= env->crf[0] << 28;
2463 msr |= env->error_code; /* key, D/I, S/L bits */
2464 /* Set the way using an LRU mechanism */
2465 msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
2466 break;
2467 case POWERPC_EXCP_74xx:
2468 tlb_miss_74xx:
2469 #if defined (DEBUG_SOFTWARE_TLB)
2470 if (qemu_log_enabled()) {
2471 const char *es;
2472 target_ulong *miss, *cmp;
2473 int en;
2474 if (excp == POWERPC_EXCP_IFTLB) {
2475 es = "I";
2476 en = 'I';
2477 miss = &env->spr[SPR_TLBMISS];
2478 cmp = &env->spr[SPR_PTEHI];
2479 } else {
2480 if (excp == POWERPC_EXCP_DLTLB)
2481 es = "DL";
2482 else
2483 es = "DS";
2484 en = 'D';
2485 miss = &env->spr[SPR_TLBMISS];
2486 cmp = &env->spr[SPR_PTEHI];
2488 qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
2489 TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
2490 env->error_code);
2492 #endif
2493 msr |= env->error_code; /* key bit */
2494 break;
2495 default:
2496 cpu_abort(env, "Invalid data store TLB miss exception\n");
2497 break;
2499 goto store_next;
2500 case POWERPC_EXCP_FPA: /* Floating-point assist exception */
2501 /* XXX: TODO */
2502 cpu_abort(env, "Floating point assist exception "
2503 "is not implemented yet !\n");
2504 goto store_next;
2505 case POWERPC_EXCP_DABR: /* Data address breakpoint */
2506 /* XXX: TODO */
2507 cpu_abort(env, "DABR exception is not implemented yet !\n");
2508 goto store_next;
2509 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
2510 /* XXX: TODO */
2511 cpu_abort(env, "IABR exception is not implemented yet !\n");
2512 goto store_next;
2513 case POWERPC_EXCP_SMI: /* System management interrupt */
2514 /* XXX: TODO */
2515 cpu_abort(env, "SMI exception is not implemented yet !\n");
2516 goto store_next;
2517 case POWERPC_EXCP_THERM: /* Thermal interrupt */
2518 /* XXX: TODO */
2519 cpu_abort(env, "Thermal management exception "
2520 "is not implemented yet !\n");
2521 goto store_next;
2522 case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
2523 if (lpes1 == 0)
2524 new_msr |= (target_ulong)MSR_HVB;
2525 /* XXX: TODO */
2526 cpu_abort(env,
2527 "Performance counter exception is not implemented yet !\n");
2528 goto store_next;
2529 case POWERPC_EXCP_VPUA: /* Vector assist exception */
2530 /* XXX: TODO */
2531 cpu_abort(env, "VPU assist exception is not implemented yet !\n");
2532 goto store_next;
2533 case POWERPC_EXCP_SOFTP: /* Soft patch exception */
2534 /* XXX: TODO */
2535 cpu_abort(env,
2536 "970 soft-patch exception is not implemented yet !\n");
2537 goto store_next;
2538 case POWERPC_EXCP_MAINT: /* Maintenance exception */
2539 /* XXX: TODO */
2540 cpu_abort(env,
2541 "970 maintenance exception is not implemented yet !\n");
2542 goto store_next;
2543 case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */
2544 /* XXX: TODO */
2545 cpu_abort(env, "Maskable external exception "
2546 "is not implemented yet !\n");
2547 goto store_next;
2548 case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */
2549 /* XXX: TODO */
2550 cpu_abort(env, "Non maskable external exception "
2551 "is not implemented yet !\n");
2552 goto store_next;
2553 default:
2554 excp_invalid:
2555 cpu_abort(env, "Invalid PowerPC exception %d. Aborting\n", excp);
2556 break;
2557 store_current:
2558 /* save current instruction location */
2559 env->spr[srr0] = env->nip - 4;
2560 break;
2561 store_next:
2562 /* save next instruction location */
2563 env->spr[srr0] = env->nip;
2564 break;
2566 /* Save MSR */
2567 env->spr[srr1] = msr;
2568 /* If any alternate SRR registers are defined, duplicate the saved values */
2569 if (asrr0 != -1)
2570 env->spr[asrr0] = env->spr[srr0];
2571 if (asrr1 != -1)
2572 env->spr[asrr1] = env->spr[srr1];
2573 /* If we deactivated any translation, flush the TLBs */
2574 if (new_msr & ((1 << MSR_IR) | (1 << MSR_DR)))
2575 tlb_flush(env, 1);
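/* MSR[ILE] selects the endianness that interrupt handlers run with; when it
 * is set, the handler's MSR[LE] is set as well. */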
2577 if (msr_ile) {
2578 new_msr |= (target_ulong)1 << MSR_LE;
2581 /* Jump to handler */
2582 vector = env->excp_vectors[excp];
2583 if (vector == (target_ulong)-1ULL) {
2584 cpu_abort(env, "Raised an exception without defined vector %d\n",
2585 excp);
2587 vector |= env->excp_prefix;
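/* excp_vectors[] holds the per-exception handler offset; excp_prefix
 * supplies the high-order bits of the handler address (derived from
 * MSR[EP] on classic parts and from IVPR on BookE). */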
2588 #if defined(TARGET_PPC64)
2589 if (excp_model == POWERPC_EXCP_BOOKE) {
2590 if (!msr_icm) {
2591 vector = (uint32_t)vector;
2592 } else {
2593 new_msr |= (target_ulong)1 << MSR_CM;
2595 } else {
2596 if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) {
2597 vector = (uint32_t)vector;
2598 } else {
2599 new_msr |= (target_ulong)1 << MSR_SF;
2602 #endif
2603 /* XXX: we don't use hreg_store_msr here since we have already handled
2604 * any special case that could occur. Just store the MSR and update hflags */
2606 env->msr = new_msr & env->msr_mask;
2607 hreg_compute_hflags(env);
2608 env->nip = vector;
2609 /* Reset exception state */
2610 env->exception_index = POWERPC_EXCP_NONE;
2611 env->error_code = 0;
2613 if (env->mmu_model == POWERPC_MMU_BOOKE) {
2614 /* XXX: BookE changes address space when switching modes; we should
2615 probably implement that as different MMU indexes, but for the
2616 moment we do it the slow way and flush everything. */
2617 tlb_flush(env, 1);
2621 void do_interrupt (CPUState *env)
2623 powerpc_excp(env, env->excp_model, env->exception_index);
2626 void ppc_hw_interrupt (CPUPPCState *env)
2628 int hdice;
2630 #if 0
2631 qemu_log_mask(CPU_LOG_INT, "%s: %p pending %08x req %08x me %d ee %d\n",
2632 __func__, env, env->pending_interrupts,
2633 env->interrupt_request, (int)msr_me, (int)msr_ee);
2634 #endif
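/* Pending interrupts are checked in decreasing priority order: reset,
 * machine check, hypervisor decrementer, critical input (gated on MSR[CE]),
 * then the MSR[EE]-gated sources; the first one found is raised and we
 * return. */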
2635 /* External reset */
2636 if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
2637 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
2638 powerpc_excp(env, env->excp_model, POWERPC_EXCP_RESET);
2639 return;
2641 /* Machine check exception */
2642 if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
2643 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
2644 powerpc_excp(env, env->excp_model, POWERPC_EXCP_MCHECK);
2645 return;
2647 #if 0 /* TODO */
2648 /* External debug exception */
2649 if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
2650 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
2651 powerpc_excp(env, env->excp_model, POWERPC_EXCP_DEBUG);
2652 return;
2654 #endif
2655 if (0) {
2656 /* XXX: find a suitable condition to enable the hypervisor mode */
2657 hdice = env->spr[SPR_LPCR] & 1;
2658 } else {
2659 hdice = 0;
2661 if ((msr_ee != 0 || msr_hv == 0 || msr_pr != 0) && hdice != 0) {
2662 /* Hypervisor decrementer exception */
2663 if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
2664 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
2665 powerpc_excp(env, env->excp_model, POWERPC_EXCP_HDECR);
2666 return;
2669 if (msr_ce != 0) {
2670 /* External critical interrupt */
2671 if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
2672 /* Taking a critical external interrupt does not clear the external
2673 * critical interrupt status
2675 #if 0
2676 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CEXT);
2677 #endif
2678 powerpc_excp(env, env->excp_model, POWERPC_EXCP_CRITICAL);
2679 return;
2682 if (msr_ee != 0) {
2683 /* Watchdog timer on embedded PowerPC */
2684 if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
2685 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
2686 powerpc_excp(env, env->excp_model, POWERPC_EXCP_WDT);
2687 return;
2689 if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
2690 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
2691 powerpc_excp(env, env->excp_model, POWERPC_EXCP_DOORCI);
2692 return;
2694 /* Fixed interval timer on embedded PowerPC */
2695 if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
2696 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
2697 powerpc_excp(env, env->excp_model, POWERPC_EXCP_FIT);
2698 return;
2700 /* Programmable interval timer on embedded PowerPC */
2701 if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
2702 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
2703 powerpc_excp(env, env->excp_model, POWERPC_EXCP_PIT);
2704 return;
2706 /* Decrementer exception */
2707 if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
2708 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
2709 powerpc_excp(env, env->excp_model, POWERPC_EXCP_DECR);
2710 return;
2712 /* External interrupt */
2713 if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
2714 /* Taking an external interrupt does not clear the external
2715 * interrupt status
2717 #if 0
2718 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_EXT);
2719 #endif
2720 powerpc_excp(env, env->excp_model, POWERPC_EXCP_EXTERNAL);
2721 return;
2723 if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
2724 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
2725 powerpc_excp(env, env->excp_model, POWERPC_EXCP_DOORI);
2726 return;
2728 if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
2729 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
2730 powerpc_excp(env, env->excp_model, POWERPC_EXCP_PERFM);
2731 return;
2733 /* Thermal interrupt */
2734 if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
2735 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
2736 powerpc_excp(env, env->excp_model, POWERPC_EXCP_THERM);
2737 return;
2741 #endif /* !CONFIG_USER_ONLY */
2743 void cpu_dump_rfi (target_ulong RA, target_ulong msr)
2745 qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
2746 TARGET_FMT_lx "\n", RA, msr);
2749 void cpu_reset(CPUPPCState *env)
2751 target_ulong msr;
2753 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
2754 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
2755 log_cpu_state(env, 0);
2758 msr = (target_ulong)0;
2759 if (0) {
2760 /* XXX: find a suitable condition to enable the hypervisor mode */
2761 msr |= (target_ulong)MSR_HVB;
2763 msr |= (target_ulong)0 << MSR_AP; /* TO BE CHECKED */
2764 msr |= (target_ulong)0 << MSR_SA; /* TO BE CHECKED */
2765 msr |= (target_ulong)1 << MSR_EP;
2766 #if defined (DO_SINGLE_STEP) && 0
2767 /* Single step trace mode */
2768 msr |= (target_ulong)1 << MSR_SE;
2769 msr |= (target_ulong)1 << MSR_BE;
2770 #endif
2771 #if defined(CONFIG_USER_ONLY)
2772 msr |= (target_ulong)1 << MSR_FP; /* Allow floating point usage */
2773 msr |= (target_ulong)1 << MSR_VR; /* Allow altivec usage */
2774 msr |= (target_ulong)1 << MSR_SPE; /* Allow SPE usage */
2775 msr |= (target_ulong)1 << MSR_PR;
2776 #else
2777 env->excp_prefix = env->hreset_excp_prefix;
2778 env->nip = env->hreset_vector | env->excp_prefix;
2779 if (env->mmu_model != POWERPC_MMU_REAL)
2780 ppc_tlb_invalidate_all(env);
2781 #endif
2782 env->msr = msr & env->msr_mask;
2783 #if defined(TARGET_PPC64)
2784 if (env->mmu_model & POWERPC_MMU_64)
2785 env->msr |= (1ULL << MSR_SF);
2786 #endif
2787 hreg_compute_hflags(env);
2788 env->reserve_addr = (target_ulong)-1ULL;
2789 /* Be sure no exception or interrupt is pending */
2790 env->pending_interrupts = 0;
2791 env->exception_index = POWERPC_EXCP_NONE;
2792 env->error_code = 0;
2793 /* Flush all TLBs */
2794 tlb_flush(env, 1);
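/* Allocate and initialize a CPU for the given model name: look up its
 * definition with cpu_ppc_find_by_name, allocate and register the CPUState,
 * initialize the translator, and hand the vcpu to qemu_init_vcpu (which
 * also sets up the KVM vcpu when KVM is enabled). */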
2797 CPUPPCState *cpu_ppc_init (const char *cpu_model)
2799 CPUPPCState *env;
2800 const ppc_def_t *def;
2802 def = cpu_ppc_find_by_name(cpu_model);
2803 if (!def)
2804 return NULL;
2806 env = qemu_mallocz(sizeof(CPUPPCState));
2807 cpu_exec_init(env);
2808 ppc_translate_init();
2809 env->cpu_model_str = cpu_model;
2810 cpu_ppc_register_internal(env, def);
2812 qemu_init_vcpu(env);
2814 return env;
2817 void cpu_ppc_close (CPUPPCState *env)
2819 /* Should also remove all opcode tables... */
2820 qemu_free(env);