target/ppc/mmu_helper.c
1 /*
2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "cpu.h"
23 #include "sysemu/kvm.h"
24 #include "kvm_ppc.h"
25 #include "mmu-hash64.h"
26 #include "mmu-hash32.h"
27 #include "exec/exec-all.h"
28 #include "exec/log.h"
29 #include "helper_regs.h"
30 #include "qemu/error-report.h"
31 #include "qemu/main-loop.h"
32 #include "qemu/qemu-print.h"
33 #include "internal.h"
34 #include "mmu-book3s-v3.h"
35 #include "mmu-radix64.h"
37 #ifdef CONFIG_TCG
38 #include "exec/helper-proto.h"
39 #include "exec/cpu_ldst.h"
40 #endif
41 /* #define DEBUG_MMU */
42 /* #define DEBUG_BATS */
43 /* #define DEBUG_SOFTWARE_TLB */
44 /* #define DUMP_PAGE_TABLES */
45 /* #define FLUSH_ALL_TLBS */
47 #ifdef DEBUG_MMU
48 # define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0)
49 #else
50 # define LOG_MMU_STATE(cpu) do { } while (0)
51 #endif
53 #ifdef DEBUG_SOFTWARE_TLB
54 # define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
55 #else
56 # define LOG_SWTLB(...) do { } while (0)
57 #endif
59 #ifdef DEBUG_BATS
60 # define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
61 #else
62 # define LOG_BATS(...) do { } while (0)
63 #endif
65 /*****************************************************************************/
66 /* PowerPC MMU emulation */
68 /* Context used internally during MMU translations */
69 typedef struct mmu_ctx_t mmu_ctx_t;
70 struct mmu_ctx_t {
71 hwaddr raddr; /* Real address */
72 hwaddr eaddr; /* Effective address */
73 int prot; /* Protection bits */
74 hwaddr hash[2]; /* Pagetable hash values */
75 target_ulong ptem; /* Virtual segment ID | API */
76 int key; /* Access key */
77 int nx; /* Non-execute area */
80 /* Common routines used by software and hardware TLBs emulation */
81 static inline int pte_is_valid(target_ulong pte0)
83 return pte0 & 0x80000000 ? 1 : 0;
86 static inline void pte_invalidate(target_ulong *pte0)
88 *pte0 &= ~0x80000000;
91 #define PTE_PTEM_MASK 0x7FFFFFBF
92 #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
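/*
 * PTE_PTEM_MASK selects the VSID and API fields of PTE word 0 (everything
 * except the valid and hash-select bits); PTE_CHECK_MASK selects the RPN
 * plus the WIMG and PP bits of PTE word 1, i.e. the fields that must be
 * identical in every PTE matching the same translation.
 */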
94 static int pp_check(int key, int pp, int nx)
96 int access;
98 /* Compute access rights */
99 access = 0;
100 if (key == 0) {
101 switch (pp) {
102 case 0x0:
103 case 0x1:
104 case 0x2:
105 access |= PAGE_WRITE;
106 /* fall through */
107 case 0x3:
108 access |= PAGE_READ;
109 break;
111 } else {
112 switch (pp) {
113 case 0x0:
114 access = 0;
115 break;
116 case 0x1:
117 case 0x3:
118 access = PAGE_READ;
119 break;
120 case 0x2:
121 access = PAGE_READ | PAGE_WRITE;
122 break;
125 if (nx == 0) {
126 access |= PAGE_EXEC;
129 return access;
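/* Return 0 when the protection bits permit the access type, -2 otherwise */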
132 static int check_prot(int prot, MMUAccessType access_type)
134 return prot & prot_for_access_type(access_type) ? 0 : -2;
137 static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
138 target_ulong pte1, int h,
139 MMUAccessType access_type)
141 target_ulong ptem, mmask;
142 int access, ret, pteh, ptev, pp;
144 ret = -1;
145 /* Check validity and table match */
146 ptev = pte_is_valid(pte0);
147 pteh = (pte0 >> 6) & 1;
148 if (ptev && h == pteh) {
149 /* Check vsid & api */
150 ptem = pte0 & PTE_PTEM_MASK;
151 mmask = PTE_CHECK_MASK;
152 pp = pte1 & 0x00000003;
153 if (ptem == ctx->ptem) {
154 if (ctx->raddr != (hwaddr)-1ULL) {
155 /* all matches should have equal RPN, WIMG & PP */
156 if ((ctx->raddr & mmask) != (pte1 & mmask)) {
157 qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n");
158 return -3;
161 /* Compute access rights */
162 access = pp_check(ctx->key, pp, ctx->nx);
163 /* Keep the matching PTE information */
164 ctx->raddr = pte1;
165 ctx->prot = access;
166 ret = check_prot(ctx->prot, access_type);
167 if (ret == 0) {
168 /* Access granted */
169 qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");
170 } else {
171 /* Access right violation */
172 qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
177 return ret;
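/*
 * Maintain the referenced (0x100) and changed (0x80) bits of a PTE and
 * return 1 when *pte1p was modified and must be written back. The first
 * write to a clean page is forced to fault so that the changed bit gets
 * set when the store is retried.
 */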
180 static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
181 int ret, MMUAccessType access_type)
183 int store = 0;
185 /* Update page flags */
186 if (!(*pte1p & 0x00000100)) {
187 /* Update accessed flag */
188 *pte1p |= 0x00000100;
189 store = 1;
191 if (!(*pte1p & 0x00000080)) {
192 if (access_type == MMU_DATA_STORE && ret == 0) {
193 /* Update changed flag */
194 *pte1p |= 0x00000080;
195 store = 1;
196 } else {
197 /* Force page fault for first write access */
198 ctx->prot &= ~PAGE_WRITE;
202 return store;
205 /* Software driven TLB helpers */
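/*
 * Index into the flat tlb6[] array: the set comes from the EA page number,
 * each way occupies a contiguous block of tlb_per_way entries, and the
 * instruction TLB follows the data TLB when separate I/D TLBs are
 * configured.
 */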
206 static inline int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
207 int way, int is_code)
209 int nr;
211 /* Select TLB num in a way from address */
212 nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
213 /* Select TLB way */
214 nr += env->tlb_per_way * way;
215 /* The 6xx has separate TLBs for instructions and data */
216 if (is_code && env->id_tlbs == 1) {
217 nr += env->nb_tlb;
220 return nr;
223 static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
225 ppc6xx_tlb_t *tlb;
226 int nr, max;
228 /* LOG_SWTLB("Invalidate all TLBs\n"); */
229 /* Invalidate all defined software TLB */
230 max = env->nb_tlb;
231 if (env->id_tlbs == 1) {
232 max *= 2;
234 for (nr = 0; nr < max; nr++) {
235 tlb = &env->tlb.tlb6[nr];
236 pte_invalidate(&tlb->pte0);
238 tlb_flush(env_cpu(env));
241 static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
242 target_ulong eaddr,
243 int is_code, int match_epn)
245 #if !defined(FLUSH_ALL_TLBS)
246 CPUState *cs = env_cpu(env);
247 ppc6xx_tlb_t *tlb;
248 int way, nr;
250 /* Invalidate ITLB + DTLB, all ways */
251 for (way = 0; way < env->nb_ways; way++) {
252 nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
253 tlb = &env->tlb.tlb6[nr];
254 if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
255 LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr,
256 env->nb_tlb, eaddr);
257 pte_invalidate(&tlb->pte0);
258 tlb_flush_page(cs, tlb->EPN);
261 #else
262 /* XXX: the PowerPC specification says this is valid as well */
263 ppc6xx_tlb_invalidate_all(env);
264 #endif
267 static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env,
268 target_ulong eaddr, int is_code)
270 ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0);
273 #ifdef CONFIG_TCG
274 static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
275 int is_code, target_ulong pte0, target_ulong pte1)
277 ppc6xx_tlb_t *tlb;
278 int nr;
280 nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
281 tlb = &env->tlb.tlb6[nr];
282 LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
283 " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1);
284 /* Invalidate any pending reference in QEMU for this virtual address */
285 ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
286 tlb->pte0 = pte0;
287 tlb->pte1 = pte1;
288 tlb->EPN = EPN;
289 /* Store last way for LRU mechanism */
290 env->last_way = way;
292 #endif
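/*
 * Look eaddr up in the 6xx software TLB: returns 0 on a hit with ctx->raddr
 * and ctx->prot filled in, -2 on a protection violation (the matching entry
 * is still used to update the R/C bits) and -1 when no entry matches.
 */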
294 static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
295 target_ulong eaddr, MMUAccessType access_type)
297 ppc6xx_tlb_t *tlb;
298 int nr, best, way;
299 int ret;
301 best = -1;
302 ret = -1; /* No TLB found */
303 for (way = 0; way < env->nb_ways; way++) {
304 nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH);
305 tlb = &env->tlb.tlb6[nr];
306 /* This test "emulates" the PTE index match for hardware TLBs */
307 if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
308 LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx
309 "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb,
310 pte_is_valid(tlb->pte0) ? "valid" : "inval",
311 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
312 continue;
314 LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " "
315 TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb,
316 pte_is_valid(tlb->pte0) ? "valid" : "inval",
317 tlb->EPN, eaddr, tlb->pte1,
318 access_type == MMU_DATA_STORE ? 'S' : 'L',
319 access_type == MMU_INST_FETCH ? 'I' : 'D');
320 switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
321 0, access_type)) {
322 case -3:
323 /* TLB inconsistency */
324 return -1;
325 case -2:
326 /* Access violation */
327 ret = -2;
328 best = nr;
329 break;
330 case -1:
331 default:
332 /* No match */
333 break;
334 case 0:
335 /* access granted */
336 /*
337 * XXX: we should keep looping to check all TLBs for
338 * consistency, but we can speed the whole thing up since
339 * the result would be undefined if the TLBs were not
340 * consistent.
341 */
342 ret = 0;
343 best = nr;
344 goto done;
347 if (best != -1) {
348 done:
349 LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n",
350 ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
351 /* Update page flags */
352 pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type);
355 return ret;
358 /* Perform BAT hit & translation */
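/*
 * BATu holds the block effective page index (BEPI), the block length
 * encoding (BL) and the Vs/Vp validity bits; BATl holds the block real page
 * number (BRPN) and the PP protection bits. BL is expanded into an address
 * mask so the effective address can be compared against BEPI.
 */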
359 static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
360 int *validp, int *protp, target_ulong *BATu,
361 target_ulong *BATl)
363 target_ulong bl;
364 int pp, valid, prot;
366 bl = (*BATu & 0x00001FFC) << 15;
367 valid = 0;
368 prot = 0;
369 if (((msr_pr == 0) && (*BATu & 0x00000002)) ||
370 ((msr_pr != 0) && (*BATu & 0x00000001))) {
371 valid = 1;
372 pp = *BATl & 0x00000003;
373 if (pp != 0) {
374 prot = PAGE_READ | PAGE_EXEC;
375 if (pp == 0x2) {
376 prot |= PAGE_WRITE;
380 *blp = bl;
381 *validp = valid;
382 *protp = prot;
385 static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
386 target_ulong virtual, MMUAccessType access_type)
388 target_ulong *BATlt, *BATut, *BATu, *BATl;
389 target_ulong BEPIl, BEPIu, bl;
390 int i, valid, prot;
391 int ret = -1;
392 bool ifetch = access_type == MMU_INST_FETCH;
394 LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
395 ifetch ? 'I' : 'D', virtual);
396 if (ifetch) {
397 BATlt = env->IBAT[1];
398 BATut = env->IBAT[0];
399 } else {
400 BATlt = env->DBAT[1];
401 BATut = env->DBAT[0];
403 for (i = 0; i < env->nb_BATs; i++) {
404 BATu = &BATut[i];
405 BATl = &BATlt[i];
406 BEPIu = *BATu & 0xF0000000;
407 BEPIl = *BATu & 0x0FFE0000;
408 bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
409 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
410 " BATl " TARGET_FMT_lx "\n", __func__,
411 ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl);
412 if ((virtual & 0xF0000000) == BEPIu &&
413 ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
414 /* BAT matches */
415 if (valid != 0) {
416 /* Get physical address */
417 ctx->raddr = (*BATl & 0xF0000000) |
418 ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
419 (virtual & 0x0001F000);
420 /* Compute access rights */
421 ctx->prot = prot;
422 ret = check_prot(ctx->prot, access_type);
423 if (ret == 0) {
424 LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n",
425 i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-',
426 ctx->prot & PAGE_WRITE ? 'W' : '-');
428 break;
432 if (ret < 0) {
433 #if defined(DEBUG_BATS)
434 if (qemu_log_enabled()) {
435 LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual);
436 for (i = 0; i < 4; i++) {
437 BATu = &BATut[i];
438 BATl = &BATlt[i];
439 BEPIu = *BATu & 0xF0000000;
440 BEPIl = *BATu & 0x0FFE0000;
441 bl = (*BATu & 0x00001FFC) << 15;
442 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
443 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
444 TARGET_FMT_lx " " TARGET_FMT_lx "\n",
445 __func__, ifetch ? 'I' : 'D', i, virtual,
446 *BATu, *BATl, BEPIu, BEPIl, bl);
449 #endif
451 /* No hit */
452 return ret;
455 /* Perform segment based translation */
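/*
 * The segment register selected by the top four bits of the effective
 * address supplies the VSID, the Ks/Kp protection keys and the no-execute
 * bit. For ordinary (T=0) segments the primary hash is VSID ^ page index
 * and the secondary hash is its complement; T=1 segments are direct-store
 * and bypass the page tables entirely.
 */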
456 static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
457 target_ulong eaddr, MMUAccessType access_type,
458 int type)
460 PowerPCCPU *cpu = env_archcpu(env);
461 hwaddr hash;
462 target_ulong vsid;
463 int ds, pr, target_page_bits;
464 int ret;
465 target_ulong sr, pgidx;
467 pr = msr_pr;
468 ctx->eaddr = eaddr;
470 sr = env->sr[eaddr >> 28];
471 ctx->key = (((sr & 0x20000000) && (pr != 0)) ||
472 ((sr & 0x40000000) && (pr == 0))) ? 1 : 0;
473 ds = sr & 0x80000000 ? 1 : 0;
474 ctx->nx = sr & 0x10000000 ? 1 : 0;
475 vsid = sr & 0x00FFFFFF;
476 target_page_bits = TARGET_PAGE_BITS;
477 qemu_log_mask(CPU_LOG_MMU,
478 "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
479 " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
480 " ir=%d dr=%d pr=%d %d t=%d\n",
481 eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
482 (int)msr_dr, pr != 0 ? 1 : 0, access_type == MMU_DATA_STORE, type);
483 pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
484 hash = vsid ^ pgidx;
485 ctx->ptem = (vsid << 7) | (pgidx >> 10);
487 qemu_log_mask(CPU_LOG_MMU,
488 "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
489 ctx->key, ds, ctx->nx, vsid);
490 ret = -1;
491 if (!ds) {
492 /* Check if instruction fetch is allowed, if needed */
493 if (type != ACCESS_CODE || ctx->nx == 0) {
494 /* Page address translation */
495 qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
496 " htab_mask " TARGET_FMT_plx
497 " hash " TARGET_FMT_plx "\n",
498 ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);
499 ctx->hash[0] = hash;
500 ctx->hash[1] = ~hash;
502 /* Initialize real address with an invalid value */
503 ctx->raddr = (hwaddr)-1ULL;
504 /* Software TLB search */
505 ret = ppc6xx_tlb_check(env, ctx, eaddr, access_type);
506 #if defined(DUMP_PAGE_TABLES)
507 if (qemu_loglevel_mask(CPU_LOG_MMU)) {
508 CPUState *cs = env_cpu(env);
509 hwaddr curaddr;
510 uint32_t a0, a1, a2, a3;
512 qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx
513 "\n", ppc_hash32_hpt_base(cpu),
514 ppc_hash32_hpt_mask(cpu) + 0x80);
515 for (curaddr = ppc_hash32_hpt_base(cpu);
516 curaddr < (ppc_hash32_hpt_base(cpu)
517 + ppc_hash32_hpt_mask(cpu) + 0x80);
518 curaddr += 16) {
519 a0 = ldl_phys(cs->as, curaddr);
520 a1 = ldl_phys(cs->as, curaddr + 4);
521 a2 = ldl_phys(cs->as, curaddr + 8);
522 a3 = ldl_phys(cs->as, curaddr + 12);
523 if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
524 qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n",
525 curaddr, a0, a1, a2, a3);
529 #endif
530 } else {
531 qemu_log_mask(CPU_LOG_MMU, "No access allowed\n");
532 ret = -3;
534 } else {
535 target_ulong sr;
537 qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
538 /* Direct-store segment : absolutely *BUGGY* for now */
541 * Direct-store implies a 32-bit MMU.
542 * Check the Segment Register's bus unit ID (BUID).
544 sr = env->sr[eaddr >> 28];
545 if ((sr & 0x1FF00000) >> 20 == 0x07f) {
547 * Memory-forced I/O controller interface access
549 * If T=1 and BUID=x'07F', the 601 performs a memory
550 * access to SR[28-31] LA[4-31], bypassing all protection
551 * mechanisms.
553 ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
554 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
555 return 0;
558 switch (type) {
559 case ACCESS_INT:
560 /* Integer load/store: the only access type allowed */
561 break;
562 case ACCESS_CODE:
563 /* No code fetch is allowed in direct-store areas */
564 return -4;
565 case ACCESS_FLOAT:
566 /* Floating point load/store */
567 return -4;
568 case ACCESS_RES:
569 /* lwarx, ldarx or stwcx. */
570 return -4;
571 case ACCESS_CACHE:
572 /*
573 * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
574 *
575 * Should make the instruction a no-op. As it already is a
576 * no-op, it's quite easy :-)
577 */
578 ctx->raddr = eaddr;
579 return 0;
580 case ACCESS_EXT:
581 /* eciwx or ecowx */
582 return -4;
583 default:
584 qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need "
585 "address translation\n");
586 return -4;
588 if ((access_type == MMU_DATA_STORE || ctx->key != 1) &&
589 (access_type == MMU_DATA_LOAD || ctx->key != 0)) {
590 ctx->raddr = eaddr;
591 ret = 2;
592 } else {
593 ret = -2;
597 return ret;
600 /* Generic TLB check function for embedded PowerPC implementations */
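/*
 * Returns 0 and fills *raddrp when the entry is valid, covers the effective
 * address and is either global (PID 0) or tagged with the given PID;
 * returns -1 otherwise. With 'ext' set, the low four bits of the RPN extend
 * the physical address to 36 bits.
 */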
601 static int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
602 hwaddr *raddrp,
603 target_ulong address, uint32_t pid, int ext,
604 int i)
606 target_ulong mask;
608 /* Check valid flag */
609 if (!(tlb->prot & PAGE_VALID)) {
610 return -1;
612 mask = ~(tlb->size - 1);
613 LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx
614 " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN,
615 mask, (uint32_t)tlb->PID, tlb->prot);
616 /* Check PID */
617 if (tlb->PID != 0 && tlb->PID != pid) {
618 return -1;
620 /* Check effective address */
621 if ((address & mask) != tlb->EPN) {
622 return -1;
624 *raddrp = (tlb->RPN & mask) | (address & ~mask);
625 if (ext) {
626 /* Extend the physical address to 36 bits */
627 *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32;
630 return 0;
633 #ifdef CONFIG_TCG
634 /* Generic TLB search function for PowerPC embedded implementations */
635 static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address,
636 uint32_t pid)
638 ppcemb_tlb_t *tlb;
639 hwaddr raddr;
640 int i, ret;
642 /* Default return value is no match */
643 ret = -1;
644 for (i = 0; i < env->nb_tlb; i++) {
645 tlb = &env->tlb.tlbe[i];
646 if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) {
647 ret = i;
648 break;
652 return ret;
654 #endif
656 /* Helpers specific to PowerPC 40x implementations */
657 static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
659 ppcemb_tlb_t *tlb;
660 int i;
662 for (i = 0; i < env->nb_tlb; i++) {
663 tlb = &env->tlb.tlbe[i];
664 tlb->prot &= ~PAGE_VALID;
666 tlb_flush(env_cpu(env));
669 static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
670 target_ulong address,
671 MMUAccessType access_type)
673 ppcemb_tlb_t *tlb;
674 hwaddr raddr;
675 int i, ret, zsel, zpr, pr;
677 ret = -1;
678 raddr = (hwaddr)-1ULL;
679 pr = msr_pr;
680 for (i = 0; i < env->nb_tlb; i++) {
681 tlb = &env->tlb.tlbe[i];
682 if (ppcemb_tlb_check(env, tlb, &raddr, address,
683 env->spr[SPR_40x_PID], 0, i) < 0) {
684 continue;
686 zsel = (tlb->attr >> 4) & 0xF;
687 zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
688 LOG_SWTLB("%s: TLB %d zsel %d zpr %d ty %d attr %08x\n",
689 __func__, i, zsel, zpr, access_type, tlb->attr);
690 /* Check execute enable bit */
691 switch (zpr) {
692 case 0x2:
693 if (pr != 0) {
694 goto check_perms;
696 /* fall through */
697 case 0x3:
698 /* All accesses granted */
699 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
700 ret = 0;
701 break;
702 case 0x0:
703 if (pr != 0) {
704 /* Raise Zone protection fault. */
705 env->spr[SPR_40x_ESR] = 1 << 22;
706 ctx->prot = 0;
707 ret = -2;
708 break;
710 /* fall through */
711 case 0x1:
712 check_perms:
713 /* Check from TLB entry */
714 ctx->prot = tlb->prot;
715 ret = check_prot(ctx->prot, access_type);
716 if (ret == -2) {
717 env->spr[SPR_40x_ESR] = 0;
719 break;
721 if (ret >= 0) {
722 ctx->raddr = raddr;
723 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
724 " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
725 ret);
726 return 0;
729 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
730 " %d %d\n", __func__, address, raddr, ctx->prot, ret);
732 return ret;
735 void store_40x_sler(CPUPPCState *env, uint32_t val)
737 /* XXX: TO BE FIXED */
738 if (val != 0x00000000) {
739 cpu_abort(env_cpu(env),
740 "Little-endian regions are not supported yet\n");
742 env->spr[SPR_405_SLER] = val;
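/*
 * BookE TLB lookup: the entry is tried against PID, then PID1 and PID2 when
 * those are non-zero. User permissions live in the low nibble of tlb->prot
 * and supervisor permissions in the high nibble, and the entry's address
 * space bit must match MSR[IR] (fetches) or MSR[DR] (data accesses).
 */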
745 static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
746 hwaddr *raddr, int *prot, target_ulong address,
747 MMUAccessType access_type, int i)
749 int prot2;
751 if (ppcemb_tlb_check(env, tlb, raddr, address,
752 env->spr[SPR_BOOKE_PID],
753 !env->nb_pids, i) >= 0) {
754 goto found_tlb;
757 if (env->spr[SPR_BOOKE_PID1] &&
758 ppcemb_tlb_check(env, tlb, raddr, address,
759 env->spr[SPR_BOOKE_PID1], 0, i) >= 0) {
760 goto found_tlb;
763 if (env->spr[SPR_BOOKE_PID2] &&
764 ppcemb_tlb_check(env, tlb, raddr, address,
765 env->spr[SPR_BOOKE_PID2], 0, i) >= 0) {
766 goto found_tlb;
769 LOG_SWTLB("%s: TLB entry not found\n", __func__);
770 return -1;
772 found_tlb:
774 if (msr_pr != 0) {
775 prot2 = tlb->prot & 0xF;
776 } else {
777 prot2 = (tlb->prot >> 4) & 0xF;
780 /* Check the address space */
781 if ((access_type == MMU_INST_FETCH ? msr_ir : msr_dr) != (tlb->attr & 1)) {
782 LOG_SWTLB("%s: AS doesn't match\n", __func__);
783 return -1;
786 *prot = prot2;
787 if (prot2 & prot_for_access_type(access_type)) {
788 LOG_SWTLB("%s: good TLB!\n", __func__);
789 return 0;
792 LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2);
793 return access_type == MMU_INST_FETCH ? -3 : -2;
796 static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
797 target_ulong address,
798 MMUAccessType access_type)
800 ppcemb_tlb_t *tlb;
801 hwaddr raddr;
802 int i, ret;
804 ret = -1;
805 raddr = (hwaddr)-1ULL;
806 for (i = 0; i < env->nb_tlb; i++) {
807 tlb = &env->tlb.tlbe[i];
808 ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address,
809 access_type, i);
810 if (ret != -1) {
811 break;
815 if (ret >= 0) {
816 ctx->raddr = raddr;
817 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
818 " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
819 ret);
820 } else {
821 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
822 " %d %d\n", __func__, address, raddr, ctx->prot, ret);
825 return ret;
828 #ifdef CONFIG_TCG
829 static void booke206_flush_tlb(CPUPPCState *env, int flags,
830 const int check_iprot)
832 int tlb_size;
833 int i, j;
834 ppcmas_tlb_t *tlb = env->tlb.tlbm;
836 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
837 if (flags & (1 << i)) {
838 tlb_size = booke206_tlb_size(env, i);
839 for (j = 0; j < tlb_size; j++) {
840 if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) {
841 tlb[j].mas1 &= ~MAS1_VALID;
845 tlb += booke206_tlb_size(env, i);
848 tlb_flush(env_cpu(env));
850 #endif
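/*
 * In this model the MAS1 TSIZE field encodes the page size as
 * 1KB << TSIZE, e.g. TSIZE=2 selects a 4KB page.
 */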
852 static hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
853 ppcmas_tlb_t *tlb)
855 int tlbm_size;
857 tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
859 return 1024ULL << tlbm_size;
862 /* TLB check function for MAS based SoftTLBs */
863 static int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
864 hwaddr *raddrp, target_ulong address,
865 uint32_t pid)
867 hwaddr mask;
868 uint32_t tlb_pid;
870 if (!msr_cm) {
871 /* In 32bit mode we can only address 32bit EAs */
872 address = (uint32_t)address;
875 /* Check valid flag */
876 if (!(tlb->mas1 & MAS1_VALID)) {
877 return -1;
880 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
881 LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%"
882 PRIx64 " mask=0x%" HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%"
883 PRIx32 "\n", __func__, address, pid, tlb->mas1, tlb->mas2, mask,
884 tlb->mas7_3, tlb->mas8);
886 /* Check PID */
887 tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
888 if (tlb_pid != 0 && tlb_pid != pid) {
889 return -1;
892 /* Check effective address */
893 if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) {
894 return -1;
897 if (raddrp) {
898 *raddrp = (tlb->mas7_3 & mask) | (address & ~mask);
901 return 0;
904 static bool is_epid_mmu(int mmu_idx)
906 return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD;
909 static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type)
911 uint32_t esr = 0;
912 if (access_type == MMU_DATA_STORE) {
913 esr |= ESR_ST;
915 if (is_epid_mmu(mmu_idx)) {
916 esr |= ESR_EPID;
918 return esr;
921 /*
922 * Get the EPID register given the mmu_idx. If this is a regular load,
923 * construct the EPID access bits from the current processor state.
924 *
925 * Get the effective AS and PR bits and the PID. The PID is returned
926 * only if EPID load is requested, otherwise the caller must detect
927 * the correct EPID. Return true if a valid EPID is returned.
928 */
929 static bool mmubooke206_get_as(CPUPPCState *env,
930 int mmu_idx, uint32_t *epid_out,
931 bool *as_out, bool *pr_out)
933 if (is_epid_mmu(mmu_idx)) {
934 uint32_t epidr;
935 if (mmu_idx == PPC_TLB_EPID_STORE) {
936 epidr = env->spr[SPR_BOOKE_EPSC];
937 } else {
938 epidr = env->spr[SPR_BOOKE_EPLC];
940 *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT;
941 *as_out = !!(epidr & EPID_EAS);
942 *pr_out = !!(epidr & EPID_EPR);
943 return true;
944 } else {
945 *as_out = msr_ds;
946 *pr_out = msr_pr;
947 return false;
951 /* Check if the tlb found by hashing really matches */
952 static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb,
953 hwaddr *raddr, int *prot,
954 target_ulong address,
955 MMUAccessType access_type, int mmu_idx)
957 int prot2 = 0;
958 uint32_t epid;
959 bool as, pr;
960 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);
962 if (!use_epid) {
963 if (ppcmas_tlb_check(env, tlb, raddr, address,
964 env->spr[SPR_BOOKE_PID]) >= 0) {
965 goto found_tlb;
968 if (env->spr[SPR_BOOKE_PID1] &&
969 ppcmas_tlb_check(env, tlb, raddr, address,
970 env->spr[SPR_BOOKE_PID1]) >= 0) {
971 goto found_tlb;
974 if (env->spr[SPR_BOOKE_PID2] &&
975 ppcmas_tlb_check(env, tlb, raddr, address,
976 env->spr[SPR_BOOKE_PID2]) >= 0) {
977 goto found_tlb;
979 } else {
980 if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) {
981 goto found_tlb;
985 LOG_SWTLB("%s: TLB entry not found\n", __func__);
986 return -1;
988 found_tlb:
990 if (pr) {
991 if (tlb->mas7_3 & MAS3_UR) {
992 prot2 |= PAGE_READ;
994 if (tlb->mas7_3 & MAS3_UW) {
995 prot2 |= PAGE_WRITE;
997 if (tlb->mas7_3 & MAS3_UX) {
998 prot2 |= PAGE_EXEC;
1000 } else {
1001 if (tlb->mas7_3 & MAS3_SR) {
1002 prot2 |= PAGE_READ;
1004 if (tlb->mas7_3 & MAS3_SW) {
1005 prot2 |= PAGE_WRITE;
1007 if (tlb->mas7_3 & MAS3_SX) {
1008 prot2 |= PAGE_EXEC;
1012 /* Check the address space and permissions */
1013 if (access_type == MMU_INST_FETCH) {
1014 /* There is no way to fetch code using epid load */
1015 assert(!use_epid);
1016 as = msr_ir;
1019 if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
1020 LOG_SWTLB("%s: AS doesn't match\n", __func__);
1021 return -1;
1024 *prot = prot2;
1025 if (prot2 & prot_for_access_type(access_type)) {
1026 LOG_SWTLB("%s: good TLB!\n", __func__);
1027 return 0;
1030 LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2);
1031 return access_type == MMU_INST_FETCH ? -3 : -2;
1034 static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
1035 target_ulong address,
1036 MMUAccessType access_type,
1037 int mmu_idx)
1039 ppcmas_tlb_t *tlb;
1040 hwaddr raddr;
1041 int i, j, ret;
1043 ret = -1;
1044 raddr = (hwaddr)-1ULL;
1046 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
1047 int ways = booke206_tlb_ways(env, i);
1049 for (j = 0; j < ways; j++) {
1050 tlb = booke206_get_tlbm(env, i, address, j);
1051 if (!tlb) {
1052 continue;
1054 ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address,
1055 access_type, mmu_idx);
1056 if (ret != -1) {
1057 goto found_tlb;
1062 found_tlb:
1064 if (ret >= 0) {
1065 ctx->raddr = raddr;
1066 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
1067 " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
1068 ret);
1069 } else {
1070 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
1071 " %d %d\n", __func__, address, raddr, ctx->prot, ret);
1074 return ret;
1077 static const char *book3e_tsize_to_str[32] = {
1078 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
1079 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
1080 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
1081 "1T", "2T"
1084 static void mmubooke_dump_mmu(CPUPPCState *env)
1086 ppcemb_tlb_t *entry;
1087 int i;
1089 if (kvm_enabled() && !env->kvm_sw_tlb) {
1090 qemu_printf("Cannot access KVM TLB\n");
1091 return;
1094 qemu_printf("\nTLB:\n");
1095 qemu_printf("Effective Physical Size PID Prot "
1096 "Attr\n");
1098 entry = &env->tlb.tlbe[0];
1099 for (i = 0; i < env->nb_tlb; i++, entry++) {
1100 hwaddr ea, pa;
1101 target_ulong mask;
1102 uint64_t size = (uint64_t)entry->size;
1103 char size_buf[20];
1105 /* Check valid flag */
1106 if (!(entry->prot & PAGE_VALID)) {
1107 continue;
1110 mask = ~(entry->size - 1);
1111 ea = entry->EPN & mask;
1112 pa = entry->RPN & mask;
1113 /* Extend the physical address to 36 bits */
1114 pa |= (hwaddr)(entry->RPN & 0xF) << 32;
1115 if (size >= 1 * MiB) {
1116 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB);
1117 } else {
1118 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB);
1120 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
1121 (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
1122 entry->prot, entry->attr);
1127 static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset,
1128 int tlbsize)
1130 ppcmas_tlb_t *entry;
1131 int i;
1133 qemu_printf("\nTLB%d:\n", tlbn);
1134 qemu_printf("Effective Physical Size TID TS SRWX"
1135 " URWX WIMGE U0123\n");
1137 entry = &env->tlb.tlbm[offset];
1138 for (i = 0; i < tlbsize; i++, entry++) {
1139 hwaddr ea, pa, size;
1140 int tsize;
1142 if (!(entry->mas1 & MAS1_VALID)) {
1143 continue;
1146 tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
1147 size = 1024ULL << tsize;
1148 ea = entry->mas2 & ~(size - 1);
1149 pa = entry->mas7_3 & ~(size - 1);
1151 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c"
1152 "U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
1153 (uint64_t)ea, (uint64_t)pa,
1154 book3e_tsize_to_str[tsize],
1155 (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT,
1156 (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT,
1157 entry->mas7_3 & MAS3_SR ? 'R' : '-',
1158 entry->mas7_3 & MAS3_SW ? 'W' : '-',
1159 entry->mas7_3 & MAS3_SX ? 'X' : '-',
1160 entry->mas7_3 & MAS3_UR ? 'R' : '-',
1161 entry->mas7_3 & MAS3_UW ? 'W' : '-',
1162 entry->mas7_3 & MAS3_UX ? 'X' : '-',
1163 entry->mas2 & MAS2_W ? 'W' : '-',
1164 entry->mas2 & MAS2_I ? 'I' : '-',
1165 entry->mas2 & MAS2_M ? 'M' : '-',
1166 entry->mas2 & MAS2_G ? 'G' : '-',
1167 entry->mas2 & MAS2_E ? 'E' : '-',
1168 entry->mas7_3 & MAS3_U0 ? '0' : '-',
1169 entry->mas7_3 & MAS3_U1 ? '1' : '-',
1170 entry->mas7_3 & MAS3_U2 ? '2' : '-',
1171 entry->mas7_3 & MAS3_U3 ? '3' : '-');
1175 static void mmubooke206_dump_mmu(CPUPPCState *env)
1177 int offset = 0;
1178 int i;
1180 if (kvm_enabled() && !env->kvm_sw_tlb) {
1181 qemu_printf("Cannot access KVM TLB\n");
1182 return;
1185 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
1186 int size = booke206_tlb_size(env, i);
1188 if (size == 0) {
1189 continue;
1192 mmubooke206_dump_one_tlb(env, i, offset, size);
1193 offset += size;
1197 static void mmu6xx_dump_BATs(CPUPPCState *env, int type)
1199 target_ulong *BATlt, *BATut, *BATu, *BATl;
1200 target_ulong BEPIl, BEPIu, bl;
1201 int i;
1203 switch (type) {
1204 case ACCESS_CODE:
1205 BATlt = env->IBAT[1];
1206 BATut = env->IBAT[0];
1207 break;
1208 default:
1209 BATlt = env->DBAT[1];
1210 BATut = env->DBAT[0];
1211 break;
1214 for (i = 0; i < env->nb_BATs; i++) {
1215 BATu = &BATut[i];
1216 BATl = &BATlt[i];
1217 BEPIu = *BATu & 0xF0000000;
1218 BEPIl = *BATu & 0x0FFE0000;
1219 bl = (*BATu & 0x00001FFC) << 15;
1220 qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
1221 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
1222 TARGET_FMT_lx " " TARGET_FMT_lx "\n",
1223 type == ACCESS_CODE ? "code" : "data", i,
1224 *BATu, *BATl, BEPIu, BEPIl, bl);
1228 static void mmu6xx_dump_mmu(CPUPPCState *env)
1230 PowerPCCPU *cpu = env_archcpu(env);
1231 ppc6xx_tlb_t *tlb;
1232 target_ulong sr;
1233 int type, way, entry, i;
1235 qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu));
1236 qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu));
1238 qemu_printf("\nSegment registers:\n");
1239 for (i = 0; i < 32; i++) {
1240 sr = env->sr[i];
1241 if (sr & 0x80000000) {
1242 qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
1243 "CNTLR_SPEC=0x%05x\n", i,
1244 sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
1245 sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF),
1246 (uint32_t)(sr & 0xFFFFF));
1247 } else {
1248 qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i,
1249 sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
1250 sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0,
1251 (uint32_t)(sr & 0x00FFFFFF));
1255 qemu_printf("\nBATs:\n");
1256 mmu6xx_dump_BATs(env, ACCESS_INT);
1257 mmu6xx_dump_BATs(env, ACCESS_CODE);
1259 if (env->id_tlbs != 1) {
1260 qemu_printf("ERROR: 6xx MMU should have separate TLBs"
1261 " for code and data\n");
1264 qemu_printf("\nTLBs [EPN EPN + SIZE]\n");
1266 for (type = 0; type < 2; type++) {
1267 for (way = 0; way < env->nb_ways; way++) {
1268 for (entry = env->nb_tlb * type + env->tlb_per_way * way;
1269 entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1));
1270 entry++) {
1272 tlb = &env->tlb.tlb6[entry];
1273 qemu_printf("%s TLB %02d/%02d way:%d %s ["
1274 TARGET_FMT_lx " " TARGET_FMT_lx "]\n",
1275 type ? "code" : "data", entry % env->nb_tlb,
1276 env->nb_tlb, way,
1277 pte_is_valid(tlb->pte0) ? "valid" : "inval",
1278 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE);
1284 void dump_mmu(CPUPPCState *env)
1286 switch (env->mmu_model) {
1287 case POWERPC_MMU_BOOKE:
1288 mmubooke_dump_mmu(env);
1289 break;
1290 case POWERPC_MMU_BOOKE206:
1291 mmubooke206_dump_mmu(env);
1292 break;
1293 case POWERPC_MMU_SOFT_6xx:
1294 case POWERPC_MMU_SOFT_74xx:
1295 mmu6xx_dump_mmu(env);
1296 break;
1297 #if defined(TARGET_PPC64)
1298 case POWERPC_MMU_64B:
1299 case POWERPC_MMU_2_03:
1300 case POWERPC_MMU_2_06:
1301 case POWERPC_MMU_2_07:
1302 dump_slb(env_archcpu(env));
1303 break;
1304 case POWERPC_MMU_3_00:
1305 if (ppc64_v3_radix(env_archcpu(env))) {
1306 qemu_log_mask(LOG_UNIMP, "%s: the PPC64 MMU is unsupported\n",
1307 __func__);
1308 } else {
1309 dump_slb(env_archcpu(env));
1311 break;
1312 #endif
1313 default:
1314 qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__);
1318 static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr,
1319 MMUAccessType access_type)
1321 int in_plb, ret;
1323 ctx->raddr = eaddr;
1324 ctx->prot = PAGE_READ | PAGE_EXEC;
1325 ret = 0;
1326 switch (env->mmu_model) {
1327 case POWERPC_MMU_SOFT_6xx:
1328 case POWERPC_MMU_SOFT_74xx:
1329 case POWERPC_MMU_SOFT_4xx:
1330 case POWERPC_MMU_REAL:
1331 case POWERPC_MMU_BOOKE:
1332 ctx->prot |= PAGE_WRITE;
1333 break;
1335 case POWERPC_MMU_SOFT_4xx_Z:
1336 if (unlikely(msr_pe != 0)) {
1337 /*
1338 * The 403 family adds some particular protections, using the
1339 * PBL/PBU registers, for accesses with no translation.
1340 */
1341 in_plb =
1342 /* Check PLB validity */
1343 (env->pb[0] < env->pb[1] &&
1344 /* and address in plb area */
1345 eaddr >= env->pb[0] && eaddr < env->pb[1]) ||
1346 (env->pb[2] < env->pb[3] &&
1347 eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0;
1348 if (in_plb ^ msr_px) {
1349 /* Access in protected area */
1350 if (access_type == MMU_DATA_STORE) {
1351 /* Access is not allowed */
1352 ret = -2;
1354 } else {
1355 /* Read-write access is allowed */
1356 ctx->prot |= PAGE_WRITE;
1359 break;
1361 default:
1362 /* Caller's checks mean we should never get here for other models */
1363 abort();
1364 return -1;
1367 return ret;
1370 static int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx,
1371 target_ulong eaddr,
1372 MMUAccessType access_type, int type,
1373 int mmu_idx)
1375 int ret = -1;
1376 bool real_mode = (type == ACCESS_CODE && msr_ir == 0)
1377 || (type != ACCESS_CODE && msr_dr == 0);
1379 switch (env->mmu_model) {
1380 case POWERPC_MMU_SOFT_6xx:
1381 case POWERPC_MMU_SOFT_74xx:
1382 if (real_mode) {
1383 ret = check_physical(env, ctx, eaddr, access_type);
1384 } else {
1385 /* Try to find a BAT */
1386 if (env->nb_BATs != 0) {
1387 ret = get_bat_6xx_tlb(env, ctx, eaddr, access_type);
1389 if (ret < 0) {
1390 /* We didn't match any BAT entry or don't have BATs */
1391 ret = get_segment_6xx_tlb(env, ctx, eaddr, access_type, type);
1394 break;
1396 case POWERPC_MMU_SOFT_4xx:
1397 case POWERPC_MMU_SOFT_4xx_Z:
1398 if (real_mode) {
1399 ret = check_physical(env, ctx, eaddr, access_type);
1400 } else {
1401 ret = mmu40x_get_physical_address(env, ctx, eaddr, access_type);
1403 break;
1404 case POWERPC_MMU_BOOKE:
1405 ret = mmubooke_get_physical_address(env, ctx, eaddr, access_type);
1406 break;
1407 case POWERPC_MMU_BOOKE206:
1408 ret = mmubooke206_get_physical_address(env, ctx, eaddr, access_type,
1409 mmu_idx);
1410 break;
1411 case POWERPC_MMU_MPC8xx:
1412 /* XXX: TODO */
1413 cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
1414 break;
1415 case POWERPC_MMU_REAL:
1416 if (real_mode) {
1417 ret = check_physical(env, ctx, eaddr, access_type);
1418 } else {
1419 cpu_abort(env_cpu(env),
1420 "PowerPC in real mode does not do any translation\n");
1422 return -1;
1423 default:
1424 cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n");
1425 return -1;
1428 return ret;
1431 #ifdef CONFIG_TCG
1432 static int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
1433 target_ulong eaddr, MMUAccessType access_type,
1434 int type)
1436 return get_physical_address_wtlb(env, ctx, eaddr, access_type, type, 0);
1438 #endif
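/*
 * On a BookE 2.06 TLB miss, preload MAS0-MAS7 from the MAS4 defaults and
 * the faulting address so that a subsequent tlbwe from the guest handler
 * targets the right entry. The TID is taken from the PID register selected
 * by MAS4[TIDSELD], or from the EPLC/EPSC registers for external-PID
 * accesses, and MAS0[ESEL/NV] implement a simple round-robin victim choice.
 */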
1440 static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
1441 MMUAccessType access_type, int mmu_idx)
1443 uint32_t epid;
1444 bool as, pr;
1445 uint32_t missed_tid = 0;
1446 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);
1448 if (access_type == MMU_INST_FETCH) {
1449 as = msr_ir;
1451 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
1452 env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
1453 env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
1454 env->spr[SPR_BOOKE_MAS3] = 0;
1455 env->spr[SPR_BOOKE_MAS6] = 0;
1456 env->spr[SPR_BOOKE_MAS7] = 0;
1458 /* AS */
1459 if (as) {
1460 env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
1461 env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS;
1464 env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID;
1465 env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK;
1467 if (!use_epid) {
1468 switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) {
1469 case MAS4_TIDSELD_PID0:
1470 missed_tid = env->spr[SPR_BOOKE_PID];
1471 break;
1472 case MAS4_TIDSELD_PID1:
1473 missed_tid = env->spr[SPR_BOOKE_PID1];
1474 break;
1475 case MAS4_TIDSELD_PID2:
1476 missed_tid = env->spr[SPR_BOOKE_PID2];
1477 break;
1479 env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16;
1480 } else {
1481 missed_tid = epid;
1482 env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16;
1484 env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT);
1487 /* next victim logic */
1488 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
1489 env->last_way++;
1490 env->last_way &= booke206_tlb_ways(env, 0) - 1;
1491 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
1494 /* Perform address translation */
1495 /* TODO: Split this by mmu_model. */
1496 static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr,
1497 MMUAccessType access_type,
1498 hwaddr *raddrp, int *psizep, int *protp,
1499 int mmu_idx, bool guest_visible)
1501 CPUState *cs = CPU(cpu);
1502 CPUPPCState *env = &cpu->env;
1503 mmu_ctx_t ctx;
1504 int type;
1505 int ret;
1507 if (access_type == MMU_INST_FETCH) {
1508 /* code access */
1509 type = ACCESS_CODE;
1510 } else if (guest_visible) {
1511 /* data access */
1512 type = env->access_type;
1513 } else {
1514 type = ACCESS_INT;
1517 ret = get_physical_address_wtlb(env, &ctx, eaddr, access_type,
1518 type, mmu_idx);
1519 if (ret == 0) {
1520 *raddrp = ctx.raddr;
1521 *protp = ctx.prot;
1522 *psizep = TARGET_PAGE_BITS;
1523 return true;
1526 if (guest_visible) {
1527 LOG_MMU_STATE(cs);
1528 if (type == ACCESS_CODE) {
1529 switch (ret) {
1530 case -1:
1531 /* No matches in page tables or TLB */
1532 switch (env->mmu_model) {
1533 case POWERPC_MMU_SOFT_6xx:
1534 cs->exception_index = POWERPC_EXCP_IFTLB;
1535 env->error_code = 1 << 18;
1536 env->spr[SPR_IMISS] = eaddr;
1537 env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
1538 goto tlb_miss;
1539 case POWERPC_MMU_SOFT_74xx:
1540 cs->exception_index = POWERPC_EXCP_IFTLB;
1541 goto tlb_miss_74xx;
1542 case POWERPC_MMU_SOFT_4xx:
1543 case POWERPC_MMU_SOFT_4xx_Z:
1544 cs->exception_index = POWERPC_EXCP_ITLB;
1545 env->error_code = 0;
1546 env->spr[SPR_40x_DEAR] = eaddr;
1547 env->spr[SPR_40x_ESR] = 0x00000000;
1548 break;
1549 case POWERPC_MMU_BOOKE206:
1550 booke206_update_mas_tlb_miss(env, eaddr, 2, mmu_idx);
1551 /* fall through */
1552 case POWERPC_MMU_BOOKE:
1553 cs->exception_index = POWERPC_EXCP_ITLB;
1554 env->error_code = 0;
1555 env->spr[SPR_BOOKE_DEAR] = eaddr;
1556 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, MMU_DATA_LOAD);
1557 break;
1558 case POWERPC_MMU_MPC8xx:
1559 cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
1560 case POWERPC_MMU_REAL:
1561 cpu_abort(cs, "PowerPC in real mode should never raise "
1562 "any MMU exceptions\n");
1563 default:
1564 cpu_abort(cs, "Unknown or invalid MMU model\n");
1566 break;
1567 case -2:
1568 /* Access rights violation */
1569 cs->exception_index = POWERPC_EXCP_ISI;
1570 env->error_code = 0x08000000;
1571 break;
1572 case -3:
1573 /* No execute protection violation */
1574 if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
1575 (env->mmu_model == POWERPC_MMU_BOOKE206)) {
1576 env->spr[SPR_BOOKE_ESR] = 0x00000000;
1578 cs->exception_index = POWERPC_EXCP_ISI;
1579 env->error_code = 0x10000000;
1580 break;
1581 case -4:
1582 /* Direct store exception */
1583 /* No code fetch is allowed in direct-store areas */
1584 cs->exception_index = POWERPC_EXCP_ISI;
1585 env->error_code = 0x10000000;
1586 break;
1588 } else {
1589 switch (ret) {
1590 case -1:
1591 /* No matches in page tables or TLB */
1592 switch (env->mmu_model) {
1593 case POWERPC_MMU_SOFT_6xx:
1594 if (access_type == MMU_DATA_STORE) {
1595 cs->exception_index = POWERPC_EXCP_DSTLB;
1596 env->error_code = 1 << 16;
1597 } else {
1598 cs->exception_index = POWERPC_EXCP_DLTLB;
1599 env->error_code = 0;
1601 env->spr[SPR_DMISS] = eaddr;
1602 env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
1603 tlb_miss:
1604 env->error_code |= ctx.key << 19;
1605 env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) +
1606 get_pteg_offset32(cpu, ctx.hash[0]);
1607 env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) +
1608 get_pteg_offset32(cpu, ctx.hash[1]);
1609 break;
1610 case POWERPC_MMU_SOFT_74xx:
1611 if (access_type == MMU_DATA_STORE) {
1612 cs->exception_index = POWERPC_EXCP_DSTLB;
1613 } else {
1614 cs->exception_index = POWERPC_EXCP_DLTLB;
1616 tlb_miss_74xx:
1617 /* Implement LRU algorithm */
1618 env->error_code = ctx.key << 19;
1619 env->spr[SPR_TLBMISS] = (eaddr & ~((target_ulong)0x3)) |
1620 ((env->last_way + 1) & (env->nb_ways - 1));
1621 env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem;
1622 break;
1623 case POWERPC_MMU_SOFT_4xx:
1624 case POWERPC_MMU_SOFT_4xx_Z:
1625 cs->exception_index = POWERPC_EXCP_DTLB;
1626 env->error_code = 0;
1627 env->spr[SPR_40x_DEAR] = eaddr;
1628 if (access_type == MMU_DATA_STORE) {
1629 env->spr[SPR_40x_ESR] = 0x00800000;
1630 } else {
1631 env->spr[SPR_40x_ESR] = 0x00000000;
1633 break;
1634 case POWERPC_MMU_MPC8xx:
1635 /* XXX: TODO */
1636 cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
1637 case POWERPC_MMU_BOOKE206:
1638 booke206_update_mas_tlb_miss(env, eaddr, access_type, mmu_idx);
1639 /* fall through */
1640 case POWERPC_MMU_BOOKE:
1641 cs->exception_index = POWERPC_EXCP_DTLB;
1642 env->error_code = 0;
1643 env->spr[SPR_BOOKE_DEAR] = eaddr;
1644 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type);
1645 break;
1646 case POWERPC_MMU_REAL:
1647 cpu_abort(cs, "PowerPC in real mode should never raise "
1648 "any MMU exceptions\n");
1649 default:
1650 cpu_abort(cs, "Unknown or invalid MMU model\n");
1652 break;
1653 case -2:
1654 /* Access rights violation */
1655 cs->exception_index = POWERPC_EXCP_DSI;
1656 env->error_code = 0;
1657 if (env->mmu_model == POWERPC_MMU_SOFT_4xx
1658 || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) {
1659 env->spr[SPR_40x_DEAR] = eaddr;
1660 if (access_type == MMU_DATA_STORE) {
1661 env->spr[SPR_40x_ESR] |= 0x00800000;
1663 } else if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
1664 (env->mmu_model == POWERPC_MMU_BOOKE206)) {
1665 env->spr[SPR_BOOKE_DEAR] = eaddr;
1666 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type);
1667 } else {
1668 env->spr[SPR_DAR] = eaddr;
1669 if (access_type == MMU_DATA_STORE) {
1670 env->spr[SPR_DSISR] = 0x0A000000;
1671 } else {
1672 env->spr[SPR_DSISR] = 0x08000000;
1675 break;
1676 case -4:
1677 /* Direct store exception */
1678 switch (type) {
1679 case ACCESS_FLOAT:
1680 /* Floating point load/store */
1681 cs->exception_index = POWERPC_EXCP_ALIGN;
1682 env->error_code = POWERPC_EXCP_ALIGN_FP;
1683 env->spr[SPR_DAR] = eaddr;
1684 break;
1685 case ACCESS_RES:
1686 /* lwarx, ldarx or stwcx. */
1687 cs->exception_index = POWERPC_EXCP_DSI;
1688 env->error_code = 0;
1689 env->spr[SPR_DAR] = eaddr;
1690 if (access_type == MMU_DATA_STORE) {
1691 env->spr[SPR_DSISR] = 0x06000000;
1692 } else {
1693 env->spr[SPR_DSISR] = 0x04000000;
1695 break;
1696 case ACCESS_EXT:
1697 /* eciwx or ecowx */
1698 cs->exception_index = POWERPC_EXCP_DSI;
1699 env->error_code = 0;
1700 env->spr[SPR_DAR] = eaddr;
1701 if (access_type == MMU_DATA_STORE) {
1702 env->spr[SPR_DSISR] = 0x06100000;
1703 } else {
1704 env->spr[SPR_DSISR] = 0x04100000;
1706 break;
1707 default:
1708 printf("DSI: invalid exception (%d)\n", ret);
1709 cs->exception_index = POWERPC_EXCP_PROGRAM;
1710 env->error_code =
1711 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
1712 env->spr[SPR_DAR] = eaddr;
1713 break;
1715 break;
1719 return false;
1722 #ifdef CONFIG_TCG
1723 /*****************************************************************************/
1724 /* BATs management */
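/*
 * Storing a BAT upper register must flush the translations covered by both
 * the old and the new mapping; do_invalidate_BAT() flushes them page by
 * page unless the range exceeds 1024 pages, in which case flushing the
 * whole TLB is cheaper.
 */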
1725 #if !defined(FLUSH_ALL_TLBS)
1726 static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
1727 target_ulong mask)
1729 CPUState *cs = env_cpu(env);
1730 target_ulong base, end, page;
1732 base = BATu & ~0x0001FFFF;
1733 end = base + mask + 0x00020000;
1734 if (((end - base) >> TARGET_PAGE_BITS) > 1024) {
1735 /* Flushing 1024 4K pages is slower than a complete flush */
1736 LOG_BATS("Flush all BATs\n");
1737 tlb_flush(cs);
1738 LOG_BATS("Flush done\n");
1739 return;
1741 LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
1742 TARGET_FMT_lx ")\n", base, end, mask);
1743 for (page = base; page != end; page += TARGET_PAGE_SIZE) {
1744 tlb_flush_page(cs, page);
1746 LOG_BATS("Flush done\n");
1748 #endif
1750 static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
1751 target_ulong value)
1753 LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID,
1754 nr, ul == 0 ? 'u' : 'l', value, env->nip);
1757 void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
1759 target_ulong mask;
1761 dump_store_bat(env, 'I', 0, nr, value);
1762 if (env->IBAT[0][nr] != value) {
1763 mask = (value << 15) & 0x0FFE0000UL;
1764 #if !defined(FLUSH_ALL_TLBS)
1765 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1766 #endif
1767 /*
1768 * When storing a valid upper BAT, mask BEPI and BRPN and
1769 * invalidate all TLBs covered by this BAT
1770 */
1771 mask = (value << 15) & 0x0FFE0000UL;
1772 env->IBAT[0][nr] = (value & 0x00001FFFUL) |
1773 (value & ~0x0001FFFFUL & ~mask);
1774 env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
1775 (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
1776 #if !defined(FLUSH_ALL_TLBS)
1777 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1778 #else
1779 tlb_flush(env_cpu(env));
1780 #endif
1784 void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value)
1786 dump_store_bat(env, 'I', 1, nr, value);
1787 env->IBAT[1][nr] = value;
1790 void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
1792 target_ulong mask;
1794 dump_store_bat(env, 'D', 0, nr, value);
1795 if (env->DBAT[0][nr] != value) {
1796 /*
1797 * When storing a valid upper BAT, mask BEPI and BRPN and
1798 * invalidate all TLBs covered by this BAT
1799 */
1800 mask = (value << 15) & 0x0FFE0000UL;
1801 #if !defined(FLUSH_ALL_TLBS)
1802 do_invalidate_BAT(env, env->DBAT[0][nr], mask);
1803 #endif
1804 mask = (value << 15) & 0x0FFE0000UL;
1805 env->DBAT[0][nr] = (value & 0x00001FFFUL) |
1806 (value & ~0x0001FFFFUL & ~mask);
1807 env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
1808 (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
1809 #if !defined(FLUSH_ALL_TLBS)
1810 do_invalidate_BAT(env, env->DBAT[0][nr], mask);
1811 #else
1812 tlb_flush(env_cpu(env));
1813 #endif
1817 void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value)
1819 dump_store_bat(env, 'D', 1, nr, value);
1820 env->DBAT[1][nr] = value;
1823 void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value)
1825 target_ulong mask;
1826 #if defined(FLUSH_ALL_TLBS)
1827 int do_inval;
1828 #endif
1830 dump_store_bat(env, 'I', 0, nr, value);
1831 if (env->IBAT[0][nr] != value) {
1832 #if defined(FLUSH_ALL_TLBS)
1833 do_inval = 0;
1834 #endif
1835 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
1836 if (env->IBAT[1][nr] & 0x40) {
1837 /* Invalidate BAT only if it is valid */
1838 #if !defined(FLUSH_ALL_TLBS)
1839 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1840 #else
1841 do_inval = 1;
1842 #endif
1844 /*
1845 * When storing a valid upper BAT, mask BEPI and BRPN and
1846 * invalidate all TLBs covered by this BAT
1847 */
1848 env->IBAT[0][nr] = (value & 0x00001FFFUL) |
1849 (value & ~0x0001FFFFUL & ~mask);
1850 env->DBAT[0][nr] = env->IBAT[0][nr];
1851 if (env->IBAT[1][nr] & 0x40) {
1852 #if !defined(FLUSH_ALL_TLBS)
1853 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1854 #else
1855 do_inval = 1;
1856 #endif
1858 #if defined(FLUSH_ALL_TLBS)
1859 if (do_inval) {
1860 tlb_flush(env_cpu(env));
1862 #endif
1866 void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value)
1868 #if !defined(FLUSH_ALL_TLBS)
1869 target_ulong mask;
1870 #else
1871 int do_inval;
1872 #endif
1874 dump_store_bat(env, 'I', 1, nr, value);
1875 if (env->IBAT[1][nr] != value) {
1876 #if defined(FLUSH_ALL_TLBS)
1877 do_inval = 0;
1878 #endif
1879 if (env->IBAT[1][nr] & 0x40) {
1880 #if !defined(FLUSH_ALL_TLBS)
1881 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
1882 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1883 #else
1884 do_inval = 1;
1885 #endif
1887 if (value & 0x40) {
1888 #if !defined(FLUSH_ALL_TLBS)
1889 mask = (value << 17) & 0x0FFE0000UL;
1890 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1891 #else
1892 do_inval = 1;
1893 #endif
1895 env->IBAT[1][nr] = value;
1896 env->DBAT[1][nr] = value;
1897 #if defined(FLUSH_ALL_TLBS)
1898 if (do_inval) {
1899 tlb_flush(env_cpu(env));
1901 #endif
1904 #endif
1906 #ifdef CONFIG_TCG
1907 /*****************************************************************************/
1908 /* TLB management */
1909 void ppc_tlb_invalidate_all(CPUPPCState *env)
1911 #if defined(TARGET_PPC64)
1912 if (mmu_is_64bit(env->mmu_model)) {
1913 env->tlb_need_flush = 0;
1914 tlb_flush(env_cpu(env));
1915 } else
1916 #endif /* defined(TARGET_PPC64) */
1917 switch (env->mmu_model) {
1918 case POWERPC_MMU_SOFT_6xx:
1919 case POWERPC_MMU_SOFT_74xx:
1920 ppc6xx_tlb_invalidate_all(env);
1921 break;
1922 case POWERPC_MMU_SOFT_4xx:
1923 case POWERPC_MMU_SOFT_4xx_Z:
1924 ppc4xx_tlb_invalidate_all(env);
1925 break;
1926 case POWERPC_MMU_REAL:
1927 cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n");
1928 break;
1929 case POWERPC_MMU_MPC8xx:
1930 /* XXX: TODO */
1931 cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
1932 break;
1933 case POWERPC_MMU_BOOKE:
1934 tlb_flush(env_cpu(env));
1935 break;
1936 case POWERPC_MMU_BOOKE206:
1937 booke206_flush_tlb(env, -1, 0);
1938 break;
1939 case POWERPC_MMU_32B:
1940 case POWERPC_MMU_601:
1941 env->tlb_need_flush = 0;
1942 tlb_flush(env_cpu(env));
1943 break;
1944 default:
1945 /* XXX: TODO */
1946 cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model);
1947 break;
1950 #endif
1952 #ifdef CONFIG_TCG
1953 void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
1955 #if !defined(FLUSH_ALL_TLBS)
1956 addr &= TARGET_PAGE_MASK;
1957 #if defined(TARGET_PPC64)
1958 if (mmu_is_64bit(env->mmu_model)) {
1959 /* tlbie invalidates TLBs for all segments */
1960 /*
1961 * XXX: given that there are too many segments to invalidate,
1962 * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
1963 * we just invalidate all TLBs
1964 */
1965 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
1966 } else
1967 #endif /* defined(TARGET_PPC64) */
1968 switch (env->mmu_model) {
1969 case POWERPC_MMU_SOFT_6xx:
1970 case POWERPC_MMU_SOFT_74xx:
1971 ppc6xx_tlb_invalidate_virt(env, addr, 0);
1972 if (env->id_tlbs == 1) {
1973 ppc6xx_tlb_invalidate_virt(env, addr, 1);
1975 break;
1976 case POWERPC_MMU_32B:
1977 case POWERPC_MMU_601:
1978 /*
1979 * Actual CPUs invalidate entire congruence classes based on
1980 * the geometry of their TLBs and some OSes take that into
1981 * account; we just mark the TLB to be flushed later (on a
1982 * context-synchronizing event or a sync instruction on 32-bit).
1983 */
1984 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
1985 break;
1986 default:
1987 /* Should never reach here with other MMU models */
1988 assert(0);
1990 #else
1991 ppc_tlb_invalidate_all(env);
1992 #endif
1995 /*****************************************************************************/
1996 /* Special registers manipulation */
1998 /* Segment registers load and store */
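/*
 * On 64-bit hash MMUs the segment registers are emulated on top of the SLB:
 * helper_store_sr() installs an SLB entry mapping the 256MB segment instead
 * of writing env->sr[], which is only used by the 32-bit MMUs.
 */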
1999 target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
2001 #if defined(TARGET_PPC64)
2002 if (mmu_is_64bit(env->mmu_model)) {
2003 /* XXX */
2004 return 0;
2006 #endif
2007 return env->sr[sr_num];
2010 void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
2012 qemu_log_mask(CPU_LOG_MMU,
2013 "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
2014 (int)srnum, value, env->sr[srnum]);
2015 #if defined(TARGET_PPC64)
2016 if (mmu_is_64bit(env->mmu_model)) {
2017 PowerPCCPU *cpu = env_archcpu(env);
2018 uint64_t esid, vsid;
2020 /* ESID = srnum */
2021 esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;
2023 /* VSID = VSID */
2024 vsid = (value & 0xfffffff) << 12;
2025 /* flags = flags */
2026 vsid |= ((value >> 27) & 0xf) << 8;
2028 ppc_store_slb(cpu, srnum, esid, vsid);
2029 } else
2030 #endif
2031 if (env->sr[srnum] != value) {
2032 env->sr[srnum] = value;
2033 /*
2034 * Invalidating 256MB of virtual memory in 4kB pages takes way
2035 * longer than flushing the whole TLB.
2036 */
2037 #if !defined(FLUSH_ALL_TLBS) && 0
2039 target_ulong page, end;
2040 /* Invalidate 256 MB of virtual memory */
2041 page = (16 << 20) * srnum;
2042 end = page + (16 << 20);
2043 for (; page != end; page += TARGET_PAGE_SIZE) {
2044 tlb_flush_page(env_cpu(env), page);
2047 #else
2048 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
2049 #endif
2053 /* TLB management */
2054 void helper_tlbia(CPUPPCState *env)
2056 ppc_tlb_invalidate_all(env);
2059 void helper_tlbie(CPUPPCState *env, target_ulong addr)
2061 ppc_tlb_invalidate_one(env, addr);
2064 void helper_tlbiva(CPUPPCState *env, target_ulong addr)
2066 /* tlbiva instruction only exists on BookE */
2067 assert(env->mmu_model == POWERPC_MMU_BOOKE);
2068 /* XXX: TODO */
2069 cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n");
2072 /* Software driven TLBs management */
2073 /* PowerPC 602/603 software TLB load instructions helpers */
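/*
 * The helpers below reload one software TLB entry from the SPRs set up on
 * a TLB miss: the compare word comes from ICMP/DCMP, the RPN from RPA and
 * the victim way from SRR1, at the EPN passed by the guest's reload
 * instruction.
 */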
2074 static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
2076 target_ulong RPN, CMP, EPN;
2077 int way;
2079 RPN = env->spr[SPR_RPA];
2080 if (is_code) {
2081 CMP = env->spr[SPR_ICMP];
2082 EPN = env->spr[SPR_IMISS];
2083 } else {
2084 CMP = env->spr[SPR_DCMP];
2085 EPN = env->spr[SPR_DMISS];
2087 way = (env->spr[SPR_SRR1] >> 17) & 1;
2088 (void)EPN; /* avoid a compiler warning */
2089 LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
2090 " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
2091 RPN, way);
2092 /* Store this TLB */
2093 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2094 way, is_code, CMP, RPN);
2097 void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
2099 do_6xx_tlb(env, EPN, 0);
2102 void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
2104 do_6xx_tlb(env, EPN, 1);
2107 /* PowerPC 74xx software TLB load instructions helpers */
2108 static void do_74xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
2110 target_ulong RPN, CMP, EPN;
2111 int way;
2113 RPN = env->spr[SPR_PTELO];
2114 CMP = env->spr[SPR_PTEHI];
2115 EPN = env->spr[SPR_TLBMISS] & ~0x3;
2116 way = env->spr[SPR_TLBMISS] & 0x3;
2117 (void)EPN; /* avoid a compiler warning */
2118 LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
2119 " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
2120 RPN, way);
2121 /* Store this TLB */
2122 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2123 way, is_code, CMP, RPN);
2126 void helper_74xx_tlbd(CPUPPCState *env, target_ulong EPN)
2128 do_74xx_tlb(env, EPN, 0);
2131 void helper_74xx_tlbi(CPUPPCState *env, target_ulong EPN)
2133 do_74xx_tlb(env, EPN, 1);
2136 /*****************************************************************************/
2137 /* PowerPC 601 specific instructions (POWER bridge) */
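/*
 * helper_rac() below backs the POWER-era "rac" (real address compute)
 * instruction kept by the 601 bridge: it translates an effective address to
 * a real address without raising a fault and returns 0 when no translation
 * is found.  (Brief summary of the code below, not an architectural note.)
 */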
2139 target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
2141 mmu_ctx_t ctx;
2142 int nb_BATs;
2143 target_ulong ret = 0;
2145 /*
2146 * We don't have to generate many instances of this instruction,
2147 * as rac is supervisor only.
2148 *
2149 * XXX: FIX THIS: Pretend we have no BAT
2150 */
2151 nb_BATs = env->nb_BATs;
2152 env->nb_BATs = 0;
2153 if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
2154 ret = ctx.raddr;
2156 env->nb_BATs = nb_BATs;
2157 return ret;
2160 static inline target_ulong booke_tlb_to_page_size(int size)
2162 return 1024 << (2 * size);
2163 }
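/*
 * The BookE TSIZE encoding handled by the two helpers around here is
 * 1 KiB << (2 * size), i.e. page size = 4^size KiB.  For example:
 *   size 0 -> 1 KiB, 1 -> 4 KiB, 3 -> 64 KiB, 5 -> 1 MiB, 9 -> 256 MiB.
 */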
2165 static inline int booke_page_size_to_tlb(target_ulong page_size)
2167 int size;
2169 switch (page_size) {
2170 case 0x00000400UL:
2171 size = 0x0;
2172 break;
2173 case 0x00001000UL:
2174 size = 0x1;
2175 break;
2176 case 0x00004000UL:
2177 size = 0x2;
2178 break;
2179 case 0x00010000UL:
2180 size = 0x3;
2181 break;
2182 case 0x00040000UL:
2183 size = 0x4;
2184 break;
2185 case 0x00100000UL:
2186 size = 0x5;
2187 break;
2188 case 0x00400000UL:
2189 size = 0x6;
2190 break;
2191 case 0x01000000UL:
2192 size = 0x7;
2193 break;
2194 case 0x04000000UL:
2195 size = 0x8;
2196 break;
2197 case 0x10000000UL:
2198 size = 0x9;
2199 break;
2200 case 0x40000000UL:
2201 size = 0xA;
2202 break;
2203 #if defined(TARGET_PPC64)
2204 case 0x000100000000ULL:
2205 size = 0xB;
2206 break;
2207 case 0x000400000000ULL:
2208 size = 0xC;
2209 break;
2210 case 0x001000000000ULL:
2211 size = 0xD;
2212 break;
2213 case 0x004000000000ULL:
2214 size = 0xE;
2215 break;
2216 case 0x010000000000ULL:
2217 size = 0xF;
2218 break;
2219 #endif
2220 default:
2221 size = -1;
2222 break;
2225 return size;
2228 /* Helpers for 4xx TLB management */
2229 #define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */
2231 #define PPC4XX_TLBHI_V 0x00000040
2232 #define PPC4XX_TLBHI_E 0x00000020
2233 #define PPC4XX_TLBHI_SIZE_MIN 0
2234 #define PPC4XX_TLBHI_SIZE_MAX 7
2235 #define PPC4XX_TLBHI_SIZE_DEFAULT 1
2236 #define PPC4XX_TLBHI_SIZE_SHIFT 7
2237 #define PPC4XX_TLBHI_SIZE_MASK 0x00000007
2239 #define PPC4XX_TLBLO_EX 0x00000200
2240 #define PPC4XX_TLBLO_WR 0x00000100
2241 #define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
2242 #define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
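/*
 * Layout used by the 40x helpers below: TLBHI holds the EPN together with
 * the valid (V) and endianness (E) bits and a 3-bit SIZE field at bits 7..9;
 * TLBLO holds the RPN in its upper bits plus the EX (execute) and WR (write)
 * permission bits and an 8-bit storage-attribute field.
 */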
2244 target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
2246 ppcemb_tlb_t *tlb;
2247 target_ulong ret;
2248 int size;
2250 entry &= PPC4XX_TLB_ENTRY_MASK;
2251 tlb = &env->tlb.tlbe[entry];
2252 ret = tlb->EPN;
2253 if (tlb->prot & PAGE_VALID) {
2254 ret |= PPC4XX_TLBHI_V;
2256 size = booke_page_size_to_tlb(tlb->size);
2257 if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
2258 size = PPC4XX_TLBHI_SIZE_DEFAULT;
2260 ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
2261 env->spr[SPR_40x_PID] = tlb->PID;
2262 return ret;
2265 target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
2267 ppcemb_tlb_t *tlb;
2268 target_ulong ret;
2270 entry &= PPC4XX_TLB_ENTRY_MASK;
2271 tlb = &env->tlb.tlbe[entry];
2272 ret = tlb->RPN;
2273 if (tlb->prot & PAGE_EXEC) {
2274 ret |= PPC4XX_TLBLO_EX;
2276 if (tlb->prot & PAGE_WRITE) {
2277 ret |= PPC4XX_TLBLO_WR;
2279 return ret;
2282 void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
2283 target_ulong val)
2285 CPUState *cs = env_cpu(env);
2286 ppcemb_tlb_t *tlb;
2287 target_ulong page, end;
2289 LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
2290 val);
2291 entry &= PPC4XX_TLB_ENTRY_MASK;
2292 tlb = &env->tlb.tlbe[entry];
2293 /* Invalidate previous TLB (if it's valid) */
2294 if (tlb->prot & PAGE_VALID) {
2295 end = tlb->EPN + tlb->size;
2296 LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
2297 TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
2298 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
2299 tlb_flush_page(cs, page);
2302 tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
2303 & PPC4XX_TLBHI_SIZE_MASK);
2304 /*
2305 * We cannot handle TLB size < TARGET_PAGE_SIZE.
2306 * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY.
2307 */
2308 if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
2309 cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u "
2310 "are not supported (%d)\n"
2311 "Please implement TARGET_PAGE_BITS_VARY\n",
2312 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
2314 tlb->EPN = val & ~(tlb->size - 1);
2315 if (val & PPC4XX_TLBHI_V) {
2316 tlb->prot |= PAGE_VALID;
2317 if (val & PPC4XX_TLBHI_E) {
2318 /* XXX: TO BE FIXED */
2319 cpu_abort(cs,
2320 "Little-endian TLB entries are not supported by now\n");
2322 } else {
2323 tlb->prot &= ~PAGE_VALID;
2325 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
2326 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
2327 " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
2328 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
2329 tlb->prot & PAGE_READ ? 'r' : '-',
2330 tlb->prot & PAGE_WRITE ? 'w' : '-',
2331 tlb->prot & PAGE_EXEC ? 'x' : '-',
2332 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2333 /* Invalidate new TLB (if valid) */
2334 if (tlb->prot & PAGE_VALID) {
2335 end = tlb->EPN + tlb->size;
2336 LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
2337 TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
2338 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
2339 tlb_flush_page(cs, page);
2344 void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
2345 target_ulong val)
2347 ppcemb_tlb_t *tlb;
2349 LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
2350 val);
2351 entry &= PPC4XX_TLB_ENTRY_MASK;
2352 tlb = &env->tlb.tlbe[entry];
2353 tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
2354 tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
2355 tlb->prot = PAGE_READ;
2356 if (val & PPC4XX_TLBLO_EX) {
2357 tlb->prot |= PAGE_EXEC;
2359 if (val & PPC4XX_TLBLO_WR) {
2360 tlb->prot |= PAGE_WRITE;
2362 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
2363 " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
2364 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
2365 tlb->prot & PAGE_READ ? 'r' : '-',
2366 tlb->prot & PAGE_WRITE ? 'w' : '-',
2367 tlb->prot & PAGE_EXEC ? 'x' : '-',
2368 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2371 target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
2373 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
2376 /* PowerPC 440 TLB management */
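/*
 * Rough shape of the 440 TLB words handled below: word 0 carries the EPN,
 * the size field and the valid bit, word 1 the RPN, and word 2 the storage
 * attributes together with the R/W/X permission bits for both privilege
 * levels (one set is kept shifted left by 4 in tlb->prot).
 */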
2377 void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
2378 target_ulong value)
2380 ppcemb_tlb_t *tlb;
2381 target_ulong EPN, RPN, size;
2382 int do_flush_tlbs;
2384 LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
2385 __func__, word, (int)entry, value);
2386 do_flush_tlbs = 0;
2387 entry &= 0x3F;
2388 tlb = &env->tlb.tlbe[entry];
2389 switch (word) {
2390 default:
2391 /* Just here to please gcc; fall through to case 0 */
2392 case 0:
2393 EPN = value & 0xFFFFFC00;
2394 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) {
2395 do_flush_tlbs = 1;
2397 tlb->EPN = EPN;
2398 size = booke_tlb_to_page_size((value >> 4) & 0xF);
2399 if ((tlb->prot & PAGE_VALID) && tlb->size < size) {
2400 do_flush_tlbs = 1;
2402 tlb->size = size;
2403 tlb->attr &= ~0x1;
2404 tlb->attr |= (value >> 8) & 1;
2405 if (value & 0x200) {
2406 tlb->prot |= PAGE_VALID;
2407 } else {
2408 if (tlb->prot & PAGE_VALID) {
2409 tlb->prot &= ~PAGE_VALID;
2410 do_flush_tlbs = 1;
2413 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
2414 if (do_flush_tlbs) {
2415 tlb_flush(env_cpu(env));
2417 break;
2418 case 1:
2419 RPN = value & 0xFFFFFC0F;
2420 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
2421 tlb_flush(env_cpu(env));
2423 tlb->RPN = RPN;
2424 break;
2425 case 2:
2426 tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
2427 tlb->prot = tlb->prot & PAGE_VALID;
2428 if (value & 0x1) {
2429 tlb->prot |= PAGE_READ << 4;
2431 if (value & 0x2) {
2432 tlb->prot |= PAGE_WRITE << 4;
2434 if (value & 0x4) {
2435 tlb->prot |= PAGE_EXEC << 4;
2437 if (value & 0x8) {
2438 tlb->prot |= PAGE_READ;
2440 if (value & 0x10) {
2441 tlb->prot |= PAGE_WRITE;
2443 if (value & 0x20) {
2444 tlb->prot |= PAGE_EXEC;
2446 break;
2450 target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
2451 target_ulong entry)
2453 ppcemb_tlb_t *tlb;
2454 target_ulong ret;
2455 int size;
2457 entry &= 0x3F;
2458 tlb = &env->tlb.tlbe[entry];
2459 switch (word) {
2460 default:
2461 /* Just here to please gcc; fall through to case 0 */
2462 case 0:
2463 ret = tlb->EPN;
2464 size = booke_page_size_to_tlb(tlb->size);
2465 if (size < 0 || size > 0xF) {
2466 size = 1;
2468 ret |= size << 4;
2469 if (tlb->attr & 0x1) {
2470 ret |= 0x100;
2472 if (tlb->prot & PAGE_VALID) {
2473 ret |= 0x200;
2475 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
2476 env->spr[SPR_440_MMUCR] |= tlb->PID;
2477 break;
2478 case 1:
2479 ret = tlb->RPN;
2480 break;
2481 case 2:
2482 ret = tlb->attr & ~0x1;
2483 if (tlb->prot & (PAGE_READ << 4)) {
2484 ret |= 0x1;
2486 if (tlb->prot & (PAGE_WRITE << 4)) {
2487 ret |= 0x2;
2489 if (tlb->prot & (PAGE_EXEC << 4)) {
2490 ret |= 0x4;
2492 if (tlb->prot & PAGE_READ) {
2493 ret |= 0x8;
2495 if (tlb->prot & PAGE_WRITE) {
2496 ret |= 0x10;
2498 if (tlb->prot & PAGE_EXEC) {
2499 ret |= 0x20;
2501 break;
2503 return ret;
2506 target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
2508 return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
2511 /* PowerPC BookE 2.06 TLB management */
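/*
 * Sketch of the MAS-register flow implemented below: software describes a
 * TLB entry through the MAS SPRs (MAS0 selects the TLB array and entry,
 * MAS1 holds valid/TS/TID/TSIZE, MAS2 the EPN and WIMGE attributes, and
 * MAS3/MAS7 the RPN and access permissions) and then executes tlbwe;
 * tlbre and tlbsx go the other way and fill the MAS registers from an
 * existing or matching entry.
 */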
2513 static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
2515 uint32_t tlbncfg = 0;
2516 int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
2517 int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
2518 int tlb;
2520 tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
2521 tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];
2523 if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
2524 cpu_abort(env_cpu(env), "we don't support HES yet\n");
2527 return booke206_get_tlbm(env, tlb, ea, esel);
2530 void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
2532 env->spr[pidn] = pid;
2533 /* changing PIDs means we're in a different address space now */
2534 tlb_flush(env_cpu(env));
2537 void helper_booke_set_eplc(CPUPPCState *env, target_ulong val)
2539 env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK;
2540 tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD);
2542 void helper_booke_set_epsc(CPUPPCState *env, target_ulong val)
2544 env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK;
2545 tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE);
2548 static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb)
2550 if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
2551 tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK);
2552 } else {
2553 tlb_flush(env_cpu(env));
2557 void helper_booke206_tlbwe(CPUPPCState *env)
2559 uint32_t tlbncfg, tlbn;
2560 ppcmas_tlb_t *tlb;
2561 uint32_t size_tlb, size_ps;
2562 target_ulong mask;
2565 switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
2566 case MAS0_WQ_ALWAYS:
2567 /* good to go, write that entry */
2568 break;
2569 case MAS0_WQ_COND:
2570 /* XXX check if reserved */
2571 if (0) {
2572 return;
2574 break;
2575 case MAS0_WQ_CLR_RSRV:
2576 /* XXX clear entry */
2577 return;
2578 default:
2579 /* reserved/unknown write qualifier: ignore the write */
2580 return;
2583 if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
2584 !msr_gs) {
2585 /* XXX we don't support direct LRAT setting yet */
2586 fprintf(stderr, "cpu: LRAT setting is not supported yet\n");
2587 return;
2590 tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
2591 tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];
2593 tlb = booke206_cur_tlb(env);
2595 if (!tlb) {
2596 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
2597 POWERPC_EXCP_INVAL |
2598 POWERPC_EXCP_INVAL_INVAL, GETPC());
2601 /* check that we support the targeted size */
2602 size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
2603 size_ps = booke206_tlbnps(env, tlbn);
2604 if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
2605 !(size_ps & (1 << size_tlb))) {
2606 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
2607 POWERPC_EXCP_INVAL |
2608 POWERPC_EXCP_INVAL_INVAL, GETPC());
2611 if (msr_gs) {
2612 cpu_abort(env_cpu(env), "missing HV implementation\n");
2615 if (tlb->mas1 & MAS1_VALID) {
2616 /*
2617 * Invalidate the page in QEMU TLB if it was a valid entry.
2618 *
2619 * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
2620 * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
2621 * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf)
2622 *
2623 * "Note that when an L2 TLB entry is written, it may be displacing an
2624 * already valid entry in the same L2 TLB location (a victim). If a
2625 * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
2626 * TLB entry is automatically invalidated."
2627 */
2628 flush_page(env, tlb);
2631 tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
2632 env->spr[SPR_BOOKE_MAS3];
2633 tlb->mas1 = env->spr[SPR_BOOKE_MAS1];
2635 if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
2636 /* With MAV2, TSIZE is ignored for TLBs that have a fixed entry size */
2637 booke206_fixed_size_tlbn(env, tlbn, tlb);
2638 } else {
2639 if (!(tlbncfg & TLBnCFG_AVAIL)) {
2640 /* force !AVAIL TLB entries to the correct page size */
2641 tlb->mas1 &= ~MAS1_TSIZE_MASK;
2642 /* XXX can be configured in MMUCSR0 */
2643 tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
2647 /* Make a mask from TLB size to discard invalid bits in EPN field */
2648 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
2649 /* Add a mask for page attributes */
2650 mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;
2652 if (!msr_cm) {
2653 /*
2654 * Executing a tlbwe instruction in 32-bit mode will set bits
2655 * 0:31 of the TLB EPN field to zero.
2656 */
2657 mask &= 0xffffffff;
2660 tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;
2662 if (!(tlbncfg & TLBnCFG_IPROT)) {
2663 /* no IPROT supported by TLB */
2664 tlb->mas1 &= ~MAS1_IPROT;
2667 flush_page(env, tlb);
2670 static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
2672 int tlbn = booke206_tlbm_to_tlbn(env, tlb);
2673 int way = booke206_tlbm_to_way(env, tlb);
2675 env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
2676 env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
2677 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
2679 env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
2680 env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
2681 env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
2682 env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
2685 void helper_booke206_tlbre(CPUPPCState *env)
2687 ppcmas_tlb_t *tlb = NULL;
2689 tlb = booke206_cur_tlb(env);
2690 if (!tlb) {
2691 env->spr[SPR_BOOKE_MAS1] = 0;
2692 } else {
2693 booke206_tlb_to_mas(env, tlb);
2697 void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
2699 ppcmas_tlb_t *tlb = NULL;
2700 int i, j;
2701 hwaddr raddr;
2702 uint32_t spid, sas;
2704 spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
2705 sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;
2707 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
2708 int ways = booke206_tlb_ways(env, i);
2710 for (j = 0; j < ways; j++) {
2711 tlb = booke206_get_tlbm(env, i, address, j);
2713 if (!tlb) {
2714 continue;
2717 if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
2718 continue;
2721 if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
2722 continue;
2725 booke206_tlb_to_mas(env, tlb);
2726 return;
2730 /* no entry found, fill with defaults */
2731 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
2732 env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
2733 env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
2734 env->spr[SPR_BOOKE_MAS3] = 0;
2735 env->spr[SPR_BOOKE_MAS7] = 0;
2737 if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
2738 env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
2741 env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
2742 << MAS1_TID_SHIFT;
2744 /* next victim logic: round-robin replacement hint over the ways of TLB0 */
2745 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
2746 env->last_way++;
2747 env->last_way &= booke206_tlb_ways(env, 0) - 1;
2748 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
2751 static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
2752 uint32_t ea)
2754 int i;
2755 int ways = booke206_tlb_ways(env, tlbn);
2756 target_ulong mask;
2758 for (i = 0; i < ways; i++) {
2759 ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
2760 if (!tlb) {
2761 continue;
2763 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
2764 if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
2765 !(tlb->mas1 & MAS1_IPROT)) {
2766 tlb->mas1 &= ~MAS1_VALID;
2771 void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
2773 CPUState *cs;
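/*
 * tlbivax encodes its operands in the effective address: bit 3 selects TLB1
 * rather than TLB0 and bit 2 requests an invalidate-all of that array;
 * otherwise the EPN bits of the address pick the entries to invalidate
 * (IPROT-protected entries are skipped).
 */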
2775 if (address & 0x4) {
2776 /* flush all entries */
2777 if (address & 0x8) {
2778 /* flush all of TLB1 */
2779 booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
2780 } else {
2781 /* flush all of TLB0 */
2782 booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
2784 return;
2787 if (address & 0x8) {
2788 /* flush TLB1 entries */
2789 booke206_invalidate_ea_tlb(env, 1, address);
2790 CPU_FOREACH(cs) {
2791 tlb_flush(cs);
2793 } else {
2794 /* flush TLB0 entries */
2795 booke206_invalidate_ea_tlb(env, 0, address);
2796 CPU_FOREACH(cs) {
2797 tlb_flush_page(cs, address & MAS2_EPN_MASK);
2802 void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
2804 /* XXX missing LPID handling */
2805 booke206_flush_tlb(env, -1, 1);
2808 void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
2810 int i, j;
2811 int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
2812 ppcmas_tlb_t *tlb = env->tlb.tlbm;
2813 int tlb_size;
2815 /* XXX missing LPID handling */
2816 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
2817 tlb_size = booke206_tlb_size(env, i);
2818 for (j = 0; j < tlb_size; j++) {
2819 if (!(tlb[j].mas1 & MAS1_IPROT) &&
2820 ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
2821 tlb[j].mas1 &= ~MAS1_VALID;
2824 tlb += booke206_tlb_size(env, i);
2826 tlb_flush(env_cpu(env));
2829 void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
2831 int i, j;
2832 ppcmas_tlb_t *tlb;
2833 int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
2834 int pid = tid >> MAS6_SPID_SHIFT;
2835 int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
2836 int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
2837 /* XXX check for unsupported isize and raise an invalid-opcode exception if so */
2838 int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
2839 /* XXX implement MAV2 handling */
2840 bool mav2 = false;
2842 /* XXX missing LPID handling */
2843 /* flush by pid and ea */
2844 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
2845 int ways = booke206_tlb_ways(env, i);
2847 for (j = 0; j < ways; j++) {
2848 tlb = booke206_get_tlbm(env, i, address, j);
2849 if (!tlb) {
2850 continue;
2852 if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
2853 (tlb->mas1 & MAS1_IPROT) ||
2854 ((tlb->mas1 & MAS1_IND) != ind) ||
2855 ((tlb->mas8 & MAS8_TGS) != sgs)) {
2856 continue;
2858 if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
2859 /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
2860 continue;
2862 /* XXX e500mc doesn't match SAS, but other cores might */
2863 tlb->mas1 &= ~MAS1_VALID;
2866 tlb_flush(env_cpu(env));
2869 void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
2871 int flags = 0;
2873 if (type & 2) {
2874 flags |= BOOKE206_FLUSH_TLB1;
2877 if (type & 4) {
2878 flags |= BOOKE206_FLUSH_TLB0;
2881 booke206_flush_tlb(env, flags, 1);
2885 void helper_check_tlb_flush_local(CPUPPCState *env)
2887 check_tlb_flush(env, false);
2890 void helper_check_tlb_flush_global(CPUPPCState *env)
2892 check_tlb_flush(env, true);
2894 #endif /* CONFIG_TCG */
2896 /*****************************************************************************/
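/*
 * ppc_xlate() is the single entry point for address translation: it
 * dispatches on the MMU model and, on success, fills in the real address,
 * the page size (as a log2 value) and the protection bits.  guest_visible
 * is false for debug/probe lookups that must not disturb architected state.
 */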
2898 static bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
2899 hwaddr *raddrp, int *psizep, int *protp,
2900 int mmu_idx, bool guest_visible)
2902 switch (cpu->env.mmu_model) {
2903 #if defined(TARGET_PPC64)
2904 case POWERPC_MMU_3_00:
2905 if (ppc64_v3_radix(cpu)) {
2906 return ppc_radix64_xlate(cpu, eaddr, access_type,
2907 raddrp, psizep, protp, mmu_idx, guest_visible);
2909 /* fall through */
2910 case POWERPC_MMU_64B:
2911 case POWERPC_MMU_2_03:
2912 case POWERPC_MMU_2_06:
2913 case POWERPC_MMU_2_07:
2914 return ppc_hash64_xlate(cpu, eaddr, access_type,
2915 raddrp, psizep, protp, mmu_idx, guest_visible);
2916 #endif
2918 case POWERPC_MMU_32B:
2919 case POWERPC_MMU_601:
2920 return ppc_hash32_xlate(cpu, eaddr, access_type,
2921 raddrp, psizep, protp, mmu_idx, guest_visible);
2923 default:
2924 return ppc_jumbo_xlate(cpu, eaddr, access_type, raddrp,
2925 psizep, protp, mmu_idx, guest_visible);
2929 hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
2931 PowerPCCPU *cpu = POWERPC_CPU(cs);
2932 hwaddr raddr;
2933 int s, p;
2935 /*
2936 * Some MMUs have separate TLBs for code and data. If we only
2937 * try an MMU_DATA_LOAD, we may not be able to read instructions
2938 * mapped by code TLBs, so we also try a MMU_INST_FETCH.
2939 */
2940 if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p,
2941 cpu_mmu_index(&cpu->env, false), false) ||
2942 ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p,
2943 cpu_mmu_index(&cpu->env, true), false)) {
2944 return raddr & TARGET_PAGE_MASK;
2946 return -1;
2949 #ifdef CONFIG_TCG
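/*
 * TCG tlb_fill contract as used here: on a successful translation the page
 * is installed with tlb_set_page() and we return true; if this was only a
 * probe we return false instead of faulting; otherwise the exception state
 * recorded during translation is delivered via raise_exception_err_ra(),
 * which does not return.
 */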
2950 bool ppc_cpu_tlb_fill(CPUState *cs, vaddr eaddr, int size,
2951 MMUAccessType access_type, int mmu_idx,
2952 bool probe, uintptr_t retaddr)
2954 PowerPCCPU *cpu = POWERPC_CPU(cs);
2955 hwaddr raddr;
2956 int page_size, prot;
2958 if (ppc_xlate(cpu, eaddr, access_type, &raddr,
2959 &page_size, &prot, mmu_idx, !probe)) {
2960 tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
2961 prot, mmu_idx, 1UL << page_size);
2962 return true;
2964 if (probe) {
2965 return false;
2967 raise_exception_err_ra(&cpu->env, cs->exception_index,
2968 cpu->env.error_code, retaddr);
2970 #endif