/*
 * Created on: Aug 26, 2008
 */

#include <asm/mpc5200b.h>

#include <aros/kernel.h>
#include <aros/libcall.h>
#include <inttypes.h>

#include <proto/exec.h>
#include <proto/kernel.h>

#include "kernel_intern.h"

/* Search the MMU hash table for the page table entry corresponding to the given virtual address */
pte_t *find_pte(uint64_t virt)
{
    pte_t *ret = (pte_t *)0;
    uint32_t vsid;
    uint32_t mask;
    pte_t *pteg;
    int i;

    /* Calculate the hash function */
    uint32_t hash = (((virt >> 12) & 0xffff) ^ (virt >> 28)) & 0x7ffff;

    /* Which vsid are we looking for? */
    vsid = 0x80000000 | ((virt >> 28) << 7) | ((virt >> 22) & 0x3f);

    /* Which mask are we using? Depends on the size of the MMU hash table */
    mask = ((rdspr(SDR1) & 0x1ff) << 16) | 0xffc0;

    /* Get the first group of PTEs */
    pteg = (pte_t *)((rdspr(SDR1) & ~0x1ff) | ((hash << 6) & mask));

    /* Search the primary group */
    for (i = 0; i < 8; i++)
    {
        if (pteg[i].vsid == vsid)
        {
            ret = &pteg[i];
            break;
        }
    }

    /*
     * If the page was not found in the primary group, get the second hash, the
     * second vsid (H bit set) and search the secondary group.
     */
    if (!ret)
    {
        uint32_t hash2 = (~hash) & 0x7ffff;
        vsid |= 0x40;

        pteg = (pte_t *)((rdspr(SDR1) & ~0x1ff) | ((hash2 << 6) & mask));

        /* Search the secondary group */
        for (i = 0; i < 8; i++)
        {
            if (pteg[i].vsid == vsid)
            {
                ret = &pteg[i];
                break;
            }
        }
    }

    return ret;
}
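
/*
 * Note on the layout assumed throughout this file: pte_t is not defined here;
 * the code treats it as the classic 32-bit PowerPC hashed page table entry,
 * i.e. two 32-bit words, presumably declared along these lines in
 * kernel_intern.h:
 *
 *   typedef struct {
 *       uint32_t vsid;  // word 0: V (0x80000000) | VSID << 7 | H (0x40) | API (0x3f)
 *       uint32_t rpn;   // word 1: physical page number | R/C | WIMG | PP
 *   } pte_t;
 *
 * find_pte() compares the whole first word, so the "vsid" values built here
 * always carry the valid bit and, for the secondary group, the H bit as well.
 */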

uint16_t mmu_protection(KRN_MapAttr flags)
{
    uint16_t ppc_prot = 2 << 3; /* WIMG = 0010 */

    if (flags & MAP_Readable)
    {
        ppc_prot |= 3;                              /* PP = 11: read-only */
    }
    if (flags & MAP_Writable)
    {
        ppc_prot = (ppc_prot | 2) & ~1;             /* PP = 10: read/write */
    }
    if (flags & MAP_WriteThrough)
    {
        ppc_prot |= 8 << 3;                         /* set W */
    }
    if (flags & MAP_Guarded)
    {
        ppc_prot |= 1 << 3;                         /* set G */
    }
    if (flags & MAP_CacheInhibit)
    {
        ppc_prot = (ppc_prot | 4 << 3) & ~(8 << 3); /* set I, clear W */
    }

    return ppc_prot & 0xfff;
}
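
/*
 * A worked example of the encoding above (illustrative only): for
 * MAP_Readable | MAP_Writable | MAP_CacheInhibit the function starts from
 * WIMG = 0010, sets PP = 11, rewrites it to PP = 10, then sets I and clears W,
 * returning 0x32 - exactly the low 12 bits that mmu_map_page() ORs into the
 * second word of the PTE.
 */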

int mmu_map_page(uint64_t virt, uint32_t phys, uint32_t prot)
{
    uint32_t mask = ((rdspr(SDR1) & 0x1ff) << 16) | 0xffc0;
    pte_t local_pte;
    pte_t *pteg;
    pte_t *pte = (pte_t *)0;
    int ptenum;

    /* The new entry is built locally first and flushed out at the very end */
    local_pte.vsid = 0;

    /* Calculate the hash function */
    uint32_t hash = (((uint32_t)(virt >> 12) & 0xffff) ^ (uint32_t)(virt >> 28)) & 0x7ffff;

    pteg = (pte_t *)((rdspr(SDR1) & ~0x1ff) | ((hash << 6) & mask));

    /* Look for a free slot in the primary group */
    for (ptenum = 0; ptenum < 8; ptenum++)
    {
        if (!(pteg[ptenum].vsid & 0x80000000))
        {
            pte = &pteg[ptenum];
            break;
        }
    }

    /* No free slot? Take the second hash and try the secondary group */
    if (!pte)
    {
        uint32_t hash2 = (~hash) & 0x7ffff;
        pteg = (pte_t *)((rdspr(SDR1) & ~0x1ff) | ((hash2 << 6) & mask));

        for (ptenum = 0; ptenum < 8; ptenum++)
        {
            if (!(pteg[ptenum].vsid & 0x80000000))
            {
                pte = &pteg[ptenum];
                local_pte.vsid = 0x40;  /* the entry lives in the secondary group: set the H bit */
                break;
            }
        }
    }

    if (!pte)
    {
        D(bug("[KRN] mmu_map_page(%06x%07x, %08x, %08x)\n", (uint32_t)(virt >> 28), (uint32_t)(virt & 0x0fffffff), phys, prot));
        D(bug("[KRN] Run out of free page table entries\n"));
        return 0;
    }

    local_pte.vsid |= ((virt >> 28) << 7);
    local_pte.vsid |= ((virt >> 22) & 0x3f);
    local_pte.vsid |= 0x80000000;
    local_pte.rpn = (phys & ~0xfff) | (prot & 0xfff);

    *pte = local_pte;

    asm volatile("dcbst 0,%0; sync;"::"r"(pte));
    asm volatile("tlbie %0"::"r"((uint32_t)virt));

    return 1;
}
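
/*
 * The dcbst/sync pair above pushes the freshly written PTE out of the data
 * cache before tlbie drops any stale translation for the page, so the next
 * table search sees the new entry. An illustrative use of the two helpers
 * together (the addresses are made up):
 *
 *   mmu_map_page(0x00100000, 0x01000000, mmu_protection(MAP_Readable | MAP_Writable));
 */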

int mmu_map_area(uint64_t virt, uint32_t phys, uint32_t length, uint32_t prot)
{
    bug("[KRN] mmu_map_area(%04x%08x, %08x, %08x, %04x)\n", (uint32_t)(virt >> 32), (uint32_t)virt, phys, length, prot);

    /* Map the area page by page */
    while (length)
    {
        if (!mmu_map_page(virt, phys, prot))
            return 0;

        virt += 4096;
        phys += 4096;
        length -= 4096;
    }

    return 1;
}
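
/*
 * The area is covered in 4 KB steps, so virt, phys and length are expected to
 * be page aligned; a 1 MB region, for instance, consumes 256 page table entries.
 */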

void mmu_init(char *mmu_dir, uint32_t mmu_size)
{
    int i;
    uint32_t ea;

    D(bug("[KRN] Initializing MMU\n"));
    D(bug("[KRN] Location of MMU tables: %08x-%08x\n", mmu_dir, mmu_dir + mmu_size - 1));

    if ((intptr_t)mmu_dir & (mmu_size - 1))
    {
        D(bug("[KRN] WRONG! The MMU dir must be located on a mmu-length boundary\n"));
        return;
    }

    /* Clear the MMU tables */
    bzero(mmu_dir, mmu_size);

    /* HTABORG | HTABMASK, as expected by the SDR1 register */
    uint32_t sdr = (intptr_t)mmu_dir | ((mmu_size >> 16) - 1);

    D(bug("[KRN] SDR1 = %08x\n", sdr));
    wrspr(SDR1, sdr);

    /* Prepare the segment registers. The proper values for virtual address
     * are to be determined later */
    for (i = 0; i < 16; i++)
    {
        asm volatile ("mtsrin %0,%1"::"r"(0x20000000 | i), "r"(i << 28));
    }

    D(bug("[KRN] Flushing TLB\n"));
    for (ea = 0x00001000; ea <= 0x0001f000; ea += 0x1000)
        asm volatile("tlbie %0"::"r"(ea));
}
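
/*
 * SDR1 layout reminder, to make the bit fiddling above easier to follow:
 * HTABORG lives in the upper 16 bits and must be naturally aligned to the
 * table size, HTABMASK in the low 9 bits. A 64 KB table at 0x00700000 gives
 * sdr = 0x00700000 (mask 0), while a 1 MB table at 0x00800000 gives
 * sdr = 0x0080000f.
 */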

void __attribute__((noreturn)) mmu_handler(regs_t *ctx, uint8_t exception, void *self)
{
    struct KernelBase *KernelBase = getKernelBase();
    struct ExecBase *SysBase = getSysBase();
    char *mod, *func;
    intptr_t offset;
    int i;

    ctx->dar = rdspr(19);       /* DAR */
    ctx->dsisr = rdspr(18);     /* DSISR */

    /* SysBase access at 4UL? Occurs only with a lwz instruction and DAR=4 */
    if ((exception == 3) && (ctx->dar == 4))
    {
        uint32_t insn = *(uint32_t *)ctx->srr0;

        /* Primary opcode 32: lwz */
        if ((insn & 0xfc000000) == 0x80000000)
        {
            int reg = (insn & 0x03e00000) >> 21;

            /* Emulate the load: hand out SysBase and resume after the faulting instruction */
            ctx->gpr[reg] = getSysBase();
            ctx->srr0 += 4;

            core_LeaveInterrupt(ctx);
        }
    }
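
    /*
     * For illustration (made-up numbers): "lwz %r4,4(0)" encodes as 0x80800004,
     * so the 0xfc000000 mask isolates primary opcode 32 (lwz) and bits 6-10
     * give rD = 4; the code above would then place SysBase in r4 and restart
     * the task right after the faulting load.
     */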

    D(bug("[KRN] Exception %d (%s) handler. Context @ %p, SysBase @ %p, KernelBase @ %p\n", exception,
          exception == 3 ? "DSI" : "ISI", ctx, SysBase, KernelBase));

    struct Task *t = FindTask(NULL);

    offset = findNames(ctx->srr0, &mod, &func);

    D(bug("[KRN] %s %p (%s)\n", t->tc_Node.ln_Type == NT_TASK ? "Task":"Process", t,
          t->tc_Node.ln_Name ? t->tc_Node.ln_Name : "--unknown--"));

    if (func)
        D(bug("[KRN] Crash at byte %d in func %s, module %s\n", offset, func, mod));
    else if (mod)
        D(bug("[KRN] Crash at byte %d in module %s\n", offset, mod));

    D(bug("[KRN] SPLower=%08x SPUpper=%08x\n", t->tc_SPLower, t->tc_SPUpper));
    D(bug("[KRN] Stack usage: %d bytes (%d %%)\n", t->tc_SPUpper - ctx->gpr[1],
          100 * ((uintptr_t)t->tc_SPUpper - ctx->gpr[1]) / ((uintptr_t)t->tc_SPUpper - (uintptr_t)t->tc_SPLower)));

    if (ctx->gpr[1] >= t->tc_SPLower && ctx->gpr[1] < t->tc_SPUpper)
        D(bug("[KRN] Stack in bounds\n"));
    else
        D(bug("[KRN] Stack exceeded the allowed size!\n"));

    D(bug("[KRN] Attempt to %s address %08x.\n", ctx->dsisr & 0x02000000 ? "write to":"read from", ctx->dar));

    D(bug("[KRN] SRR0=%08x, SRR1=%08x\n", ctx->srr0, ctx->srr1));
    D(bug("[KRN] CTR=%08x LR=%08x XER=%08x CCR=%08x\n", ctx->ctr, ctx->lr, ctx->xer, ctx->ccr));
    D(bug("[KRN] DAR=%08x DSISR=%08x\n", ctx->dar, ctx->dsisr));

    D(bug("[KRN] HASH1=%08x HASH2=%08x IMISS=%08x DMISS=%08x ICMP=%08x DCMP=%08x\n",
          rdspr(978), rdspr(979), rdspr(980), rdspr(976), rdspr(981), rdspr(977)));

    D(bug("[KRN] SPRG0=%08x SPRG1=%08x SPRG2=%08x SPRG3=%08x SPRG4=%08x SPRG5=%08x\n",
          rdspr(SPRG0), rdspr(SPRG1), rdspr(SPRG2), rdspr(SPRG3), rdspr(SPRG4), rdspr(SPRG5)));

    D(bug("[KRN] GPR00=%08x GPR01=%08x GPR02=%08x GPR03=%08x\n",
          ctx->gpr[0], ctx->gpr[1], ctx->gpr[2], ctx->gpr[3]));
    D(bug("[KRN] GPR04=%08x GPR05=%08x GPR06=%08x GPR07=%08x\n",
          ctx->gpr[4], ctx->gpr[5], ctx->gpr[6], ctx->gpr[7]));
    D(bug("[KRN] GPR08=%08x GPR09=%08x GPR10=%08x GPR11=%08x\n",
          ctx->gpr[8], ctx->gpr[9], ctx->gpr[10], ctx->gpr[11]));
    D(bug("[KRN] GPR12=%08x GPR13=%08x GPR14=%08x GPR15=%08x\n",
          ctx->gpr[12], ctx->gpr[13], ctx->gpr[14], ctx->gpr[15]));
    D(bug("[KRN] GPR16=%08x GPR17=%08x GPR18=%08x GPR19=%08x\n",
          ctx->gpr[16], ctx->gpr[17], ctx->gpr[18], ctx->gpr[19]));
    D(bug("[KRN] GPR20=%08x GPR21=%08x GPR22=%08x GPR23=%08x\n",
          ctx->gpr[20], ctx->gpr[21], ctx->gpr[22], ctx->gpr[23]));
    D(bug("[KRN] GPR24=%08x GPR25=%08x GPR26=%08x GPR27=%08x\n",
          ctx->gpr[24], ctx->gpr[25], ctx->gpr[26], ctx->gpr[27]));
    D(bug("[KRN] GPR28=%08x GPR29=%08x GPR30=%08x GPR31=%08x\n",
          ctx->gpr[28], ctx->gpr[29], ctx->gpr[30], ctx->gpr[31]));

    /* Dump the PTEGs pointed to by the HASH1 and HASH2 registers */
    D(bug("[KRN] Hash1 dump:\n[KRN] "));
    uint32_t *hash = (uint32_t *)rdspr(978);
    for (i = 0; i < 8; i++)
    {
        D(bug("%08x.%08x ", hash[0], hash[1]));
        hash += 2;
    }

    D(bug("\n[KRN] Hash2 dump:\n[KRN] "));
    hash = (uint32_t *)rdspr(979);
    for (i = 0; i < 8; i++)
    {
        D(bug("%08x.%08x ", hash[0], hash[1]));
        hash += 2;
    }
    D(bug("\n"));

    D(bug("[KRN] Instruction dump:\n"));
    ULONG *p = (ULONG *)ctx->srr0;
    for (i = 0; i < 8; i++)
    {
        if (find_pte((uint32_t)&p[i]))
            D(bug("[KRN] %08x: %08x\n", &p[i], p[i]));
        else
            D(bug("[KRN] %08x: ?\n", &p[i]));
    }

    D(bug("[KRN] Backtrace:\n"));
    uint32_t *sp = (uint32_t *)ctx->gpr[1];
    while (sp && sp[0])
    {
        sp = (uint32_t *)sp[0];

        offset = findNames(sp[1], &mod, &func);

        if (func)
            D(bug("[KRN] %08x: byte %d in func %s, module %s\n", sp[1], offset, func, mod));
        else if (mod)
            D(bug("[KRN] %08x: byte %d in module %s\n", sp[1], offset, mod));
        else
            D(bug("[KRN] %08x\n", sp[1]));
    }

    /* Take the crashed task off the CPU and park it on the dead task list */
    struct Task *dead = SysBase->ThisTask;

    SysBase->ThisTask = NULL;
    KernelBase->kb_LastDeadTask = dead;

    Enqueue(&KernelBase->kb_DeadTasks, dead);

    D(bug("[KRN] **UNHANDLED EXCEPTION** stopping here...\n"));

    while (1)
    {
        wrmsr(rdmsr() | MSR_POW);
    }
}

AROS_LH3(void, KrnSetProtection,
         AROS_LHA(void *, address, A0),
         AROS_LHA(uint32_t, length, D0),
         AROS_LHA(KRN_MapAttr, flags, D1),
         struct KernelBase *, KernelBase, 9, Kernel)
{
    AROS_LIBFUNC_INIT

    uint32_t ppc_prot = mmu_protection(flags);
    uintptr_t virt = (uintptr_t)address;
    uint32_t msr;
    pte_t *pte;

    D(bug("[KRN] KrnSetProtection(%08x, %08x, %04x)\n", virt, virt + length - 1, ppc_prot));

    length = (length + 4095) & ~4095;

    msr = goSuper();

    /* Rewrite the protection bits of every PTE covering the requested range */
    while (length)
    {
        pte = find_pte(virt);

        if (pte)
            pte->rpn = (pte->rpn & 0xfffff000) | ppc_prot;

        virt += 4096;
        length -= 4096;
    }

    wrmsr(msr);

    AROS_LIBFUNC_EXIT
}

AROS_LH4(int, KrnMapGlobal,
         AROS_LHA(void *, virtual, A0),
         AROS_LHA(void *, physical, A1),
         AROS_LHA(uint32_t, length, D0),
         AROS_LHA(KRN_MapAttr, flags, D1),
         struct KernelBase *, KernelBase, 9, Kernel)
{
    AROS_LIBFUNC_INIT

    int retval;
    uint32_t msr;
    uint32_t ppc_prot = mmu_protection(flags);

    D(bug("[KRN] KrnMapGlobal(%08x->%08x %08x %04x)\n", virtual, physical, length, flags));

    msr = goSuper();
    retval = mmu_map_area((uint64_t)virtual & 0xffffffff, physical, length, ppc_prot);
    wrmsr(msr);

    return retval;

    AROS_LIBFUNC_EXIT
}
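
/*
 * Typical use from a driver would look roughly like this (illustrative only,
 * the addresses are made up):
 *
 *   // map 16 KB of MMIO registers uncached and guarded at their physical address
 *   KrnMapGlobal((void *)0xf0000000, (void *)0xf0000000, 0x4000,
 *                MAP_Readable | MAP_Writable | MAP_CacheInhibit | MAP_Guarded);
 */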

AROS_LH2(int, KrnUnmapGlobal,
         AROS_LHA(void *, virtual, A0),
         AROS_LHA(uint32_t, length, D0),
         struct KernelBase *, KernelBase, 10, Kernel)
{
    AROS_LIBFUNC_INIT

    uint32_t msr;
    uintptr_t virt = (uintptr_t)virtual;

    length = (length + 4095) & ~4095;

    msr = goSuper();

    /* Invalidate every PTE covering the range and flush the stale translations */
    while (length)
    {
        pte_t *pte = find_pte(virt);

        if (pte)
        {
            pte->vsid = 0;
            asm volatile("tlbie %0"::"r"(virt));
        }

        virt += 4096;
        length -= 4096;
    }

    wrmsr(msr);

    return 1;

    AROS_LIBFUNC_EXIT
}

uintptr_t virt2phys(uintptr_t virt)
{
    uintptr_t phys = 0xffffffff;
    pte_t *pte = find_pte(virt);

    if (pte)
    {
        phys = pte->rpn & ~0xfff;
        phys |= virt & 0xfff;
    }

    return phys;
}
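
/*
 * Example of the arithmetic above: with a PTE whose rpn word is 0x01000032,
 * virt2phys(0x00100123) returns 0x01000123 - the page frame taken from the PTE
 * plus the 12-bit offset taken from the virtual address. 0xffffffff means no
 * mapping exists.
 */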

AROS_LH1(void *, KrnVirtualToPhysical,
         AROS_LHA(void *, virtual, A0),
         struct KernelBase *, KernelBase, 0, Kernel)
{
    AROS_LIBFUNC_INIT

    uintptr_t virt = (uintptr_t)virtual;
    uintptr_t phys;
    uint32_t msr = goSuper();

    phys = virt2phys(virt);

    wrmsr(msr);

    return (void *)phys;

    AROS_LIBFUNC_EXIT
}

/* MMU exception handlers follow the G2 core manual */
static void __attribute__((used)) __exception_template()
{
    /*
     * Instruction TB miss flow:
     *
     *  srr0  -> address of instruction that missed
     *  srr1  -> 0:3=cr0 4=lru way bit 16:31 = saved MSR
     *  iMiss -> ea that missed
     *  iCmp  -> the compare value for the va that missed
     *  hash1 -> pointer to first hash pteg
     *  hash2 -> pointer to second hash pteg
     *
     *  r0 is saved counter
     *  r2 is pointer to pteg
     *  r3 is current compare value
     */
    asm volatile(".align 8; .globl __vector_imiss; .type __vector_imiss,@function\n"
    "__vector_imiss:                \n"
    "       mfspr %r2,hash1        \n" /* get first PTEG pointer */
    "       addi %r1,0,8           \n" /* load 8 for the PTE counter */
    "       mfctr %r0              \n" /* save counter */
    "       mfspr %r3,iCmp         \n" /* get first compare value */
    "       addi %r2,%r2,-8        \n" /* pre-decrement the pointer */
    "im0:   mtctr %r1              \n" /* load counter */
    "im1:   lwzu %r1,8(%r2)        \n" /* get next PTE */
    "       cmp c0,%r1,%r3         \n" /* found the PTE? */
    "       bdnzf eq,im1           \n" /* loop while not found and counter != 0 */
    "       bne instrSecHash       \n" /* not found: set up second hash or give up */
    "       lwz %r1,+4(%r2)        \n" /* load the PTE lower word */
    "       andi. %r3,%r1,8        \n" /* check the G bit */
    "       bne doISIp             \n" /* fetch from guarded space: protection violation */
    "       mtctr %r0              \n" /* restore counter */
    "       mfspr %r0,iMiss        \n" /* get the miss address for the tlbli */
    "       mfspr %r3,srr1         \n" /* get the saved cr0 bits */
    "       mtcrf 0x80,%r3         \n" /* restore CR0 */
    "       mtspr rpa,%r1          \n" /* set the PTE */
    "       ori %r1,%r1,0x100      \n" /* set the reference bit */
    "       srwi %r1,%r1,8         \n" /* get byte 7 of the PTE */
    "       tlbli %r0              \n" /* load the ITLB */
    "       stb %r1,+6(%r2)        \n" /* update the page table */
    "       rfi                    \n" /* return to the interrupted program */

    "instrSecHash:                  \n"
    "       andi. %r1,%r3,0x0040   \n" /* already tried the second hash? */
    "       bne doISI              \n" /* yes: the PTE is simply not there */
    "       mfspr %r2,hash2        \n" /* get the second PTEG pointer */
    "       ori %r3,%r3,0x0040     \n" /* change the compare value (H bit) */
    "       addi %r1,0,8           \n" /* reload the counter */
    "       addi %r2,%r2,-8        \n" /* pre-decrement for update on load */
    "       b im0                  \n" /* try the second hash */

    "doISIp: mfspr %r3,srr1        \n" /* protection violation path */
    "       andi. %r2,%r3,0xffff   \n" /* clean upper srr1 */
    "       addis %r2,%r2,0x0800   \n" /* set srr1<4>: protection violation */
    "       b isi1                 \n"
    "doISI: mfspr %r3,srr1         \n" /* translation-not-found path */
    "       andi. %r2,%r3,0xffff   \n" /* clean upper srr1 */
    "       addis %r2,%r2,0x4000   \n" /* set srr1<1>: PTE not found */
    "isi1:  mtctr %r0              \n" /* restore counter */
    "       mtspr srr1,%r2         \n" /* set srr1 */
    "       mfmsr %r0              \n"
    "       xoris %r0,%r0, 0x8002  \n" /* leave TGPR mode */
    "       mtcrf 0x80,%r3         \n" /* restore CR0 */
    "       mtmsr %r0              \n" /* back to the normal GPRs */
    /* execution then continues at the ISI exception vector */
    );

    /*
     * Data TB miss (load) flow:
     *
     *  srr0  -> address of instruction that caused the data tlb miss
     *  srr1  -> 0:3=cr0 4=lru way bit 5=1 if store 16:31 = saved MSR
     *  dMiss -> ea that missed
     *  dCmp  -> the compare value for the va that missed
     *  hash1 -> pointer to first hash pteg
     *  hash2 -> pointer to second hash pteg
     *
     *  r0 is saved counter
     *  r2 is pointer to pteg
     *  r3 is current compare value
     */
    asm volatile(".align 8; .globl __vector_dmiss; .type __vector_dmiss,@function\n"
    "__vector_dmiss:                \n"
    "       mfspr %r2, hash1       \n" /* get first PTEG pointer */
    "       addi %r1,0,8           \n" /* load 8 for the PTE counter */
    "       mfctr %r0              \n" /* save counter */
    "       mfspr %r3, dCmp        \n" /* get first compare value */
    "       addi %r2, %r2, -8      \n" /* pre-decrement the pointer */
    "dm0:   mtctr %r1              \n" /* load counter */
    "dm1:   lwzu %r1, 8(%r2)       \n" /* get next PTE */
    "       cmp c0, %r1, %r3       \n" /* found the PTE? */
    "       bdnzf eq,dm1           \n" /* loop while not found and counter != 0 */
    "       bne dataSecHash        \n" /* not found: set up second hash or take a DSI */
    "       lwz %r1, +4(%r2)       \n" /* load the PTE lower word */
    "       mtctr %r0              \n" /* restore counter */
    "       mfspr %r0, dMiss       \n" /* get the miss address for the tlbld */
    "       mfspr %r3, srr1        \n" /* get the saved cr0 bits */
    "       mtcrf 0x80, %r3        \n" /* restore CR0 */
    "       mtspr rpa, %r1         \n" /* set the PTE */
    "       ori %r1, %r1, 0x100    \n" /* set the reference bit */
    "       srwi %r1, %r1, 8       \n" /* get byte 7 of the PTE */
    "       tlbld %r0              \n" /* load the DTLB */
    "       stb %r1, +6(%r2)       \n" /* update the page table */
    "       rfi                    \n" /* return to the interrupted program */

    "dataSecHash:                   \n"
    "       andi. %r1, %r3, 0x0040 \n" /* already tried the second hash? */
    "       bne doDSI              \n" /* yes: raise a DSI (label in the store-miss block below) */
    "       mfspr %r2, hash2       \n" /* get the second PTEG pointer */
    "       ori %r3, %r3, 0x0040   \n" /* change the compare value (H bit) */
    "       addi %r1,0,8           \n" /* reload the counter */
    "       addi %r2, %r2, -8      \n" /* pre-decrement for update on load */
    "       b dm0                  \n" /* try the second hash */
    );

    /* Data TB miss on store (or store with C=0) flow: also checks the change bit and PP protection */
    asm volatile(".align 8; .globl __vector_dmissw; .type __vector_dmissw,@function\n"
    "__vector_dmissw:               \n"
    "       mfspr %r2, hash1       \n" /* get first PTEG pointer */
    "       addi %r1,0,8           \n" /* load 8 for the PTE counter */
    "       mfctr %r0              \n" /* save counter */
    "       mfspr %r3, dCmp        \n" /* get first compare value */
    "       addi %r2, %r2, -8      \n" /* pre-decrement the pointer */
    "ceq0:  mtctr %r1              \n" /* load counter */
    "ceq1:  lwzu %r1, 8(%r2)       \n" /* get next PTE */
    "       cmp c0, %r1, %r3       \n" /* found the PTE? */
    "       bdnzf eq,ceq1          \n" /* loop while not found and counter != 0 */
    "       bne cEq0SecHash        \n" /* not found: set up second hash or take a DSI */
    "       lwz %r1, +4(%r2)       \n" /* load the PTE lower word */
    "       andi. %r3,%r1,0x80     \n" /* check the C bit */
    "       beq cEq0ChkProt        \n" /* C == 0: check protection before setting it */
    "ceq2:  mtctr %r0              \n" /* restore counter */
    "       mfspr %r0, dMiss       \n" /* get the miss address for the tlbld */
    "       mfspr %r3, srr1        \n" /* get the saved cr0 bits */
    "       mtcrf 0x80, %r3        \n" /* restore CR0 */
    "       mtspr rpa, %r1         \n" /* set the PTE */
    "       tlbld %r0              \n" /* load the DTLB */
    "       rfi                    \n" /* return to the interrupted program */

    "cEq0SecHash:                   \n"
    "       andi. %r1, %r3, 0x0040 \n" /* already tried the second hash? */
    "       bne doDSI              \n" /* yes: raise a DSI */
    "       mfspr %r2, hash2       \n" /* get the second PTEG pointer */
    "       ori %r3, %r3, 0x0040   \n" /* change the compare value (H bit) */
    "       addi %r1,0,8           \n" /* reload the counter */
    "       addi %r2, %r2, -8      \n" /* pre-decrement for update on load */
    "       b ceq0                 \n" /* try the second hash */

    "cEq0ChkProt:                   \n"
    "       rlwinm. %r3,%r1,30,0,1 \n" /* test PP */
    "       bge- chk0              \n" /* if PP == 00 or 01, check the key */
    "       andi. %r3,%r1,1        \n" /* test PP[0] */
    "       beq+ chk2              \n" /* PP == 10: store is allowed */
    "       b doDSIp               \n" /* PP == 11: protection violation */
    "chk0:  mfspr %r3,srr1         \n" /* get the saved key bit */
    "       andis. %r3,%r3,0x0008  \n" /* test the key */
    "       beq chk2               \n" /* key == 0: store is allowed */
    "       b doDSIp               \n" /* key == 1: protection violation */
    "chk2:  ori %r1, %r1, 0x180    \n" /* set reference and change bits */
    "       sth %r1, 6(%r2)        \n" /* update the page table */
    "       b ceq2                 \n" /* and go load the TLB */

    "doDSI: mfspr %r3, srr1        \n" /* PTE-not-found path */
    "       rlwinm %r1, %r3, 9,6,6 \n" /* get srr1<flag> to bit 6 for load/store */
    "       addis %r1, %r1, 0x4000 \n" /* set dsisr<1>: PTE not found */
    "       b dsi1                 \n"
    "doDSIp: mfspr %r3, srr1       \n" /* protection violation path */
    "       rlwinm %r1, %r3, 9,6,6 \n" /* get srr1<flag> to bit 6 for load/store */
    "       addis %r1, %r1, 0x0800 \n" /* set dsisr<4>: protection violation */
    "dsi1:  mtctr %r0              \n" /* restore counter */
    "       andi. %r2, %r3, 0xffff \n" /* clear upper bits of srr1 */
    "       mtspr srr1, %r2        \n" /* set srr1 */
    "       mtspr dsisr, %r1       \n" /* load the dsisr */
    "       mfspr %r1, dMiss       \n" /* get the miss address */
    "       rlwinm. %r2,%r2,0,31,31\n" /* test the LE bit */
    "       beq dsi2               \n"
    "       xori %r1,%r1,0x07      \n" /* de-mung the address in little-endian mode */
    "dsi2:  mtspr dar, %r1         \n" /* put it in the DAR */
    "       mfmsr %r0              \n"
    "       xoris %r0, %r0, 0x2    \n" /* leave TGPR mode */
    "       mtcrf 0x80, %r3        \n" /* restore CR0 */
    "       mtmsr %r0              \n" /* back to the normal GPRs */
    /* execution then continues at the DSI exception vector */
    );

    asm volatile(".align 8;");
}