/*
 * mmu.c
 *
 *  Created on: Aug 26, 2008
 *      Author: misc
 */

#include <inttypes.h>
#include <asm/mpc5200b.h>
#include <asm/io.h>
#include <aros/kernel.h>
#include <aros/libcall.h>
#include <stddef.h>
#include <string.h>

#include <proto/exec.h>
#include <proto/kernel.h>

#include "kernel_intern.h"
typedef struct {
    uint32_t vsid;
    uint32_t rpn;
} pte_t;
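/*
 * Note: the two words mirror the classic PowerPC (603e/G2) page table entry layout:
 * word 0 holds V (0x80000000), the VSID, the H (secondary hash) bit and the API
 * (abbreviated page index); word 1 holds the physical page number together with the
 * R, C, WIMG and PP bits. "vsid" and "rpn" here name those two words as a whole.
 */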
/* Search the MMU hash table for the page table entry corresponding to the given virtual address */
pte_t *find_pte(uint64_t virt)
{
    pte_t *ret = (pte_t *)0;

    uint32_t mask;
    pte_t *pteg;
    uint32_t vsid;
    int i;

    /* Calculate the hash function */
    uint32_t hash = (((virt >> 12) & 0xffff) ^ (virt >> 28)) & 0x7ffff;

    /* Which vsid are we looking for? */
    vsid = 0x80000000 | ((virt >> 28) << 7) | ((virt >> 22) & 0x3f);

    /* Which mask do we use? It depends on the size of the MMU hash table */
    mask = ((rdspr(SDR1) & 0x1ff) << 16) | 0xffc0;

    /* Get the first group of PTEs */
    pteg = (pte_t *)((rdspr(SDR1) & ~0x1ff) | ((hash << 6) & mask));

    /* Search the primary group */
    for (i=0; i < 8; i++)
    {
        if (pteg[i].vsid == vsid)
        {
            ret = &pteg[i];
            break;
        }
    }

    /*
     * If the page was not found in the primary group, get the second hash, second
     * vsid and search the secondary group.
     */
    if (!ret)
    {
        uint32_t hash2 = (~hash) & 0x7ffff;
        vsid |= 0x40;
        pteg = (pte_t *)((rdspr(SDR1) & ~0x1ff) | ((hash2 << 6) & mask));

        /* Search the secondary group */
        for (i=0; i < 8; i++)
        {
            if (pteg[i].vsid == vsid)
            {
                ret = &pteg[i];
                break;
            }
        }
    }

    return ret;
}
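/*
 * Worked example (illustrative, assuming the minimal 64 KiB hash table, HTABMASK = 0):
 * for virt = 0x00FF8000 the primary hash is (0x0FF8 ^ 0x0) & 0x7ffff = 0x0FF8, so the
 * primary PTEG lies at HTABORG | ((0x0FF8 << 6) & 0xffc0) = HTABORG + 0xFE00; the
 * secondary group uses the one's-complement hash with the H bit (0x40) set in the vsid word.
 */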
uint16_t mmu_protection(KRN_MapAttr flags)
{
    uint16_t ppc_prot = 2 << 3;                         /* WIMG = 0010, memory coherence (M) enforced */

    if (flags & MAP_Readable)
    {
        ppc_prot |= 0x03;                               /* PP = 11: read-only access */
    }
    if (flags & MAP_Writable)
    {
        ppc_prot = (ppc_prot | 2) & ~1;                 /* PP = 10: read/write access */
    }
    if (flags & MAP_WriteThrough)
    {
        ppc_prot |= 8 << 3;                             /* set W: write-through */
    }
    if (flags & MAP_Guarded)
    {
        ppc_prot |= 1 << 3;                             /* set G: guarded */
    }
    if (flags & MAP_CacheInhibit)
    {
        ppc_prot = (ppc_prot | 4 << 3) & ~(8 << 3);     /* set I, clear W: cache-inhibited */
    }

    return ppc_prot & 0xfff;
}
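/*
 * The returned value is the low 12 bits of PTE word 1: PP in bits 0-1 and WIMG in bits 3-6.
 * For example, mmu_protection(MAP_Readable | MAP_Writable | MAP_CacheInhibit) yields 0x32:
 * PP = 10 (read/write), I and M set, W and G clear.
 */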
int mmu_map_page(uint64_t virt, uint32_t phys, uint32_t prot)
{
    uint32_t mask = ((rdspr(SDR1) & 0x1ff) << 16) | 0xffc0;
    pte_t *pteg;
    pte_t *pte = NULL;
    pte_t local_pte;
    int ptenum;

    /* Calculate the hash function */
    uint32_t hash = (((uint32_t)(virt >> 12) & 0xffff) ^ (uint32_t)(virt >> 28)) & 0x7ffff;

    pteg = (pte_t *)((rdspr(SDR1) & ~0x1ff) | ((hash << 6) & mask));

    /* Look for a free slot in the primary group */
    for (ptenum = 0; ptenum < 8; ptenum++)
    {
        if (!(pteg[ptenum].vsid & 0x80000000))
        {
            pte = &pteg[ptenum];
            local_pte.vsid = 0;
            break;
        }
    }

    /* No free slot? Try the secondary group */
    if (!pte)
    {
        uint32_t hash2 = (~hash) & 0x7ffff;
        pteg = (pte_t *)((rdspr(SDR1) & ~0x1ff) | ((hash2 << 6) & mask));

        for (ptenum = 0; ptenum < 8; ptenum++)
        {
            if (!(pteg[ptenum].vsid & 0x80000000))
            {
                pte = &pteg[ptenum];
                local_pte.vsid = 0x40;
                break;
            }
        }
    }

    if (!pte)
    {
        D(bug("[KRN] mmu_map_page(%06x%07x, %08x, %08x)\n", (uint32_t)(virt >> 28), (uint32_t)(virt & 0x0fffffff), phys, prot));
        D(bug("[KRN] Out of free page table entries\n"));
        return 0;
    }

    local_pte.vsid |= ((virt >> 28) << 7);
    local_pte.vsid |= ((virt >> 22) & 0x3f);
    local_pte.vsid |= 0x80000000;
    local_pte.rpn = (phys & ~0xfff) | (prot & 0xfff);

    *pte = local_pte;

    asm volatile("dcbst 0,%0; sync;"::"r"(pte));
    asm volatile("tlbie %0"::"r"((uint32_t)virt));

    return 1;
}
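/*
 * Note on the two asm statements at the end of mmu_map_page() (rationale inferred from the
 * G2/603e programming model): dcbst + sync push the freshly written PTE out of the data
 * cache so the software table-walk handlers below see the updated entry, and tlbie drops
 * any stale TLB entry for that effective address.
 */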
int mmu_map_area(uint64_t virt, uint32_t phys, uint32_t length, uint32_t prot)
{
    bug("[KRN] mmu_map_area(%04x%08x, %08x, %08x, %04x)\n", (uint32_t)(virt >> 32), (uint32_t)virt, phys, length, prot);
    while (length)
    {
        if (!mmu_map_page(virt, phys, prot))
            return 0;

        virt += 4096;
        phys += 4096;
        length -= 4096;
    }

    return 1;
}
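/*
 * Usage sketch (illustrative values only, not taken from this file): map 16 MiB of a
 * framebuffer 1:1 as cache-inhibited, guarded memory:
 *
 *     mmu_map_area(0xf0000000ULL, 0xf0000000, 0x01000000,
 *                  mmu_protection(MAP_Readable | MAP_Writable | MAP_CacheInhibit | MAP_Guarded));
 *
 * virt, phys and length are assumed to be 4 KiB aligned, since the loop above advances in
 * whole pages.
 */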
void mmu_init(char *mmu_dir, uint32_t mmu_size)
{
    int i;

    D(bug("[KRN] Initializing MMU\n"));
    D(bug("[KRN] Location of MMU tables: %08x-%08x\n", mmu_dir, mmu_dir + mmu_size - 1));

    if ((intptr_t)mmu_dir & (mmu_size - 1))
    {
        D(bug("[KRN] WRONG! The MMU dir must be aligned to a boundary equal to its size\n"));
    }
    else
    {
        uint32_t ea;

        /* Clear the MMU tables */
        bzero(mmu_dir, mmu_size);

        uint32_t sdr = (intptr_t)mmu_dir | ((mmu_size >> 16) - 1);

        D(bug("[KRN] SDR1 = %08x\n", sdr));

        wrspr(SDR1, sdr);
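        /*
         * SDR1 holds the hash table base (HTABORG) in its upper bits and the table size
         * mask (HTABMASK) in its low 9 bits. Example: a 64 KiB table gives
         * (0x10000 >> 16) - 1 = 0, the minimal table; a 128 KiB table gives a mask of 1,
         * doubling the number of addressable PTE groups.
         */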
        /* Prepare the segment registers. The proper values for virtual address
         * are to be determined later */
        for (i=0; i < 16; i++)
        {
            asm volatile ("mtsrin %0,%1"::"r"(0x20000000 | i),"r"(i << 28));
        }

        D(bug("[KRN] Flushing TLB\n"));
        for (ea=0x00001000; ea <= 0x0001f000; ea+=0x1000)
            asm volatile("tlbie %0"::"r"(ea));
    }
}
void __attribute__((noreturn)) mmu_handler(regs_t *ctx, uint8_t exception, void *self)
{
    struct KernelBase *KernelBase = getKernelBase();
    struct ExecBase *SysBase = getSysBase();

    ctx->dar = rdspr(19);
    ctx->dsisr = rdspr(18);

    /* SysBase access at 4UL? Occurs only with a lwz instruction and DAR=4 */
    if ((exception == 3) && (ctx->dar == 4))
    {
        uint32_t insn = *(uint32_t *)ctx->srr0;

        if ((insn & 0xfc000000) == 0x80000000)
        {
            int reg = (insn & 0x03e00000) >> 21;

            ctx->gpr[reg] = (uint32_t)getSysBase();
            ctx->srr0 += 4;

            core_LeaveInterrupt(ctx);
        }
    }
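    /*
     * The fixup above emulates the classic AmigaOS idiom of loading ExecBase from absolute
     * address 4: primary opcode 32 (top bits 0x80000000) is lwz, so a faulting
     * "lwz rD,4(0)" gets SysBase written into rD and execution resumes at the following
     * instruction instead of falling through to the crash report below.
     */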
    D(bug("[KRN] Exception %d (%s) handler. Context @ %p, SysBase @ %p, KernelBase @ %p\n", exception, exception == 3 ? "DSI" : "ISI", ctx, SysBase, KernelBase));
    if (SysBase)
    {
        struct Task *t = FindTask(NULL);
        uint32_t offset;
        char *func, *mod;

        offset = findNames(ctx->srr0, &mod, &func);
        D(bug("[KRN] %s %p (%s)\n", t->tc_Node.ln_Type == NT_TASK ? "Task":"Process", t, t->tc_Node.ln_Name ? t->tc_Node.ln_Name : "--unknown--"));

        if (func)
            D(bug("[KRN] Crash at byte %d in func %s, module %s\n", offset, func, mod));
        else if (mod)
            D(bug("[KRN] Crash at byte %d in module %s\n", offset, mod));

        D(bug("[KRN] SPLower=%08x SPUpper=%08x\n", t->tc_SPLower, t->tc_SPUpper));
        D(bug("[KRN] Stack usage: %d bytes (%d %%)\n", t->tc_SPUpper - ctx->gpr[1],
              100 * ((uintptr_t)t->tc_SPUpper - ctx->gpr[1]) / ((uintptr_t)t->tc_SPUpper - (uintptr_t)t->tc_SPLower)));

        if (ctx->gpr[1] >= t->tc_SPLower && ctx->gpr[1] < t->tc_SPUpper)
            D(bug("[KRN] Stack in bounds\n"));
        else
            D(bug("[KRN] Stack exceeded the allowed size!\n"));
    }
    if (exception == 3)
        D(bug("[KRN] Attempt to %s address %08x.\n", ctx->dsisr & 0x02000000 ? "write to":"read from", ctx->dar));

    D(bug("[KRN] SRR0=%08x, SRR1=%08x\n",ctx->srr0, ctx->srr1));
    D(bug("[KRN] CTR=%08x LR=%08x XER=%08x CCR=%08x\n", ctx->ctr, ctx->lr, ctx->xer, ctx->ccr));
    D(bug("[KRN] DAR=%08x DSISR=%08x\n", ctx->dar, ctx->dsisr));

    D(bug("[KRN] HASH1=%08x HASH2=%08x IMISS=%08x DMISS=%08x ICMP=%08x DCMP=%08x\n",
          rdspr(978), rdspr(979), rdspr(980), rdspr(976), rdspr(981), rdspr(977)));

    D(bug("[KRN] SPRG0=%08x SPRG1=%08x SPRG2=%08x SPRG3=%08x SPRG4=%08x SPRG5=%08x\n",
          rdspr(SPRG0),rdspr(SPRG1),rdspr(SPRG2),rdspr(SPRG3),rdspr(SPRG4),rdspr(SPRG5)));

    D(bug("[KRN] GPR00=%08x GPR01=%08x GPR02=%08x GPR03=%08x\n",
          ctx->gpr[0],ctx->gpr[1],ctx->gpr[2],ctx->gpr[3]));
    D(bug("[KRN] GPR04=%08x GPR05=%08x GPR06=%08x GPR07=%08x\n",
          ctx->gpr[4],ctx->gpr[5],ctx->gpr[6],ctx->gpr[7]));
    D(bug("[KRN] GPR08=%08x GPR09=%08x GPR10=%08x GPR11=%08x\n",
          ctx->gpr[8],ctx->gpr[9],ctx->gpr[10],ctx->gpr[11]));
    D(bug("[KRN] GPR12=%08x GPR13=%08x GPR14=%08x GPR15=%08x\n",
          ctx->gpr[12],ctx->gpr[13],ctx->gpr[14],ctx->gpr[15]));
    D(bug("[KRN] GPR16=%08x GPR17=%08x GPR18=%08x GPR19=%08x\n",
          ctx->gpr[16],ctx->gpr[17],ctx->gpr[18],ctx->gpr[19]));
    D(bug("[KRN] GPR20=%08x GPR21=%08x GPR22=%08x GPR23=%08x\n",
          ctx->gpr[20],ctx->gpr[21],ctx->gpr[22],ctx->gpr[23]));
    D(bug("[KRN] GPR24=%08x GPR25=%08x GPR26=%08x GPR27=%08x\n",
          ctx->gpr[24],ctx->gpr[25],ctx->gpr[26],ctx->gpr[27]));
    D(bug("[KRN] GPR28=%08x GPR29=%08x GPR30=%08x GPR31=%08x\n",
          ctx->gpr[28],ctx->gpr[29],ctx->gpr[30],ctx->gpr[31]));
    int i;
    D(bug("[KRN] Hash1 dump:\n[KRN] "));
    uint32_t *hash = (uint32_t *)rdspr(978);
    for (i=0; i < 8; i++)
    {
        D(bug("%08x.%08x ", hash[0], hash[1]));
        hash += 2;
        if (i == 3)
            D(bug("\n[KRN] "));
    }

    D(bug("\n[KRN] Hash2 dump:\n[KRN] "));
    hash = (uint32_t *)rdspr(979);
    for (i=0; i < 8; i++)
    {
        D(bug("%08x.%08x ", hash[0], hash[1]));
        hash += 2;
        if (i == 3)
            D(bug("\n[KRN] "));
    }

    D(bug("\n"));
    D(bug("[KRN] Instruction dump:\n"));
    ULONG *p = (ULONG*)ctx->srr0;
    for (i=0; i < 8; i++)
    {
        if (find_pte((uint32_t)&p[i]))
            D(bug("[KRN] %08x: %08x\n", &p[i], p[i]));
        else
            D(bug("[KRN] %08x: ?\n", &p[i]));
    }

    D(bug("[KRN] Backtrace:\n"));
    uint32_t *sp = (uint32_t *)ctx->gpr[1];
    while (*sp)
    {
        char *mod, *func;
        sp = (uint32_t *)sp[0];
        uint32_t offset;

        offset = findNames(sp[1], &mod, &func);

        if (func)
            D(bug("[KRN] %08x: byte %d in func %s, module %s\n", sp[1], offset, func, mod));
        else if (mod)
            D(bug("[KRN] %08x: byte %d in module %s\n", sp[1], offset, mod));
        else
            D(bug("[KRN] %08x\n", sp[1]));
    }

    if (SysBase)
    {
        struct Task *dead = SysBase->ThisTask;

        SysBase->ThisTask = NULL;
        KernelBase->kb_LastDeadTask = dead;
        Remove(dead);
        Enqueue(&KernelBase->kb_DeadTasks, dead);

        core_Dispatch(ctx);
    }
    D(bug("[KRN] **UNHANDLED EXCEPTION** stopping here...\n"));
    while (1) {
        wrmsr(rdmsr() | MSR_POW);
    }
}
AROS_LH3(void, KrnSetProtection,
         AROS_LHA(void *, address, A0),
         AROS_LHA(uint32_t, length, D0),
         AROS_LHA(KRN_MapAttr, flags, D1),
         struct KernelBase *, KernelBase, 9, Kernel)
{
    AROS_LIBFUNC_INIT

    uint32_t ppc_prot = mmu_protection(flags);
    uintptr_t virt = (uintptr_t)address;
    pte_t *pte;

    D(bug("[KRN] KrnSetProtection(%08x, %08x, %04x)\n", virt, virt + length - 1, ppc_prot));

    virt &= ~4095;
    length = (length + 4095) & ~4095;

    uint32_t msr;
    msr = goSuper();
    while (length)
    {
        pte = find_pte(virt);

        if (pte)
        {
            pte->rpn = (pte->rpn & 0xfffff000) | ppc_prot;
        }

        virt += 4096;
        length -= 4096;
    }
    goUser(msr);

    AROS_LIBFUNC_EXIT
}
AROS_LH4(int, KrnMapGlobal,
         AROS_LHA(void *, virtual, A0),
         AROS_LHA(void *, physical, A1),
         AROS_LHA(uint32_t, length, D0),
         AROS_LHA(KRN_MapAttr, flags, D1),
         struct KernelBase *, KernelBase, 9, Kernel)
{
    AROS_LIBFUNC_INIT

    int retval = 0;
    uint32_t msr;
    uint32_t ppc_prot = mmu_protection(flags);

    D(bug("[KRN] KrnMapGlobal(%08x->%08x %08x %04x)\n", virtual, physical, length, flags));

    msr = goSuper();
    retval = mmu_map_area((uint64_t)virtual & 0xffffffff, physical, length, ppc_prot);
    wrmsr(msr);

    return retval;

    AROS_LIBFUNC_EXIT
}
AROS_LH2(int, KrnUnmapGlobal,
         AROS_LHA(void *, virtual, A0),
         AROS_LHA(uint32_t, length, D0),
         struct KernelBase *, KernelBase, 10, Kernel)
{
    AROS_LIBFUNC_INIT

    int retval = 0;
    uint32_t msr;
    uintptr_t virt = (uintptr_t)virtual;
    virt &= ~4095;
    length = (length + 4095) & ~4095;

    msr = goSuper();
    while (length)
    {
        pte_t *pte = find_pte(virt);
        if (pte)                /* guard against pages that were never mapped */
            pte->vsid = 0;
        virt += 4096;
        length -= 4096;
    }
    goUser(msr);

    return retval;

    AROS_LIBFUNC_EXIT
}
uintptr_t virt2phys(uintptr_t virt)
{
    uintptr_t phys = 0xffffffff;
    pte_t *pte = find_pte(virt);

    if (pte)
    {
        phys = pte->rpn & ~0xfff;
        phys |= virt & 0xfff;
    }

    return phys;
}
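/*
 * The page offset is preserved, so e.g. virt2phys(0x00003004) returns the physical page
 * mapped at 0x00003000 plus the offset 0x004, or 0xffffffff if no PTE covers the address.
 */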
AROS_LH1(void *, KrnVirtualToPhysical,
         AROS_LHA(void *, virtual, A0),
         struct KernelBase *, KernelBase, 0, Kernel)
{
    AROS_LIBFUNC_INIT

    uintptr_t virt = (uintptr_t)virtual;
    uintptr_t phys;
    uint32_t msr = goSuper();

    phys = virt2phys(virt);

    goUser(msr);

    return (void*)phys;

    AROS_LIBFUNC_EXIT
}
/* The MMU exception handlers follow the G2 core manual */
static void __attribute__((used)) __exception_template()
{
    asm volatile("\n"
        ".set dMiss, 976\n"
        ".set dCmp, 977\n"
        ".set hash1, 978\n"
        ".set hash2, 979\n"
        ".set iMiss, 980\n"
        ".set iCmp, 981\n"
        ".set rpa, 982\n"
        ".set c0, 0\n"
        ".set dar, 19\n"
        ".set dsisr, 18\n"
        ".set srr0, 26\n"
        ".set srr1, 27\n"
    );
    /*
     * Instruction TLB miss flow
     * Entry:
     *
     *  Vec = 1000
     *  srr0 -> address of instruction that missed
     *  srr1 -> 0:3=cr0 4=lru way bit 16:31 = saved MSR
     *  msr<tgpr> -> 1
     *  iMiss -> ea that missed
     *  iCmp -> the compare value for the va that missed
     *  hash1 -> pointer to first hash pteg
     *  hash2 -> pointer to second hash pteg
     *
     * Register usage:
     *
     *  r0 is saved counter
     *  r1 is junk
     *  r2 is pointer to pteg
     *  r3 is current compare value
     */
    asm volatile(".align 8; .globl __vector_imiss; .type __vector_imiss,@function\n"
        "__vector_imiss:\n"
        "       mfspr   %r2,hash1       \n"
        "       addi    %r1,0,8         \n"
        "       mfctr   %r0             \n"
        "       mfspr   %r3,iCmp        \n"
        "       addi    %r2,%r2,-8      \n"
        "im0:   mtctr   %r1             \n"
        "im1:   lwzu    %r1,8(%r2)      \n"
        "       cmp     c0,%r1,%r3      \n"
        "       bdnzf   eq, im1         \n"
        "       bne     instrSecHash    \n"
        "       l       %r1,+4(%r2)     \n"
        "       andi.   %r3,%r1,8       \n"
        "       bne     doISIp          \n"
        "       mtctr   %r0             \n"
        "       mfspr   %r0,iMiss       \n"
        "       mfspr   %r3,srr1        \n"
        "       mtcrf   0x80,%r3        \n"
        "       mtspr   rpa,%r1         \n"
        "       ori     %r1,%r1,0x100   \n"
        "       srwi    %r1,%r1,8       \n"
        "       tlbli   %r0             \n"
        "       stb     %r1,+6(%r2)     \n"
        "       rfi                     \n"

        "instrSecHash:                  \n"
        "       andi.   %r1,%r3,0x0040  \n"
        "       bne     doISI           \n"
        "       mfspr   %r2,hash2       \n"
        "       ori     %r3,%r3,0x0040  \n"
        "       addi    %r1,0,8         \n"
        "       addi    %r2,%r2,-8      \n"
        "       b       im0             \n"

        "doISIp:                        \n"
        "       mfspr   %r3, srr1       \n"
        "       andi.   %r2,%r3,0xffff  \n"
        "       addis   %r2,%r2,0x0800  \n"
        "       b       isi1            \n"
        "doISI:                         \n"
        "       mfspr   %r3, srr1       \n"
        "       andi.   %r2,%r3,0xffff  \n"
        "       addis   %r2,%r2,0x4000  \n"
        "isi1:                          \n"
        "       mtctr   %r0             \n"
        "       mtspr   srr1,%r2        \n"
        "       mfmsr   %r0             \n"
        "       xoris   %r0,%r0, 0x8002 \n"
        "       mtcrf   0x80,%r3        \n"
        "       mtmsr   %r0             \n"
        "       ba      0x0400          \n"
    );
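    /*
     * If neither PTEG holds a matching entry, or the page is guarded (the "andi. %r3,%r1,8"
     * test above), the handler restores state and branches to the ISI vector at 0x0400 so
     * the kernel's full instruction-access exception path can report the fault.
     */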
    /*
     * Data TLB miss flow
     * Entry:
     *
     *  Vec = 1100
     *  srr0 -> address of instruction that caused data tlb miss
     *  srr1 -> 0:3=cr0 4=lru way bit 5=1 if store 16:31 = saved MSR
     *  msr<tgpr> -> 1
     *  dMiss -> ea that missed
     *  dCmp -> the compare value for the va that missed
     *  hash1 -> pointer to first hash pteg
     *  hash2 -> pointer to second hash pteg
     *
     * Register usage:
     *
     *  r0 is saved counter
     *  r1 is junk
     *  r2 is pointer to pteg
     *  r3 is current compare value
     */
    asm volatile(".align 8; .globl __vector_dmiss; .type __vector_dmiss,@function\n"
        "__vector_dmiss:\n"
        "       mfspr   %r2, hash1      \n"
        "       addi    %r1, 0, 8       \n"
        "       mfctr   %r0             \n"
        "       mfspr   %r3, dCmp       \n"
        "       addi    %r2, %r2, -8    \n"
        "dm0:   mtctr   %r1             \n"
        "dm1:   lwzu    %r1, 8(%r2)     \n"
        "       cmp     c0, %r1, %r3    \n"
        "       bdnzf   eq, dm1         \n"
        "       bne     dataSecHash     \n"
        "       l       %r1, +4(%r2)    \n"
        "       mtctr   %r0             \n"
        "       mfspr   %r0, dMiss      \n"
        "       mfspr   %r3, srr1       \n"
        "       mtcrf   0x80, %r3       \n"
        "       mtspr   rpa, %r1        \n"
        "       ori     %r1, %r1, 0x100 \n"
        "       srwi    %r1, %r1, 8     \n"
        "       tlbld   %r0             \n"
        "       stb     %r1, +6(%r2)    \n"
        "       rfi                     \n"

        "dataSecHash:                   \n"
        "       andi.   %r1, %r3, 0x0040\n"
        "       bne     doDSI           \n"
        "       mfspr   %r2, hash2      \n"
        "       ori     %r3, %r3, 0x0040\n"
        "       addi    %r1, 0, 8       \n"
        "       addi    %r2, %r2, -8    \n"
        "       b       dm0             \n"
    );
    asm volatile(".align 8; .globl __vector_dmissw; .type __vector_dmissw,@function\n"
        "__vector_dmissw:\n"
        "       mfspr   %r2, hash1      \n"
        "       addi    %r1, 0, 8       \n"
        "       mfctr   %r0             \n"
        "       mfspr   %r3, dCmp       \n"
        "       addi    %r2, %r2, -8    \n"
        "ceq0:  mtctr   %r1             \n"
        "ceq1:  lwzu    %r1, 8(%r2)     \n"
        "       cmp     c0, %r1, %r3    \n"
        "       bdnzf   eq, ceq1        \n"
        "       bne     cEq0SecHash     \n"
        "       l       %r1, +4(%r2)    \n"
        "       andi.   %r3,%r1,0x80    \n"
        "       beq     cEq0ChkProt     \n"
        "ceq2:  mtctr   %r0             \n"
        "       mfspr   %r0, dMiss      \n"
        "       mfspr   %r3, srr1       \n"
        "       mtcrf   0x80, %r3       \n"
        "       mtspr   rpa, %r1        \n"
        "       tlbld   %r0             \n"
        "       rfi                     \n"

        "cEq0SecHash:                   \n"
        "       andi.   %r1, %r3, 0x0040\n"
        "       bne     doDSI           \n"
        "       mfspr   %r2, hash2      \n"
        "       ori     %r3, %r3, 0x0040\n"
        "       addi    %r1, 0, 8       \n"
        "       addi    %r2, %r2, -8    \n"
        "       b       ceq0            \n"

        "cEq0ChkProt:                   \n"
        "       rlwinm. %r3,%r1,30,0,1  \n"
        "       bge-    chk0            \n"
        "       andi.   %r3,%r1,1       \n"
        "       beq+    chk2            \n"
        "       b       doDSIp          \n"
        "chk0:  mfspr   %r3,srr1        \n"
        "       andis.  %r3,%r3,0x0008  \n"
        "       beq     chk2            \n"
        "       b       doDSIp          \n"

        "chk2:  ori     %r1, %r1, 0x180 \n"
        "       sth     %r1, 6(%r2)     \n"
        "       b       ceq2            \n"

        "doDSI:                         \n"
        "       mfspr   %r3, srr1       \n"
        "       rlwinm  %r1, %r3, 9,6,6 \n"
        "       addis   %r1, %r1, 0x4000\n"
        "       b       dsi1            \n"

        "doDSIp:                        \n"
        "       mfspr   %r3, srr1       \n"
        "       rlwinm  %r1, %r3, 9,6,6 \n"
        "       addis   %r1, %r1, 0x0800\n"

        "dsi1:                          \n"
        "       mtctr   %r0             \n"
        "       andi.   %r2, %r3, 0xffff\n"
        "       mtspr   srr1, %r2       \n"
        "       mtspr   dsisr, %r1      \n"
        "       mfspr   %r1, dMiss      \n"
        "       rlwinm. %r2,%r2,0,31,31 \n"
        "       beq     dsi2            \n"
        "       xor     %r1,%r1,0x07    \n"

        "dsi2:                          \n"
        "       mtspr   dar, %r1        \n"
        "       mfmsr   %r0             \n"
        "       xoris   %r0, %r0, 0x2   \n"
        "       mtcrf   0x80, %r3       \n"
        "       mtmsr   %r0             \n"
        "       ba      0x0300          \n"
    );

    asm volatile(".align 8;");
}