/*
    Copyright © 2008-2014, The AROS Development Team. All rights reserved.
*/

#include <aros/kernel.h>
#include <aros/libcall.h>
#include <asm/mpc5200b.h>

#include <proto/exec.h>
#include <proto/kernel.h>

#include "kernel_intern.h"
/* Exception handler template and its patch offsets, emitted by the asm blocks below */
extern void __tmpl_start();
extern uint32_t __tmpl_addr_lo;
extern uint32_t __tmpl_addr_hi;
extern uint32_t __tmpl_irq_num;
extern uint32_t __tmpl_length;

static void init_interrupt(uint8_t num, void *handler);
void __attribute__((noreturn)) program_handler(regs_t *ctx, uint8_t exception, void *self);
void __attribute__((noreturn)) generic_handler(regs_t *ctx, uint8_t exception, void *self);
void __attribute__((noreturn)) decrementer_handler(regs_t *ctx, uint8_t exception, void *self);
static void flush_cache(char *start, char *end);
AROS_LH4(void *, KrnAddExceptionHandler,
         AROS_LHA(uint8_t, irq, D0),
         AROS_LHA(void *, handler, A0),
         AROS_LHA(void *, handlerData, A1),
         AROS_LHA(void *, handlerData2, A2),
         struct KernelBase *, KernelBase, 14, Kernel)
{
    AROS_LIBFUNC_INIT

    struct ExecBase *SysBase = getSysBase();
    struct ExceptNode *handle = NULL;

    D(bug("[KRN] KrnAddExceptionHandler(%02x, %012p, %012p, %012p):\n", irq, handler, handlerData, handlerData2));

    /* Go to supervisor mode */

    handle = Allocate(KernelBase->kb_SupervisorMem, sizeof(struct ExceptNode));
    D(bug("[KRN] handle=%012p\n", handle));

    if (handle)
    {
        handle->in_Handler = handler;
        handle->in_HandlerData = handlerData;
        handle->in_HandlerData2 = handlerData2;
        handle->in_type = it_exception;

        ADDHEAD(&KernelBase->kb_Exceptions[irq], &handle->in_Node);
    }

    return handle;

    AROS_LIBFUNC_EXIT
}
AROS_LH1(void, KrnRemExceptionHandler,
         AROS_LHA(void *, handle, A0),
         struct KernelBase *, KernelBase, 15, Kernel)
{
    AROS_LIBFUNC_INIT

    struct ExecBase *SysBase = getSysBase();
    struct ExceptNode *h = handle;

    if (h && (h->in_type == it_exception))
    {
        REMOVE(&h->in_Node);

        /* The node was allocated as a struct ExceptNode, so free it with the same size */
        Deallocate(KernelBase->kb_SupervisorMem, h, sizeof(struct ExceptNode));
    }

    AROS_LIBFUNC_EXIT
}
AROS_LH4(void *, KrnAddIRQHandler,
         AROS_LHA(uint8_t, irq, D0),
         AROS_LHA(void *, handler, A0),
         AROS_LHA(void *, handlerData, A1),
         AROS_LHA(void *, handlerData2, A2),
         struct KernelBase *, KernelBase, 7, Kernel)
{
    AROS_LIBFUNC_INIT

    struct ExecBase *SysBase = getSysBase();
    struct IntrNode *handle = NULL;

    D(bug("[KRN] KrnAddIRQHandler(%02x, %012p, %012p, %012p):\n", irq, handler, handlerData, handlerData2));

    /* Go to supervisor mode */

    handle = Allocate(KernelBase->kb_SupervisorMem, sizeof(struct IntrNode));
    D(bug("[KRN] handle=%012p\n", handle));

    if (handle)
    {
        handle->in_Handler = handler;
        handle->in_HandlerData = handlerData;
        handle->in_HandlerData2 = handlerData2;
        handle->in_type = it_interrupt;
        handle->in_nr = irq;

        ADDHEAD(&KernelBase->kb_Interrupts[irq], &handle->in_Node);

        ictl_enable_irq(irq);
    }

    return handle;

    AROS_LIBFUNC_EXIT
}
AROS_LH1(void, KrnRemIRQHandler,
         AROS_LHA(void *, handle, A0),
         struct KernelBase *, KernelBase, 8, Kernel)
{
    AROS_LIBFUNC_INIT

    struct ExecBase *SysBase = getSysBase();
    struct IntrNode *h = handle;

    if (h && (h->in_type == it_interrupt))
    {
        /* Read the IRQ number only after the handle has been validated */
        uint8_t irq = h->in_nr;

        REMOVE(&h->in_Node);

        /* Disable the interrupt source if its handler list became empty */
        if (IsListEmpty(&KernelBase->kb_Interrupts[irq]))
            ictl_disable_irq(irq);

        Deallocate(KernelBase->kb_SupervisorMem, h, sizeof(struct IntrNode));
    }

    AROS_LIBFUNC_EXIT
}
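/*
 * Usage sketch (illustration only, not part of the original source): drivers
 * normally reach the two functions above through the kernel.resource API from
 * <proto/kernel.h>. The callback prototype below is an assumption made for the
 * sake of the example; the real signature is dictated by the interrupt
 * controller dispatch code (ictl_handler), not by this sketch.
 */
#if 0
static void my_irq_handler(void *data, void *data2)
{
    /* acknowledge and service the hardware here */
}

static void example_irq_usage(void)
{
    /* Install on a hypothetical IRQ number, then remove again */
    void *h = KrnAddIRQHandler(3, my_irq_handler, NULL, NULL);

    if (h)
        KrnRemIRQHandler(h);
}
#endif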
/*
 * G2 core and exceptions.
 *
 * The MPC5200B CPU, like most members of the PowerPC family, has two fixed
 * locations for exception handlers: 0x0000_xxxx and 0xFFFF_xxxx. When an
 * exception occurs, the CPU calculates the entry address by shifting the
 * exception number eight bits to the left, adds the base location, and begins
 * executing code at the calculated address.
 *
 * For AROS this means there are only 256 bytes (64 instructions) available
 * for an exception entry. Therefore, like all PowerPC operating systems, AROS
 * performs a quick initialization of the exception, saves a few registers,
 * sets the exception number, determines the handler address and then jumps to
 * common trampoline code. There, the rest of the CPU context is saved, the
 * MMU is activated and the handler routine written in C is called.
 *
 * Leaving the exception is performed through the core_LeaveInterrupt() call,
 * which takes the CPU context as its parameter.
 *
 * There is some trouble related to the MMU, the location of the exception
 * handler and accessing data from there. The PPC executes exception handlers
 * in real mode, which means that MMU translations are disabled completely.
 * Therefore, going back to the MMU-enabled state is performed in two steps:
 * 1. The MMU for DATA is turned on very early, because otherwise the
 *    exception handler would not be able to access the supervisor stack.
 * 2. The MMU for CODE is turned on as late as possible, when the C code is
 *    called. The call is performed through the rfi instruction, which
 *    restores the previous contents of the MSR register (actually prepared by
 *    the asm part of the exception handler), which in turn enables the MMU
 *    for CODE.
 */
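/*
 * Illustration only (not part of the original source): a minimal sketch of
 * the vector address calculation described above, assuming the low prefix
 * (0x0000_xxxx) is in use, as init_interrupt() below also assumes.
 */
#if 0
static inline intptr_t exception_vector_address(uint8_t num)
{
    /* base 0x00000000 + (exception number << 8), e.g. vector 7 -> 0x700 */
    return (intptr_t)(num << 8);
}
#endif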
extern uint32_t __vector_imiss;
extern uint32_t __vector_dmiss;
extern uint32_t __vector_dmissw;

void intr_init()
{
    D(bug("[KRN] Initializing exception handlers\n"));

    init_interrupt( 1, generic_handler);        /* RESET */
    init_interrupt( 2, generic_handler);        /* Machine check */
    init_interrupt( 3, mmu_handler);            /* DSI */
    init_interrupt( 4, mmu_handler);            /* ISI */
    init_interrupt( 5, ictl_handler);           /* External Intr */
    init_interrupt( 6, generic_handler);        /* Alignment */
    init_interrupt( 7, program_handler);        /* Program */
    init_interrupt( 8, generic_handler);        /* Floating point unavailable */
    init_interrupt( 9, decrementer_handler);    /* Decrementer */
    init_interrupt(10, generic_handler);        /* Critical exception */
    init_interrupt(12, syscall_handler);        /* Syscall */
    init_interrupt(13, generic_handler);        /* Trace */
    init_interrupt(16, generic_handler);        /* Instruction translation miss */
    init_interrupt(17, generic_handler);        /* Data load translation miss */
    init_interrupt(18, generic_handler);        /* Data store translation miss */
    init_interrupt(19, generic_handler);        /* Instruction address breakpoint */
    init_interrupt(20, ictl_handler);           /* SMI */
}
/*
 * Initializer of an exception handler. It copies the template code into the
 * proper location and adjusts two exception-dependent elements in the code:
 * the instruction which loads the exception number, "li %r4,exception_number",
 * and the two instructions which load the address of the handler,
 * "lis %r5,handler_address@ha; la %r5,handler_address@l(%r5)".
 *
 * Once ready, the data cache has to be flushed back into memory and the
 * instruction cache has to be invalidated.
 */
static void init_interrupt(uint8_t num, void *handler)
{
    if (num > 0 && num < 0x2f)
    {
        intptr_t target = num << 8;

        /* The TLB miss vectors (16, 17, 18) use dedicated, hand-written code */
        if (num == 16)
            memcpy((void*)target, &__vector_imiss, 256);
        else if (num == 17)
            memcpy((void*)target, &__vector_dmiss, 256);
        else if (num == 18)
            memcpy((void*)target, &__vector_dmissw, 256);
        else
        {
            memcpy((void*)target, __tmpl_start, __tmpl_length);

            /* Fix the exception number */
            *(uint16_t *)(target + __tmpl_irq_num) = num;

            /* Fix the handler address */
            *(uint16_t *)(target + __tmpl_addr_lo) = (intptr_t)handler & 0x0000ffff;
            *(uint16_t *)(target + __tmpl_addr_hi) = (intptr_t)handler >> 16;

            /*
             * Adjustment of the lower halfword of the address is done through
             * the "la" instruction, which happens to be the same as addi:
             *
             *   la %reg1, offset(%reg2)  <=>  addi %reg1, %reg2, offset
             *
             * If the offset is bigger than 32KB (thus seen by addi as a
             * negative number), increase the upper halfword by one.
             * (A worked example follows this function.)
             */
            if ((intptr_t)handler & 0x00008000)
                (*(uint16_t *)(target + __tmpl_addr_hi))++;
        }

        /* Flush the cache */
        flush_cache((char*)target, (char*)target + 0xff);
    }
}
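/*
 * Illustration only (not part of the original source): a worked example of
 * the @ha/@l split performed above. The handler address used here is made up.
 */
#if 0
static void address_split_example(void)
{
    intptr_t handler = 0xFF80C321;          /* hypothetical handler address */
    uint16_t lo = handler & 0x0000ffff;     /* 0xC321 */
    uint16_t hi = handler >> 16;            /* 0xFF80 */

    /* addi sign-extends 0xC321 to -0x3CDF, so the high halfword must be bumped */
    if (handler & 0x00008000)
        hi++;                               /* 0xFF81 */

    /* (0xFF81 << 16) + (int16_t)0xC321 = 0xFF810000 - 0x3CDF = 0xFF80C321 */
    (void)lo;
    (void)hi;
}
#endif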
/* Tiny routine to flush caches for a region of memory */
static void flush_cache(char *start, char *end)
{
    char *ptr;

    /* Align both pointers down to the 32-byte cache line size */
    start = (char*)((unsigned long)start & 0xffffffe0);
    end   = (char*)((unsigned long)end & 0xffffffe0);

    /* Write modified data cache lines back to memory. Since end was aligned
       down, include the final cache line as well. */
    for (ptr = start; ptr <= end; ptr += 32)
    {
        asm volatile("dcbst 0,%0"::"r"(ptr));
    }
    asm volatile("sync");

    /* Invalidate the corresponding instruction cache lines */
    for (ptr = start; ptr <= end; ptr += 32)
    {
        asm volatile("icbi 0,%0"::"r"(ptr));
    }

    asm volatile("sync; isync; ");
}
void __attribute__((noreturn)) fpu_handler(context_t *ctx, uint8_t exception, void *self)
{
    struct KernelBase *KernelBase = getKernelBase();

    if (!IsListEmpty(&KernelBase->kb_Exceptions[exception]))
    {
        struct ExceptNode *in, *intemp;

        ForeachNodeSafe(&KernelBase->kb_Exceptions[exception], in, intemp)
        {
            /*
             * Call every handler tied to this exception.
             */
            in->in_Handler(ctx, in->in_HandlerData, in->in_HandlerData2);
        }
    }

    core_LeaveInterrupt(ctx);
}
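/*
 * Illustration only (not part of the original source): the dispatch loops in
 * this file invoke installed exception nodes as
 * in_Handler(ctx, in_HandlerData, in_HandlerData2). A callback installed via
 * KrnAddExceptionHandler() could therefore look like the hypothetical sketch
 * below; the return convention (non-zero = handled) follows the comment in
 * generic_handler() further down.
 */
#if 0
static int my_exception_handler(regs_t *ctx, void *data, void *data2)
{
    /* Inspect ctx and decide whether this exception was ours */
    return 0;   /* 0 = not handled, let generic_handler() report it */
}
#endif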
extern uint64_t tbu1;
extern uint64_t tbu2;
extern uint64_t last_calc;
extern uint64_t idle_time;
extern uint32_t cpu_usage;
extern struct Task *idle_task;
/* Decrementer handler */
void __attribute__((noreturn)) decrementer_handler(regs_t *ctx, uint8_t exception, void *self)
{
    struct KernelBase *KernelBase = getKernelBase();
    struct ExecBase *SysBase = getSysBase();
    // static uint32_t cnt = 0;

    /* Reload the decrementer: 33 MHz time base divided by 100 gives a 100 Hz tick */
    asm volatile("mtdec %0"::"r"(33000000/100));

    if (!IsListEmpty(&KernelBase->kb_Exceptions[exception]))
    {
        struct ExceptNode *in, *intemp;

        ForeachNodeSafe(&KernelBase->kb_Exceptions[exception], in, intemp)
        {
            /*
             * Call every handler tied to this exception.
             */
            in->in_Handler(ctx, in->in_HandlerData, in->in_HandlerData2);
        }
    }

    if (SysBase && SysBase->Elapsed)
    {
        if (--SysBase->Elapsed == 0)
        {
            SysBase->SysFlags |= 0x2000;
            SysBase->AttnResched |= 0x80;
        }
    }

    /* CPU usage meter. It should not really live here. (A worked example of
       the calculation follows this handler.) */
    uint64_t current = mftbu();
    if (current - last_calc > 33000000)
    {
        uint32_t total_time = current - last_calc;

        if (SysBase->ThisTask == idle_task)
        {
            idle_time += tbu2 - tbu1;
        }

        if (total_time < idle_time)
            total_time = idle_time;

        cpu_usage = 1000 - ((uint32_t)(idle_time))/(total_time/1000);

        D(bug("[KRN] CPU usage: %3d.%d\n", cpu_usage / 10, cpu_usage % 10));
    }

    core_ExitInterrupt(ctx);
}
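/*
 * Illustration only (not part of the original source): the per-mille
 * calculation used by the CPU usage meter above, with made-up numbers.
 */
#if 0
static void cpu_usage_example(void)
{
    uint64_t idle  = 8250000;       /* time base ticks spent in the idle task */
    uint32_t total = 33000000;      /* roughly one second of 33 MHz time base */

    /* 1000 - 8250000 / 33000 = 1000 - 250 = 750, printed as "75.0" percent */
    uint32_t usage = 1000 - ((uint32_t)idle) / (total / 1000);
    (void)usage;
}
#endif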
/* Program exception handler: emulates a few SPR reads and reports traps */
void __attribute__((noreturn)) program_handler(regs_t *ctx, uint8_t exception, void *self)
{
    struct KernelBase *KernelBase = getKernelBase();
    struct ExecBase *SysBase = getSysBase();

    uint32_t insn = *(uint32_t *)ctx->srr0;

    if ((insn & 0xfc1fffff) == 0x7c1442a6)          /* mfspr sprg4 */
    {
        ctx->gpr[(insn >> 21) & 0x1f] = (uint32_t)getKernelBase();
        ctx->srr0 += 4;                             /* skip the emulated instruction */
        core_LeaveInterrupt(ctx);
    }
    else if ((insn & 0xfc1fffff) == 0x7c1542a6)     /* mfspr sprg5 */
    {
        ctx->gpr[(insn >> 21) & 0x1f] = (uint32_t)getSysBase();
        ctx->srr0 += 4;                             /* skip the emulated instruction */
        core_LeaveInterrupt(ctx);
    }
    else if (insn == 0x7fe00008)                    /* trap */
    {
        D(bug("[KRN] trap @ %08x (r3=%08x)\n", ctx->srr0, ctx->gpr[3]));

        struct Task *t = FindTask(NULL);
        D(bug("[KRN] %s %p (%s)\n", t->tc_Node.ln_Type == NT_TASK ? "Task" : "Process", t,
              t->tc_Node.ln_Name ? t->tc_Node.ln_Name : "--unknown--"));

        D(bug("[KRN] SRR0=%08x, SRR1=%08x\n", ctx->srr0, ctx->srr1));
        D(bug("[KRN] CTR=%08x LR=%08x XER=%08x CCR=%08x\n", ctx->ctr, ctx->lr, ctx->xer, ctx->ccr));
        D(bug("[KRN] DAR=%08x DSISR=%08x\n", ctx->dar, ctx->dsisr));

        D(bug("[KRN] HASH1=%08x HASH2=%08x IMISS=%08x DMISS=%08x ICMP=%08x DCMP=%08x\n",
              rdspr(978), rdspr(979), rdspr(980), rdspr(976), rdspr(981), rdspr(977)));

        D(bug("[KRN] SPRG0=%08x SPRG1=%08x SPRG2=%08x SPRG3=%08x SPRG4=%08x SPRG5=%08x\n",
              rdspr(SPRG0), rdspr(SPRG1), rdspr(SPRG2), rdspr(SPRG3), rdspr(SPRG4), rdspr(SPRG5)));

        D(bug("[KRN] GPR00=%08x GPR01=%08x GPR02=%08x GPR03=%08x\n",
              ctx->gpr[0], ctx->gpr[1], ctx->gpr[2], ctx->gpr[3]));
        D(bug("[KRN] GPR04=%08x GPR05=%08x GPR06=%08x GPR07=%08x\n",
              ctx->gpr[4], ctx->gpr[5], ctx->gpr[6], ctx->gpr[7]));
        D(bug("[KRN] GPR08=%08x GPR09=%08x GPR10=%08x GPR11=%08x\n",
              ctx->gpr[8], ctx->gpr[9], ctx->gpr[10], ctx->gpr[11]));
        D(bug("[KRN] GPR12=%08x GPR13=%08x GPR14=%08x GPR15=%08x\n",
              ctx->gpr[12], ctx->gpr[13], ctx->gpr[14], ctx->gpr[15]));
        D(bug("[KRN] GPR16=%08x GPR17=%08x GPR18=%08x GPR19=%08x\n",
              ctx->gpr[16], ctx->gpr[17], ctx->gpr[18], ctx->gpr[19]));
        D(bug("[KRN] GPR20=%08x GPR21=%08x GPR22=%08x GPR23=%08x\n",
              ctx->gpr[20], ctx->gpr[21], ctx->gpr[22], ctx->gpr[23]));
        D(bug("[KRN] GPR24=%08x GPR25=%08x GPR26=%08x GPR27=%08x\n",
              ctx->gpr[24], ctx->gpr[25], ctx->gpr[26], ctx->gpr[27]));
        D(bug("[KRN] GPR28=%08x GPR29=%08x GPR30=%08x GPR31=%08x\n",
              ctx->gpr[28], ctx->gpr[29], ctx->gpr[30], ctx->gpr[31]));

        ctx->srr0 += 4;                             /* skip the trap instruction */
        core_LeaveInterrupt(ctx);
    }
    else
        generic_handler(ctx, exception, self);
}
/* Generic boring handler */
void __attribute__((noreturn)) generic_handler(regs_t *ctx, uint8_t exception, void *self)
{
    struct KernelBase *KernelBase = getKernelBase();
    struct ExecBase *SysBase = getSysBase();
    int handled = 0;

    if (!IsListEmpty(&KernelBase->kb_Exceptions[exception]))
    {
        struct ExceptNode *in, *intemp;

        ForeachNodeSafe(&KernelBase->kb_Exceptions[exception], in, intemp)
        {
            /*
             * Call every handler tied to this exception. If any of them
             * returns a non-zero value, the exception is considered handled.
             *
             * If every handler returns zero, or there are no handlers at all,
             * this generic handler dumps the context and stops the CPU.
             */
            handled |= in->in_Handler(ctx, in->in_HandlerData, in->in_HandlerData2);
        }
    }

    if (!handled)
    {
        struct Task *t = FindTask(NULL);

        D(bug("[KRN] Exception %d handler. Context @ %p, SysBase @ %p, KernelBase @ %p\n", exception, ctx, SysBase, KernelBase));
        D(bug("[KRN] %s %p (%s)\n", t->tc_Node.ln_Type == NT_TASK ? "Task" : "Process", t,
              t->tc_Node.ln_Name ? t->tc_Node.ln_Name : "--unknown--"));

        D(bug("[KRN] SRR0=%08x, SRR1=%08x\n", ctx->srr0, ctx->srr1));
        D(bug("[KRN] CTR=%08x LR=%08x XER=%08x CCR=%08x\n", ctx->ctr, ctx->lr, ctx->xer, ctx->ccr));
        D(bug("[KRN] DAR=%08x DSISR=%08x\n", ctx->dar, ctx->dsisr));

        D(bug("[KRN] HASH1=%08x HASH2=%08x IMISS=%08x DMISS=%08x ICMP=%08x DCMP=%08x\n",
              rdspr(978), rdspr(979), rdspr(980), rdspr(976), rdspr(981), rdspr(977)));

        D(bug("[KRN] SPRG0=%08x SPRG1=%08x SPRG2=%08x SPRG3=%08x SPRG4=%08x SPRG5=%08x\n",
              rdspr(SPRG0), rdspr(SPRG1), rdspr(SPRG2), rdspr(SPRG3), rdspr(SPRG4), rdspr(SPRG5)));

        D(bug("[KRN] GPR00=%08x GPR01=%08x GPR02=%08x GPR03=%08x\n",
              ctx->gpr[0], ctx->gpr[1], ctx->gpr[2], ctx->gpr[3]));
        D(bug("[KRN] GPR04=%08x GPR05=%08x GPR06=%08x GPR07=%08x\n",
              ctx->gpr[4], ctx->gpr[5], ctx->gpr[6], ctx->gpr[7]));
        D(bug("[KRN] GPR08=%08x GPR09=%08x GPR10=%08x GPR11=%08x\n",
              ctx->gpr[8], ctx->gpr[9], ctx->gpr[10], ctx->gpr[11]));
        D(bug("[KRN] GPR12=%08x GPR13=%08x GPR14=%08x GPR15=%08x\n",
              ctx->gpr[12], ctx->gpr[13], ctx->gpr[14], ctx->gpr[15]));
        D(bug("[KRN] GPR16=%08x GPR17=%08x GPR18=%08x GPR19=%08x\n",
              ctx->gpr[16], ctx->gpr[17], ctx->gpr[18], ctx->gpr[19]));
        D(bug("[KRN] GPR20=%08x GPR21=%08x GPR22=%08x GPR23=%08x\n",
              ctx->gpr[20], ctx->gpr[21], ctx->gpr[22], ctx->gpr[23]));
        D(bug("[KRN] GPR24=%08x GPR25=%08x GPR26=%08x GPR27=%08x\n",
              ctx->gpr[24], ctx->gpr[25], ctx->gpr[26], ctx->gpr[27]));
        D(bug("[KRN] GPR28=%08x GPR29=%08x GPR30=%08x GPR31=%08x\n",
              ctx->gpr[28], ctx->gpr[29], ctx->gpr[30], ctx->gpr[31]));

        D(bug("[KRN] Instruction dump:\n"));
        {
            ULONG *p = (ULONG *)ctx->srr0;
            int i;

            for (i = 0; i < 8; i++)
            {
                D(bug("[KRN] %08x: %08x\n", &p[i], p[i]));
            }
        }

        D(bug("[KRN] **UNHANDLED EXCEPTION** stopping here...\n"));

        /* Nothing sensible left to do - put the CPU into power-down state */
        wrmsr(rdmsr() | MSR_POW);
    }

    core_LeaveInterrupt(ctx);
}
/*
 * Template code for an exception handler. It is packed into a static void
 * function in order to make the assembler constraints usable. (A small sketch
 * of this constraint trick follows the function.)
 */
static void __attribute__((used)) __exception_template()
{
    asm volatile(".globl __tmpl_start; .type __tmpl_start,@function\n"
                 "__tmpl_start:          \n"
                 "    mtsprg1 %%r3       \n"    /* save %r3 */
                 "    mfcr %%r3          \n"    /* copy CR to %r3 */
                 "    mtsprg3 %%r3       \n"    /* save %r3 */
                 "    mfmsr %%r3         \n"    /* fetch the current MSR */
                 "    ori %%r3,%%r3,%2   \n"    /* Enable address translation for data */
                 "    mtmsr %%r3         \n"
                 "    isync              \n"
                 "    mfsrr1 %%r3        \n"    /* srr1 (previous MSR) reg into %r3 */
                 "    andi. %%r3,%%r3,%0 \n"    /* Was the PR bit set in MSR already? */
                 "    beq- 1f            \n"    /* No, we were in supervisor mode */
                 "    mfsprg0 %%r3       \n"    /* user mode case: SSP into %r3 */
                 "    b 2f               \n"
                 "1:  mr %%r3,%%r1       \n"    /* Supervisor case: use current stack */
                 "2:  addi %%r3,%%r3,%1  \n"    /* Make room for the CPU context */
                 ::"i"(MSR_PR),"i"(-sizeof(context_t)),"i"(MSR_DS));

    asm volatile(
                 "    stw %%r0, %[gpr0](%%r3) \n"   /* Store a bunch of registers already. I could */
                 "    stw %%r1, %[gpr1](%%r3) \n"   /* do it in the common trampoline code, but it */
                 "    stw %%r2, %[gpr2](%%r3) \n"   /* is much more sexy to do it here - this code */
                 "    mfsprg1 %%r0            \n"   /* occupies in theory ZERO bytes in memory */
                 "    stw %%r4, %[gpr4](%%r3) \n"   /* because the exception vector is 256 bytes long */
                 "    stw %%r0, %[gpr3](%%r3) \n"   /* and shouldn't be used for anything else than */
                 "    stw %%r5, %[gpr5](%%r3) \n"   /* the exception handler anyway ;) */
                 "    mfsprg3 %%r2            \n"   /* CR was stashed in SPRG3 above */
                 "    mfsrr0 %%r0             \n"
                 "    mfsrr1 %%r1             \n"
                 "__addr_hi: lis %%r5, 0xdeadbeef@ha    \n"  /* Load the address of a generic handler */
                 "__addr_lo: la %%r5, 0xdeadbeef@l(%%r5)\n"  /* yes, load immediate sucks. Think about 64-bit PPC ;) */
                 "__irq_num: li %%r4, 0x5a5a            \n"  /* Load the exception number */
                 "    stw %%r2,%[ccr](%%r3)   \n"
                 "    stw %%r0,%[srr0](%%r3)  \n"
                 "    stw %%r1,%[srr1](%%r3)  \n"
                 "    mfctr %%r0              \n"
                 "    mflr %%r1               \n"
                 "    mfxer %%r2              \n"
                 "    stw %%r0,%[ctr](%%r3)   \n"
                 "    stw %%r1,%[lr](%%r3)    \n"
                 "    stw %%r2,%[xer](%%r3)   \n"
                 ::[gpr0]"i"(offsetof(regs_t, gpr[0])),
                   [gpr1]"i"(offsetof(regs_t, gpr[1])),
                   [gpr2]"i"(offsetof(regs_t, gpr[2])),
                   [gpr3]"i"(offsetof(regs_t, gpr[3])),
                   [gpr4]"i"(offsetof(regs_t, gpr[4])),
                   [gpr5]"i"(offsetof(regs_t, gpr[5])),
                   [ccr]"i"(offsetof(regs_t, ccr)),
                   [srr0]"i"(offsetof(regs_t, srr0)),
                   [srr1]"i"(offsetof(regs_t, srr1)),
                   [ctr]"i"(offsetof(regs_t, ctr)),
                   [lr]"i"(offsetof(regs_t, lr)),
                   [xer]"i"(offsetof(regs_t, xer)));

    /*
     * Registers %r0 to %r5 are now saved together with the CPU state. Go to
     * the trampoline code which will take care of the rest. Adjust the stack
     * frame pointer now, or else it will be destroyed later by the C code.
     */
    asm volatile("addi %r1,%r3,-16");

    /*
     * Go to the trampoline code. Use a long call within the whole 4GB address
     * space in order to avoid any trouble in the future. Moreover, use the
     * PHYSICAL address, since at this stage the MMU for code is still not
     * running! If one wanted to use the MMU at this stage already, the region
     * 0x00000000-0x00003000 would have to be made *EXECUTABLE* :))
     */
    asm volatile("lis %r2,(__EXCEPTION_Trampoline - " STR(KERNEL_VIRT_BASE) " + " STR(KERNEL_PHYS_BASE) ")@ha;"
                 "la %r2,(__EXCEPTION_Trampoline - " STR(KERNEL_VIRT_BASE) " + " STR(KERNEL_PHYS_BASE) ")@l(%r2); mtctr %r2;");

    /* Jump to the trampoline code */
    asm volatile("bctr;");

    /*
     * A few data words: the length of the code above and the offsets used to
     * fix up the exception number and the handler address.
     */
    asm volatile("__tmpl_length: .long . - __tmpl_start\n");
    asm volatile("__tmpl_addr_lo: .long 2 + __addr_lo - __tmpl_start\n");
    asm volatile("__tmpl_addr_hi: .long 2 + __addr_hi - __tmpl_start\n");
    asm volatile("__tmpl_irq_num: .long 2 + __irq_num - __tmpl_start\n");
}
/*
 * The trampoline code is boring. It stores the rest of the CPU context and
 * prepares everything for execution of the C code.
 *
 * The only interesting part is the jump to the C routine, which is done
 * through the rfi instruction (return from interrupt). I do it this way
 * because rfi enables the MMU for code and moves the CPU to the desired
 * address within one instruction. (A C-level sketch of the resulting call
 * follows the function.)
 */
static void __attribute__((used)) __EXCEPTION_Trampoline_template()
{
    asm volatile(".section .aros.init,\"ax\"\n\t.align 5\n\t.globl __EXCEPTION_Trampoline\n\t.type __EXCEPTION_Trampoline,@function\n"
                 "__EXCEPTION_Trampoline:   \n\t"
                 "stw %%r6,%[gpr6](%%r3)    \n\t"
                 "stw %%r7,%[gpr7](%%r3)    \n\t"
                 "stw %%r8,%[gpr8](%%r3)    \n\t"
                 "stw %%r9,%[gpr9](%%r3)    \n\t"
                 "stw %%r10,%[gpr10](%%r3)  \n\t"
                 "stw %%r11,%[gpr11](%%r3)  \n\t"
                 "stw %%r12,%[gpr12](%%r3)  \n\t"
                 "stw %%r13,%[gpr13](%%r3)  \n\t"
                 "stw %%r14,%[gpr14](%%r3)  \n\t"
                 "stw %%r15,%[gpr15](%%r3)  \n\t"
                 "stw %%r16,%[gpr16](%%r3)  \n\t"
                 "stw %%r17,%[gpr17](%%r3)  \n\t"
                 "stw %%r18,%[gpr18](%%r3)  \n\t"
                 "stw %%r19,%[gpr19](%%r3)  \n\t"
                 "stw %%r20,%[gpr20](%%r3)  \n\t"
                 "stw %%r21,%[gpr21](%%r3)  \n\t"
                 "stw %%r22,%[gpr22](%%r3)  \n\t"
                 "stw %%r23,%[gpr23](%%r3)  \n\t"
                 "stw %%r24,%[gpr24](%%r3)  \n\t"
                 "stw %%r25,%[gpr25](%%r3)  \n\t"
                 "stw %%r26,%[gpr26](%%r3)  \n\t"
                 "stw %%r27,%[gpr27](%%r3)  \n\t"
                 "stw %%r28,%[gpr28](%%r3)  \n\t"
                 "stw %%r29,%[gpr29](%%r3)  \n\t"
                 "stw %%r30,%[gpr30](%%r3)  \n\t"
                 "stw %%r31,%[gpr31](%%r3)  \n\t"
                 ::
                 [gpr6]"i"(offsetof(regs_t, gpr[6])),
                 [gpr7]"i"(offsetof(regs_t, gpr[7])),
                 [gpr8]"i"(offsetof(regs_t, gpr[8])),
                 [gpr9]"i"(offsetof(regs_t, gpr[9])),
                 [gpr10]"i"(offsetof(regs_t, gpr[10])),
                 [gpr11]"i"(offsetof(regs_t, gpr[11])),
                 [gpr12]"i"(offsetof(regs_t, gpr[12])),
                 [gpr13]"i"(offsetof(regs_t, gpr[13])),
                 [gpr14]"i"(offsetof(regs_t, gpr[14])),
                 [gpr15]"i"(offsetof(regs_t, gpr[15])),
                 [gpr16]"i"(offsetof(regs_t, gpr[16])),
                 [gpr17]"i"(offsetof(regs_t, gpr[17])),
                 [gpr18]"i"(offsetof(regs_t, gpr[18])),
                 [gpr19]"i"(offsetof(regs_t, gpr[19])),
                 [gpr20]"i"(offsetof(regs_t, gpr[20])),
                 [gpr21]"i"(offsetof(regs_t, gpr[21])),
                 [gpr22]"i"(offsetof(regs_t, gpr[22])),
                 [gpr23]"i"(offsetof(regs_t, gpr[23])),
                 [gpr24]"i"(offsetof(regs_t, gpr[24])),
                 [gpr25]"i"(offsetof(regs_t, gpr[25])),
                 [gpr26]"i"(offsetof(regs_t, gpr[26])),
                 [gpr27]"i"(offsetof(regs_t, gpr[27])),
                 [gpr28]"i"(offsetof(regs_t, gpr[28])),
                 [gpr29]"i"(offsetof(regs_t, gpr[29])),
                 [gpr30]"i"(offsetof(regs_t, gpr[30])),
                 [gpr31]"i"(offsetof(regs_t, gpr[31]))
    );

    /* Enable the FPU before saving the floating point context */
    asm volatile("lis %%r0, %[msrval]@ha    \n\t"
                 "ori %%r0,%%r0, %[msrval]@l \n\t"
                 "mtmsr %%r0; isync         \n\t"
                 "stfd %%f0,%[fr0](%%r3)    \n\t"
                 "mffs %%f0                 \n\t"
                 "stfd %%f0,%[fpscr](%%r3)  \n\t"
                 "stfd %%f1,%[fr1](%%r3)    \n\t"
                 "stfd %%f2,%[fr2](%%r3)    \n\t"
                 "stfd %%f3,%[fr3](%%r3)    \n\t"
                 "stfd %%f4,%[fr4](%%r3)    \n\t"
                 "stfd %%f5,%[fr5](%%r3)    \n\t"
                 "stfd %%f6,%[fr6](%%r3)    \n\t"
                 "stfd %%f7,%[fr7](%%r3)    \n\t"
                 "stfd %%f8,%[fr8](%%r3)    \n\t"
                 "stfd %%f9,%[fr9](%%r3)    \n\t"
                 "stfd %%f10,%[fr10](%%r3)  \n\t"
                 "stfd %%f11,%[fr11](%%r3)  \n\t"
                 "stfd %%f12,%[fr12](%%r3)  \n\t"
                 "stfd %%f13,%[fr13](%%r3)  \n\t"
                 "stfd %%f14,%[fr14](%%r3)  \n\t"
                 "stfd %%f15,%[fr15](%%r3)  \n\t"
                 ::
                 [fpscr]"i"(offsetof(context_t, fpu.fpscr)),
                 [fr0]"i"(offsetof(context_t, fpu.fpr[0])),
                 [fr1]"i"(offsetof(context_t, fpu.fpr[1])),
                 [fr2]"i"(offsetof(context_t, fpu.fpr[2])),
                 [fr3]"i"(offsetof(context_t, fpu.fpr[3])),
                 [fr4]"i"(offsetof(context_t, fpu.fpr[4])),
                 [fr5]"i"(offsetof(context_t, fpu.fpr[5])),
                 [fr6]"i"(offsetof(context_t, fpu.fpr[6])),
                 [fr7]"i"(offsetof(context_t, fpu.fpr[7])),
                 [fr8]"i"(offsetof(context_t, fpu.fpr[8])),
                 [fr9]"i"(offsetof(context_t, fpu.fpr[9])),
                 [fr10]"i"(offsetof(context_t, fpu.fpr[10])),
                 [fr11]"i"(offsetof(context_t, fpu.fpr[11])),
                 [fr12]"i"(offsetof(context_t, fpu.fpr[12])),
                 [fr13]"i"(offsetof(context_t, fpu.fpr[13])),
                 [fr14]"i"(offsetof(context_t, fpu.fpr[14])),
                 [fr15]"i"(offsetof(context_t, fpu.fpr[15])),
                 [msrval]"i"(MSR_ME|MSR_FP|MSR_IS|MSR_DS)
    );

    asm volatile("stfd %%f16,%[fr16](%%r3)  \n\t"
                 "stfd %%f17,%[fr17](%%r3)  \n\t"
                 "stfd %%f18,%[fr18](%%r3)  \n\t"
                 "stfd %%f19,%[fr19](%%r3)  \n\t"
                 "stfd %%f20,%[fr20](%%r3)  \n\t"
                 "stfd %%f21,%[fr21](%%r3)  \n\t"
                 "stfd %%f22,%[fr22](%%r3)  \n\t"
                 "stfd %%f23,%[fr23](%%r3)  \n\t"
                 "stfd %%f24,%[fr24](%%r3)  \n\t"
                 "stfd %%f25,%[fr25](%%r3)  \n\t"
                 "stfd %%f26,%[fr26](%%r3)  \n\t"
                 "stfd %%f27,%[fr27](%%r3)  \n\t"
                 "stfd %%f28,%[fr28](%%r3)  \n\t"
                 "stfd %%f29,%[fr29](%%r3)  \n\t"
                 "stfd %%f30,%[fr30](%%r3)  \n\t"
                 "stfd %%f31,%[fr31](%%r3)  \n\t"
                 /* MSR value for the C handler: machine check, FPU and both
                    MMUs enabled. SRR0 points at the handler (still in %r5),
                    so rfi jumps there and loads that MSR in one go. */
                 "lis %%r9, %[msrval]@ha    \n\t"
                 "ori %%r9,%%r9, %[msrval]@l \n\t"
                 "mtsrr1 %%r9               \n\t"
                 "mtsrr0 %%r5               \n\t"
                 "rfi                       \n\t"
                 ::
                 [fr16]"i"(offsetof(context_t, fpu.fpr[16])),
                 [fr17]"i"(offsetof(context_t, fpu.fpr[17])),
                 [fr18]"i"(offsetof(context_t, fpu.fpr[18])),
                 [fr19]"i"(offsetof(context_t, fpu.fpr[19])),
                 [fr20]"i"(offsetof(context_t, fpu.fpr[20])),
                 [fr21]"i"(offsetof(context_t, fpu.fpr[21])),
                 [fr22]"i"(offsetof(context_t, fpu.fpr[22])),
                 [fr23]"i"(offsetof(context_t, fpu.fpr[23])),
                 [fr24]"i"(offsetof(context_t, fpu.fpr[24])),
                 [fr25]"i"(offsetof(context_t, fpu.fpr[25])),
                 [fr26]"i"(offsetof(context_t, fpu.fpr[26])),
                 [fr27]"i"(offsetof(context_t, fpu.fpr[27])),
                 [fr28]"i"(offsetof(context_t, fpu.fpr[28])),
                 [fr29]"i"(offsetof(context_t, fpu.fpr[29])),
                 [fr30]"i"(offsetof(context_t, fpu.fpr[30])),
                 [fr31]"i"(offsetof(context_t, fpu.fpr[31])),
                 [msrval]"i"(MSR_ME|MSR_FP|MSR_IS|MSR_DS)
    );
}
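/*
 * Illustration only (not part of the original source): in C-like terms, the
 * rfi dispatch performed by the trampoline above amounts to calling the
 * handler whose address was patched into the vector, with the saved context,
 * the exception number and the handler itself as arguments (%r3, %r4, %r5).
 * The helper name below is hypothetical.
 */
#if 0
typedef void (*exc_handler_t)(regs_t *ctx, uint8_t exception, void *self);

static void rfi_dispatch_sketch(exc_handler_t handler, regs_t *ctx, uint8_t num)
{
    /* SRR0 <- handler, SRR1 <- MSR_ME|MSR_FP|MSR_IS|MSR_DS, rfi */
    handler(ctx, num, (void *)handler);     /* the handlers never return */
}
#endif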
/*
 * Return from interrupt: restores the context passed as a parameter in %r3.
 */
static void __attribute__((used)) __core_LeaveInterrupt()
{
    asm volatile(".section .aros.init,\"ax\"\n\t.align 5\n\t.globl core_LeaveInterrupt\n\t.type core_LeaveInterrupt,@function\n"
                 "core_LeaveInterrupt:      \n\t"
                 "lwz %%r31,%[gpr31](%%r3)  \n\t"
                 "lwz %%r30,%[gpr30](%%r3)  \n\t"
                 "lwz %%r29,%[gpr29](%%r3)  \n\t"
                 "lwz %%r28,%[gpr28](%%r3)  \n\t"
                 "lwz %%r27,%[gpr27](%%r3)  \n\t"
                 "lwz %%r26,%[gpr26](%%r3)  \n\t"
                 "lwz %%r25,%[gpr25](%%r3)  \n\t"
                 "lwz %%r24,%[gpr24](%%r3)  \n\t"
                 "lwz %%r23,%[gpr23](%%r3)  \n\t"
                 "lwz %%r22,%[gpr22](%%r3)  \n\t"
                 "lwz %%r21,%[gpr21](%%r3)  \n\t"
                 "lwz %%r20,%[gpr20](%%r3)  \n\t"
                 "lwz %%r19,%[gpr19](%%r3)  \n\t"
                 "lwz %%r18,%[gpr18](%%r3)  \n\t"
                 "lwz %%r17,%[gpr17](%%r3)  \n\t"
                 "lwz %%r16,%[gpr16](%%r3)  \n\t"
                 "lwz %%r15,%[gpr15](%%r3)  \n\t"
                 "lwz %%r14,%[gpr14](%%r3)  \n\t"
                 "lwz %%r13,%[gpr13](%%r3)  \n\t"
                 "lwz %%r12,%[gpr12](%%r3)  \n\t"
                 ::
                 [gpr12]"i"(offsetof(regs_t, gpr[12])),
                 [gpr13]"i"(offsetof(regs_t, gpr[13])),
                 [gpr14]"i"(offsetof(regs_t, gpr[14])),
                 [gpr15]"i"(offsetof(regs_t, gpr[15])),
                 [gpr16]"i"(offsetof(regs_t, gpr[16])),
                 [gpr17]"i"(offsetof(regs_t, gpr[17])),
                 [gpr18]"i"(offsetof(regs_t, gpr[18])),
                 [gpr19]"i"(offsetof(regs_t, gpr[19])),
                 [gpr20]"i"(offsetof(regs_t, gpr[20])),
                 [gpr21]"i"(offsetof(regs_t, gpr[21])),
                 [gpr22]"i"(offsetof(regs_t, gpr[22])),
                 [gpr23]"i"(offsetof(regs_t, gpr[23])),
                 [gpr24]"i"(offsetof(regs_t, gpr[24])),
                 [gpr25]"i"(offsetof(regs_t, gpr[25])),
                 [gpr26]"i"(offsetof(regs_t, gpr[26])),
                 [gpr27]"i"(offsetof(regs_t, gpr[27])),
                 [gpr28]"i"(offsetof(regs_t, gpr[28])),
                 [gpr29]"i"(offsetof(regs_t, gpr[29])),
                 [gpr30]"i"(offsetof(regs_t, gpr[30])),
                 [gpr31]"i"(offsetof(regs_t, gpr[31]))
    );

    /* Restore FPSCR and the floating point registers */
    asm volatile("lfd %%f0,%[fpscr](%%r3)   \n\t"
                 "mtfsf 255,%%f0            \n\t"
                 "lfd %%f0,%[fr0](%%r3)     \n\t"
                 "lfd %%f1,%[fr1](%%r3)     \n\t"
                 "lfd %%f2,%[fr2](%%r3)     \n\t"
                 "lfd %%f3,%[fr3](%%r3)     \n\t"
                 "lfd %%f4,%[fr4](%%r3)     \n\t"
                 "lfd %%f5,%[fr5](%%r3)     \n\t"
                 "lfd %%f6,%[fr6](%%r3)     \n\t"
                 "lfd %%f7,%[fr7](%%r3)     \n\t"
                 "lfd %%f8,%[fr8](%%r3)     \n\t"
                 "lfd %%f9,%[fr9](%%r3)     \n\t"
                 "lfd %%f10,%[fr10](%%r3)   \n\t"
                 "lfd %%f11,%[fr11](%%r3)   \n\t"
                 "lfd %%f12,%[fr12](%%r3)   \n\t"
                 "lfd %%f13,%[fr13](%%r3)   \n\t"
                 "lfd %%f14,%[fr14](%%r3)   \n\t"
                 "lfd %%f15,%[fr15](%%r3)   \n\t"
                 ::
                 [fpscr]"i"(offsetof(context_t, fpu.fpscr)),
                 [fr0]"i"(offsetof(context_t, fpu.fpr[0])),
                 [fr1]"i"(offsetof(context_t, fpu.fpr[1])),
                 [fr2]"i"(offsetof(context_t, fpu.fpr[2])),
                 [fr3]"i"(offsetof(context_t, fpu.fpr[3])),
                 [fr4]"i"(offsetof(context_t, fpu.fpr[4])),
                 [fr5]"i"(offsetof(context_t, fpu.fpr[5])),
                 [fr6]"i"(offsetof(context_t, fpu.fpr[6])),
                 [fr7]"i"(offsetof(context_t, fpu.fpr[7])),
                 [fr8]"i"(offsetof(context_t, fpu.fpr[8])),
                 [fr9]"i"(offsetof(context_t, fpu.fpr[9])),
                 [fr10]"i"(offsetof(context_t, fpu.fpr[10])),
                 [fr11]"i"(offsetof(context_t, fpu.fpr[11])),
                 [fr12]"i"(offsetof(context_t, fpu.fpr[12])),
                 [fr13]"i"(offsetof(context_t, fpu.fpr[13])),
                 [fr14]"i"(offsetof(context_t, fpu.fpr[14])),
                 [fr15]"i"(offsetof(context_t, fpu.fpr[15]))
    );

    asm volatile("lfd %%f16,%[fr16](%%r3)   \n\t"
                 "lfd %%f17,%[fr17](%%r3)   \n\t"
                 "lfd %%f18,%[fr18](%%r3)   \n\t"
                 "lfd %%f19,%[fr19](%%r3)   \n\t"
                 "lfd %%f20,%[fr20](%%r3)   \n\t"
                 "lfd %%f21,%[fr21](%%r3)   \n\t"
                 "lfd %%f22,%[fr22](%%r3)   \n\t"
                 "lfd %%f23,%[fr23](%%r3)   \n\t"
                 "lfd %%f24,%[fr24](%%r3)   \n\t"
                 "lfd %%f25,%[fr25](%%r3)   \n\t"
                 "lfd %%f26,%[fr26](%%r3)   \n\t"
                 "lfd %%f27,%[fr27](%%r3)   \n\t"
                 "lfd %%f28,%[fr28](%%r3)   \n\t"
                 "lfd %%f29,%[fr29](%%r3)   \n\t"
                 "lfd %%f30,%[fr30](%%r3)   \n\t"
                 "lfd %%f31,%[fr31](%%r3)   \n\t"
                 ::
                 [fr16]"i"(offsetof(context_t, fpu.fpr[16])),
                 [fr17]"i"(offsetof(context_t, fpu.fpr[17])),
                 [fr18]"i"(offsetof(context_t, fpu.fpr[18])),
                 [fr19]"i"(offsetof(context_t, fpu.fpr[19])),
                 [fr20]"i"(offsetof(context_t, fpu.fpr[20])),
                 [fr21]"i"(offsetof(context_t, fpu.fpr[21])),
                 [fr22]"i"(offsetof(context_t, fpu.fpr[22])),
                 [fr23]"i"(offsetof(context_t, fpu.fpr[23])),
                 [fr24]"i"(offsetof(context_t, fpu.fpr[24])),
                 [fr25]"i"(offsetof(context_t, fpu.fpr[25])),
                 [fr26]"i"(offsetof(context_t, fpu.fpr[26])),
                 [fr27]"i"(offsetof(context_t, fpu.fpr[27])),
                 [fr28]"i"(offsetof(context_t, fpu.fpr[28])),
                 [fr29]"i"(offsetof(context_t, fpu.fpr[29])),
                 [fr30]"i"(offsetof(context_t, fpu.fpr[30])),
                 [fr31]"i"(offsetof(context_t, fpu.fpr[31]))
    );

    /* Restore the special purpose registers, then the remaining GPRs, and
       finally return to the interrupted code through rfi */
    asm volatile("lwz %%r11,%[gpr11](%%r3)  \n\t"
                 "lwz %%r0,%[srr0](%%r3)    \n\t"
                 "mtsrr0 %%r0               \n\t"
                 "lwz %%r0,%[srr1](%%r3)    \n\t"
                 "rlwinm %%r0,%%r0,0,14,12  \n\t"   /* clear the POW bit in the restored MSR */
                 "mtsrr1 %%r0               \n\t"
                 "lwz %%r0,%[ctr](%%r3)     \n\t"
                 "mtctr %%r0                \n\t"
                 "lwz %%r0,%[lr](%%r3)      \n\t"
                 "mtlr %%r0                 \n\t"
                 "lwz %%r0,%[xer](%%r3)     \n\t"
                 "mtxer %%r0                \n\t"
                 "lwz %%r10,%[gpr10](%%r3)  \n\t"
                 "lwz %%r9,%[gpr9](%%r3)    \n\t"
                 "lwz %%r8,%[gpr8](%%r3)    \n\t"
                 "lwz %%r7,%[gpr7](%%r3)    \n\t"
                 "lwz %%r6,%[gpr6](%%r3)    \n\t"
                 "lwz %%r5,%[gpr5](%%r3)    \n\t"
                 "lwz %%r4,%[gpr4](%%r3)    \n\t"
                 "lwz %%r0,%[gpr3](%%r3)    \n\t"
                 "mtsprg1 %%r0              \n\t"   /* stash the final %r3 value */
                 "lwz %%r2,%[gpr2](%%r3)    \n\t"
                 "stwcx. %%r0,0,%%r1        \n\t"   /* scratch store to clear any reservation */
                 "lwz %%r0,%[ccr](%%r3)     \n\t"
                 "mtcr %%r0                 \n\t"
                 "lwz %%r1,%[gpr1](%%r3)    \n\t"
                 "lwz %%r0,%[gpr0](%%r3)    \n\t"
                 "mfsprg1 %%r3              \n\t"
                 "rfi                       \n\t"
                 ::
                 [ccr]"i"(offsetof(regs_t, ccr)),
                 [srr0]"i"(offsetof(regs_t, srr0)),
                 [srr1]"i"(offsetof(regs_t, srr1)),
                 [ctr]"i"(offsetof(regs_t, ctr)),
                 [lr]"i"(offsetof(regs_t, lr)),
                 [xer]"i"(offsetof(regs_t, xer)),
                 [gpr0]"i"(offsetof(regs_t, gpr[0])),
                 [gpr1]"i"(offsetof(regs_t, gpr[1])),
                 [gpr2]"i"(offsetof(regs_t, gpr[2])),
                 [gpr3]"i"(offsetof(regs_t, gpr[3])),
                 [gpr4]"i"(offsetof(regs_t, gpr[4])),
                 [gpr5]"i"(offsetof(regs_t, gpr[5])),
                 [gpr6]"i"(offsetof(regs_t, gpr[6])),
                 [gpr7]"i"(offsetof(regs_t, gpr[7])),
                 [gpr8]"i"(offsetof(regs_t, gpr[8])),
                 [gpr9]"i"(offsetof(regs_t, gpr[9])),
                 [gpr10]"i"(offsetof(regs_t, gpr[10])),
                 [gpr11]"i"(offsetof(regs_t, gpr[11]))
    );
}